repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/backfill/stream.rs | crates/exex/exex/src/backfill/stream.rs | use super::job::BackfillJobResult;
use crate::{BackfillJob, SingleBlockBackfillJob};
use alloy_primitives::BlockNumber;
use futures::{
stream::{FuturesOrdered, Stream},
StreamExt,
};
use reth_ethereum_primitives::EthPrimitives;
use reth_evm::{
execute::{BlockExecutionError, BlockExecutionOutput},
ConfigureEvm,
};
use reth_node_api::NodePrimitives;
use reth_primitives_traits::RecoveredBlock;
use reth_provider::{BlockReader, Chain, StateProviderFactory};
use reth_prune_types::PruneModes;
use reth_stages_api::ExecutionStageThresholds;
use reth_tracing::tracing::debug;
use std::{
ops::RangeInclusive,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::task::JoinHandle;
/// The default parallelism for active tasks in [`StreamBackfillJob`].
pub(crate) const DEFAULT_PARALLELISM: usize = 4;
/// The default batch size for active tasks in [`StreamBackfillJob`].
const DEFAULT_BATCH_SIZE: usize = 100;
/// Boxed thread-safe iterator that yields [`BackfillJobResult`]s.
type BackfillTaskIterator<T> =
Box<dyn Iterator<Item = BackfillJobResult<T>> + Send + Sync + 'static>;
/// Backfill task output.
struct BackfillTaskOutput<T> {
job: BackfillTaskIterator<T>,
result: Option<BackfillJobResult<T>>,
}
/// Ordered queue of [`JoinHandle`]s that yield [`BackfillTaskOutput`]s.
type BackfillTasks<T> = FuturesOrdered<JoinHandle<BackfillTaskOutput<T>>>;
type SingleBlockStreamItem<N = EthPrimitives> = (
RecoveredBlock<<N as NodePrimitives>::Block>,
BlockExecutionOutput<<N as NodePrimitives>::Receipt>,
);
type BatchBlockStreamItem<N = EthPrimitives> = Chain<N>;
/// Stream for processing backfill jobs asynchronously.
///
/// This struct manages the execution of [`SingleBlockBackfillJob`] tasks, allowing blocks to be
/// processed asynchronously but in order within a specified range.
#[derive(Debug)]
pub struct StreamBackfillJob<E, P, T> {
evm_config: E,
provider: P,
prune_modes: PruneModes,
range: RangeInclusive<BlockNumber>,
tasks: BackfillTasks<T>,
parallelism: usize,
batch_size: usize,
thresholds: ExecutionStageThresholds,
}
impl<E, P, T> StreamBackfillJob<E, P, T>
where
T: Send + Sync + 'static,
{
/// Configures the parallelism of the [`StreamBackfillJob`] to handle active tasks.
pub const fn with_parallelism(mut self, parallelism: usize) -> Self {
self.parallelism = parallelism;
self
}
/// Configures the batch size for the [`StreamBackfillJob`].
pub const fn with_batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
/// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the end
/// of the [`BackfillTasks`] queue.
fn push_back(&mut self, mut job: BackfillTaskIterator<T>) {
self.tasks.push_back(tokio::task::spawn_blocking(move || BackfillTaskOutput {
result: job.next(),
job,
}));
}
/// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the
/// front of the [`BackfillTasks`] queue.
fn push_front(&mut self, mut job: BackfillTaskIterator<T>) {
self.tasks.push_front(tokio::task::spawn_blocking(move || BackfillTaskOutput {
result: job.next(),
job,
}));
}
/// Polls the next task in the [`BackfillTasks`] queue until it returns a non-empty result.
fn poll_next_task(&mut self, cx: &mut Context<'_>) -> Poll<Option<BackfillJobResult<T>>> {
while let Some(res) = ready!(self.tasks.poll_next_unpin(cx)) {
let task_result = res.map_err(BlockExecutionError::other)?;
if let BackfillTaskOutput { result: Some(job_result), job } = task_result {
// If the task returned a non-empty result, a new task advancing the job is created
// and pushed to the __front__ of the queue, so that the next item of this returned
// next.
self.push_front(job);
return Poll::Ready(Some(job_result))
};
}
Poll::Ready(None)
}
}
impl<E, P> Stream for StreamBackfillJob<E, P, SingleBlockStreamItem<E::Primitives>>
where
E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
P: BlockReader + StateProviderFactory + Clone + Unpin + 'static,
{
type Item = BackfillJobResult<SingleBlockStreamItem<E::Primitives>>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
// Spawn new tasks only if we are below the parallelism configured.
while this.tasks.len() < this.parallelism {
// Get the next block number from the range. If it is empty, we are done.
let Some(block_number) = this.range.next() else {
debug!(target: "exex::backfill", tasks = %this.tasks.len(), range = ?this.range, "No more single blocks to backfill");
break;
};
// Spawn a new task for that block
debug!(target: "exex::backfill", tasks = %this.tasks.len(), ?block_number, "Spawning new single block backfill task");
let job = Box::new(SingleBlockBackfillJob {
evm_config: this.evm_config.clone(),
provider: this.provider.clone(),
range: block_number..=block_number,
stream_parallelism: this.parallelism,
}) as BackfillTaskIterator<_>;
this.push_back(job);
}
this.poll_next_task(cx)
}
}
impl<E, P> Stream for StreamBackfillJob<E, P, BatchBlockStreamItem<E::Primitives>>
where
E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
P: BlockReader + StateProviderFactory + Clone + Unpin + 'static,
{
type Item = BackfillJobResult<BatchBlockStreamItem<E::Primitives>>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
// Spawn new tasks only if we are below the parallelism configured.
while this.tasks.len() < this.parallelism {
// Take the next `batch_size` blocks from the range and calculate the range bounds
let mut range = this.range.by_ref().take(this.batch_size);
let start = range.next();
let range_bounds = start.zip(range.last().or(start));
// Create the range from the range bounds. If it is empty, we are done.
let Some(range) = range_bounds.map(|(first, last)| first..=last) else {
debug!(target: "exex::backfill", tasks = %this.tasks.len(), range = ?this.range, "No more block batches to backfill");
break;
};
// Spawn a new task for that range
debug!(target: "exex::backfill", tasks = %this.tasks.len(), ?range, "Spawning new block batch backfill task");
let job = Box::new(BackfillJob {
evm_config: this.evm_config.clone(),
provider: this.provider.clone(),
prune_modes: this.prune_modes.clone(),
thresholds: this.thresholds.clone(),
range,
stream_parallelism: this.parallelism,
}) as BackfillTaskIterator<_>;
this.push_back(job);
}
let res = ready!(this.poll_next_task(cx));
if res.is_some() {
return Poll::Ready(res);
}
if this.range.is_empty() {
// only terminate the stream if there are no more blocks to process
return Poll::Ready(None);
}
}
}
}
impl<E, P> From<SingleBlockBackfillJob<E, P>> for StreamBackfillJob<E, P, SingleBlockStreamItem> {
fn from(job: SingleBlockBackfillJob<E, P>) -> Self {
Self {
evm_config: job.evm_config,
provider: job.provider,
prune_modes: PruneModes::default(),
range: job.range,
tasks: FuturesOrdered::new(),
parallelism: job.stream_parallelism,
batch_size: 1,
thresholds: ExecutionStageThresholds { max_blocks: Some(1), ..Default::default() },
}
}
}
impl<E, P> From<BackfillJob<E, P>> for StreamBackfillJob<E, P, BatchBlockStreamItem<E::Primitives>>
where
E: ConfigureEvm,
{
fn from(job: BackfillJob<E, P>) -> Self {
let batch_size = job.thresholds.max_blocks.map_or(DEFAULT_BATCH_SIZE, |max| max as usize);
Self {
evm_config: job.evm_config,
provider: job.provider,
prune_modes: job.prune_modes,
range: job.range,
tasks: FuturesOrdered::new(),
parallelism: job.stream_parallelism,
batch_size,
thresholds: ExecutionStageThresholds {
max_blocks: Some(batch_size as u64),
..job.thresholds
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
backfill::test_utils::{
blocks_and_execution_outcome, blocks_and_execution_outputs, chain_spec,
execute_block_and_commit_to_database,
},
BackfillJobFactory,
};
use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930};
use alloy_primitives::{b256, Address, TxKind, U256};
use eyre::Result;
use futures::StreamExt;
use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS};
use reth_db_common::init::init_genesis;
use reth_ethereum_primitives::{Block, BlockBody, Transaction};
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::{
crypto::secp256k1::public_key_to_address, Block as _, FullNodePrimitives,
};
use reth_provider::{
providers::{BlockchainProvider, ProviderNodeTypes},
test_utils::create_test_provider_factory_with_chain_spec,
ProviderFactory,
};
use reth_stages_api::ExecutionStageThresholds;
use reth_testing_utils::{generators, generators::sign_tx_with_key_pair};
use secp256k1::Keypair;
use std::sync::Arc;
#[tokio::test(flavor = "multi_thread")]
async fn test_single_blocks() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Create a key pair for the sender
let key_pair = generators::generate_key(&mut generators::rng());
let address = public_key_to_address(key_pair.public_key());
let chain_spec = chain_spec(address);
let executor = EthEvmConfig::ethereum(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
init_genesis(&provider_factory)?;
let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
// Create first 2 blocks
let blocks_and_execution_outcomes =
blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?;
// Backfill the first block
let factory = BackfillJobFactory::new(executor.clone(), blockchain_db.clone());
let mut backfill_stream = factory.backfill(1..=1).into_single_blocks().into_stream();
// execute first block
let (block, mut execution_output) = backfill_stream.next().await.unwrap().unwrap();
execution_output.state.reverts.sort();
let expected_block = blocks_and_execution_outcomes[0].0.clone();
let expected_output = &blocks_and_execution_outcomes[0].1;
assert_eq!(block, expected_block);
assert_eq!(&execution_output, expected_output);
// expect no more blocks
assert!(backfill_stream.next().await.is_none());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
async fn test_batch() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Create a key pair for the sender
let key_pair = generators::generate_key(&mut generators::rng());
let address = public_key_to_address(key_pair.public_key());
let chain_spec = chain_spec(address);
let executor = EthEvmConfig::ethereum(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
init_genesis(&provider_factory)?;
let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
// Create first 2 blocks
let (blocks, execution_outcome) =
blocks_and_execution_outcome(provider_factory, chain_spec, key_pair)?;
// Backfill the same range
let factory = BackfillJobFactory::new(executor.clone(), blockchain_db.clone())
.with_thresholds(ExecutionStageThresholds { max_blocks: Some(2), ..Default::default() })
.with_stream_parallelism(1);
let mut backfill_stream = factory.backfill(1..=2).into_stream();
let mut chain = backfill_stream.next().await.unwrap().unwrap();
chain.execution_outcome_mut().state_mut().reverts.sort();
assert!(chain.blocks_iter().eq(&blocks));
assert_eq!(chain.execution_outcome(), &execution_outcome);
// expect no more blocks
assert!(backfill_stream.next().await.is_none());
Ok(())
}
fn create_blocks(
chain_spec: &Arc<ChainSpec>,
key_pair: Keypair,
n: u64,
) -> Result<Vec<RecoveredBlock<reth_ethereum_primitives::Block>>> {
let mut blocks = Vec::with_capacity(n as usize);
let mut parent_hash = chain_spec.genesis_hash();
for (i, nonce) in (1..=n).zip(0..n) {
let block = Block {
header: Header {
parent_hash,
// Hardcoded receipts_root matching the original test (same tx in each block)
receipts_root: b256!(
"0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
),
difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
number: i,
gas_limit: MIN_TRANSACTION_GAS,
gas_used: MIN_TRANSACTION_GAS,
..Default::default()
},
body: BlockBody {
transactions: vec![sign_tx_with_key_pair(
key_pair,
Transaction::Eip2930(TxEip2930 {
chain_id: chain_spec.chain.id(),
nonce,
gas_limit: MIN_TRANSACTION_GAS,
gas_price: 1_500_000_000,
to: TxKind::Call(Address::ZERO),
value: U256::from(0.1 * ETH_TO_WEI as f64),
..Default::default()
}),
)],
..Default::default()
},
}
.try_into_recovered()?;
parent_hash = block.hash();
blocks.push(block);
}
Ok(blocks)
}
fn execute_and_commit_blocks<N>(
provider_factory: &ProviderFactory<N>,
chain_spec: &Arc<ChainSpec>,
blocks: &[RecoveredBlock<reth_ethereum_primitives::Block>],
) -> Result<()>
where
N: ProviderNodeTypes<
Primitives: FullNodePrimitives<
Block = reth_ethereum_primitives::Block,
BlockBody = reth_ethereum_primitives::BlockBody,
Receipt = reth_ethereum_primitives::Receipt,
>,
>,
{
for block in blocks {
execute_block_and_commit_to_database(provider_factory, chain_spec.clone(), block)?;
}
Ok(())
}
#[tokio::test]
async fn test_batch_parallel_range_advance() -> Result<()> {
reth_tracing::init_test_tracing();
// Create a key pair for the sender
let key_pair = generators::generate_key(&mut generators::rng());
let address = public_key_to_address(key_pair.public_key());
let chain_spec = chain_spec(address);
let executor = EthEvmConfig::ethereum(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
init_genesis(&provider_factory)?;
let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
// Create and commit 4 blocks
let blocks = create_blocks(&chain_spec, key_pair, 4)?;
execute_and_commit_blocks(&provider_factory, &chain_spec, &blocks)?;
// Create factory with batch size 2 (via thresholds max_blocks=2) and parallelism=2
let factory = BackfillJobFactory::new(executor.clone(), blockchain_db.clone())
.with_thresholds(ExecutionStageThresholds { max_blocks: Some(2), ..Default::default() })
.with_stream_parallelism(2);
// Stream backfill for range 1..=4
let mut backfill_stream = factory.backfill(1..=4).into_stream();
// Collect the two expected chains from the stream
let mut chain1 = backfill_stream.next().await.unwrap()?;
let mut chain2 = backfill_stream.next().await.unwrap()?;
assert!(backfill_stream.next().await.is_none());
// Sort reverts for comparison
chain1.execution_outcome_mut().state_mut().reverts.sort();
chain2.execution_outcome_mut().state_mut().reverts.sort();
// Compute expected chains using non-stream BackfillJob (sequential)
let factory_seq =
BackfillJobFactory::new(executor.clone(), blockchain_db.clone()).with_thresholds(
ExecutionStageThresholds { max_blocks: Some(2), ..Default::default() },
);
let mut expected_chain1 =
factory_seq.backfill(1..=2).collect::<Result<Vec<_>, _>>()?.into_iter().next().unwrap();
let mut expected_chain2 =
factory_seq.backfill(3..=4).collect::<Result<Vec<_>, _>>()?.into_iter().next().unwrap();
// Sort reverts for expected
expected_chain1.execution_outcome_mut().state_mut().reverts.sort();
expected_chain2.execution_outcome_mut().state_mut().reverts.sort();
// Assert the streamed chains match the expected sequential ones
assert_eq!(chain1.blocks(), expected_chain1.blocks());
assert_eq!(chain1.execution_outcome(), expected_chain1.execution_outcome());
assert_eq!(chain2.blocks(), expected_chain2.blocks());
assert_eq!(chain2.execution_outcome(), expected_chain2.execution_outcome());
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/backfill/test_utils.rs | crates/exex/exex/src/backfill/test_utils.rs | use std::sync::Arc;
use alloy_consensus::{constants::ETH_TO_WEI, BlockHeader, Header, TxEip2930};
use alloy_primitives::{b256, Address, TxKind, U256};
use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN_TRANSACTION_GAS};
use reth_ethereum_primitives::{Block, BlockBody, Receipt, Transaction};
use reth_evm::{
execute::{BlockExecutionOutput, Executor},
ConfigureEvm,
};
use reth_evm_ethereum::EthEvmConfig;
use reth_node_api::FullNodePrimitives;
use reth_primitives_traits::{Block as _, RecoveredBlock};
use reth_provider::{
providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef,
ProviderFactory,
};
use reth_revm::database::StateProviderDatabase;
use reth_testing_utils::generators::sign_tx_with_key_pair;
use secp256k1::Keypair;
use seismic_alloy_genesis::{Genesis, GenesisAccount};
pub(crate) fn to_execution_outcome(
block_number: u64,
block_execution_output: &BlockExecutionOutput<Receipt>,
) -> ExecutionOutcome {
ExecutionOutcome {
bundle: block_execution_output.state.clone(),
receipts: vec![block_execution_output.receipts.clone()],
first_block: block_number,
requests: vec![block_execution_output.requests.clone()],
}
}
pub(crate) fn chain_spec(address: Address) -> Arc<ChainSpec> {
// Create a chain spec with a genesis state that contains the
// provided sender
Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(Genesis {
alloc: [(
address,
GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() },
)]
.into(),
..MAINNET.genesis.clone()
})
.paris_activated()
.build(),
)
}
pub(crate) fn execute_block_and_commit_to_database<N>(
provider_factory: &ProviderFactory<N>,
chain_spec: Arc<ChainSpec>,
block: &RecoveredBlock<reth_ethereum_primitives::Block>,
) -> eyre::Result<BlockExecutionOutput<Receipt>>
where
N: ProviderNodeTypes<
Primitives: FullNodePrimitives<
Block = reth_ethereum_primitives::Block,
BlockBody = reth_ethereum_primitives::BlockBody,
Receipt = reth_ethereum_primitives::Receipt,
>,
>,
{
let provider = provider_factory.provider()?;
// Execute the block to produce a block execution output
let mut block_execution_output = EthEvmConfig::ethereum(chain_spec)
.batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider)))
.execute(block)?;
block_execution_output.state.reverts.sort();
// Convert the block execution output to an execution outcome for committing to the database
let execution_outcome = to_execution_outcome(block.number(), &block_execution_output);
// Commit the block's execution outcome to the database
let provider_rw = provider_factory.provider_rw()?;
provider_rw.append_blocks_with_state(
vec![block.clone()],
&execution_outcome,
Default::default(),
Default::default(),
)?;
provider_rw.commit()?;
Ok(block_execution_output)
}
fn blocks(
chain_spec: Arc<ChainSpec>,
key_pair: Keypair,
) -> eyre::Result<(
RecoveredBlock<reth_ethereum_primitives::Block>,
RecoveredBlock<reth_ethereum_primitives::Block>,
)> {
// First block has a transaction that transfers some ETH to zero address
let block1 = Block {
header: Header {
parent_hash: chain_spec.genesis_hash(),
receipts_root: b256!(
"0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
),
difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
number: 1,
gas_limit: MIN_TRANSACTION_GAS,
gas_used: MIN_TRANSACTION_GAS,
..Default::default()
},
body: BlockBody {
transactions: vec![sign_tx_with_key_pair(
key_pair,
Transaction::Eip2930(TxEip2930 {
chain_id: chain_spec.chain.id(),
nonce: 0,
gas_limit: MIN_TRANSACTION_GAS,
gas_price: 1_500_000_000,
to: TxKind::Call(Address::ZERO),
value: U256::from(0.1 * ETH_TO_WEI as f64),
..Default::default()
}),
)],
..Default::default()
},
}
.try_into_recovered()?;
// Second block resends the same transaction with increased nonce
let block2 = Block {
header: Header {
parent_hash: block1.hash(),
receipts_root: b256!(
"0xd3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
),
difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
number: 2,
gas_limit: MIN_TRANSACTION_GAS,
gas_used: MIN_TRANSACTION_GAS,
..Default::default()
},
body: BlockBody {
transactions: vec![sign_tx_with_key_pair(
key_pair,
Transaction::Eip2930(TxEip2930 {
chain_id: chain_spec.chain.id(),
nonce: 1,
gas_limit: MIN_TRANSACTION_GAS,
gas_price: 1_500_000_000,
to: TxKind::Call(Address::ZERO),
value: U256::from(0.1 * ETH_TO_WEI as f64),
..Default::default()
}),
)],
..Default::default()
},
}
.try_into_recovered()?;
Ok((block1, block2))
}
pub(crate) fn blocks_and_execution_outputs<N>(
provider_factory: ProviderFactory<N>,
chain_spec: Arc<ChainSpec>,
key_pair: Keypair,
) -> eyre::Result<
Vec<(RecoveredBlock<reth_ethereum_primitives::Block>, BlockExecutionOutput<Receipt>)>,
>
where
N: ProviderNodeTypes<
Primitives: FullNodePrimitives<
Block = reth_ethereum_primitives::Block,
BlockBody = reth_ethereum_primitives::BlockBody,
Receipt = reth_ethereum_primitives::Receipt,
>,
>,
{
let (block1, block2) = blocks(chain_spec.clone(), key_pair)?;
let block_output1 =
execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?;
let block_output2 =
execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?;
Ok(vec![(block1, block_output1), (block2, block_output2)])
}
pub(crate) fn blocks_and_execution_outcome<N>(
provider_factory: ProviderFactory<N>,
chain_spec: Arc<ChainSpec>,
key_pair: Keypair,
) -> eyre::Result<(Vec<RecoveredBlock<reth_ethereum_primitives::Block>>, ExecutionOutcome)>
where
N: ProviderNodeTypes,
N::Primitives: FullNodePrimitives<
Block = reth_ethereum_primitives::Block,
Receipt = reth_ethereum_primitives::Receipt,
>,
{
let (block1, block2) = blocks(chain_spec.clone(), key_pair)?;
let provider = provider_factory.provider()?;
let evm_config = EthEvmConfig::new(chain_spec);
let executor = evm_config
.batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider)));
let mut execution_outcome = executor.execute_batch(vec![&block1, &block2])?;
execution_outcome.state_mut().reverts.sort();
// Commit the block's execution outcome to the database
let provider_rw = provider_factory.provider_rw()?;
provider_rw.append_blocks_with_state(
vec![block1.clone(), block2.clone()],
&execution_outcome,
Default::default(),
Default::default(),
)?;
provider_rw.commit()?;
Ok((vec![block1, block2], execution_outcome))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/backfill/mod.rs | crates/exex/exex/src/backfill/mod.rs | mod factory;
mod job;
mod stream;
#[cfg(test)]
mod test_utils;
pub use factory::BackfillJobFactory;
pub use job::{BackfillJob, SingleBlockBackfillJob};
pub use stream::StreamBackfillJob;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/backfill/job.rs | crates/exex/exex/src/backfill/job.rs | use crate::StreamBackfillJob;
use reth_evm::ConfigureEvm;
use std::{
ops::RangeInclusive,
time::{Duration, Instant},
};
use alloy_consensus::BlockHeader;
use alloy_primitives::BlockNumber;
use reth_ethereum_primitives::Receipt;
use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, Executor};
use reth_node_api::{Block as _, BlockBody as _, NodePrimitives};
use reth_primitives_traits::{format_gas_throughput, RecoveredBlock, SignedTransaction};
use reth_provider::{
BlockReader, Chain, ExecutionOutcome, HeaderProvider, ProviderError, StateProviderFactory,
TransactionVariant,
};
use reth_prune_types::PruneModes;
use reth_revm::database::StateProviderDatabase;
use reth_stages_api::ExecutionStageThresholds;
use reth_tracing::tracing::{debug, trace};
pub(super) type BackfillJobResult<T> = Result<T, BlockExecutionError>;
/// Backfill job started for a specific range.
///
/// It implements [`Iterator`] that executes blocks in batches according to the provided thresholds
/// and yields [`Chain`]. In other words, this iterator can yield multiple items for the given range
/// depending on the configured thresholds.
#[derive(Debug)]
pub struct BackfillJob<E, P> {
pub(crate) evm_config: E,
pub(crate) provider: P,
pub(crate) prune_modes: PruneModes,
pub(crate) thresholds: ExecutionStageThresholds,
pub(crate) range: RangeInclusive<BlockNumber>,
pub(crate) stream_parallelism: usize,
}
impl<E, P> Iterator for BackfillJob<E, P>
where
E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
P: HeaderProvider + BlockReader<Transaction: SignedTransaction> + StateProviderFactory,
{
type Item = BackfillJobResult<Chain<E::Primitives>>;
fn next(&mut self) -> Option<Self::Item> {
if self.range.is_empty() {
return None
}
Some(self.execute_range())
}
}
impl<E, P> BackfillJob<E, P>
where
E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
P: BlockReader<Transaction: SignedTransaction> + HeaderProvider + StateProviderFactory,
{
/// Converts the backfill job into a single block backfill job.
pub fn into_single_blocks(self) -> SingleBlockBackfillJob<E, P> {
self.into()
}
/// Converts the backfill job into a stream.
pub fn into_stream(self) -> StreamBackfillJob<E, P, Chain<E::Primitives>> {
self.into()
}
fn execute_range(&mut self) -> BackfillJobResult<Chain<E::Primitives>> {
debug!(
target: "exex::backfill",
range = ?self.range,
"Executing block range"
);
let mut executor = self.evm_config.batch_executor(StateProviderDatabase::new(
self.provider
.history_by_block_number(self.range.start().saturating_sub(1))
.map_err(BlockExecutionError::other)?,
));
let mut fetch_block_duration = Duration::default();
let mut execution_duration = Duration::default();
let mut cumulative_gas = 0;
let batch_start = Instant::now();
let mut blocks = Vec::new();
let mut results = Vec::new();
for block_number in self.range.clone() {
// Fetch the block
let fetch_block_start = Instant::now();
// we need the block's transactions along with their hashes
let block = self
.provider
.sealed_block_with_senders(block_number.into(), TransactionVariant::WithHash)
.map_err(BlockExecutionError::other)?
.ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))
.map_err(BlockExecutionError::other)?;
fetch_block_duration += fetch_block_start.elapsed();
cumulative_gas += block.gas_used();
// Configure the executor to use the current state.
trace!(target: "exex::backfill", number = block_number, txs = block.body().transactions().len(), "Executing block");
// Execute the block
let execute_start = Instant::now();
// Unseal the block for execution
let (block, senders) = block.split_sealed();
let (header, body) = block.split_sealed_header_body();
let block = P::Block::new_sealed(header, body).with_senders(senders);
results.push(executor.execute_one(&block)?);
execution_duration += execute_start.elapsed();
// TODO(alexey): report gas metrics using `block.header.gas_used`
// Seal the block back and save it
blocks.push(block);
// Check if we should commit now
if self.thresholds.is_end_of_batch(
block_number - *self.range.start() + 1,
executor.size_hint() as u64,
cumulative_gas,
batch_start.elapsed(),
) {
break
}
}
let first_block_number = blocks.first().expect("blocks should not be empty").number();
let last_block_number = blocks.last().expect("blocks should not be empty").number();
debug!(
target: "exex::backfill",
range = ?*self.range.start()..=last_block_number,
block_fetch = ?fetch_block_duration,
execution = ?execution_duration,
throughput = format_gas_throughput(cumulative_gas, execution_duration),
"Finished executing block range"
);
self.range = last_block_number + 1..=*self.range.end();
let outcome = ExecutionOutcome::from_blocks(
first_block_number,
executor.into_state().take_bundle(),
results,
);
let chain = Chain::new(blocks, outcome, None);
Ok(chain)
}
}
/// Single block Backfill job started for a specific range.
///
/// It implements [`Iterator`] which executes a block each time the
/// iterator is advanced and yields ([`RecoveredBlock`], [`BlockExecutionOutput`])
#[derive(Debug, Clone)]
pub struct SingleBlockBackfillJob<E, P> {
pub(crate) evm_config: E,
pub(crate) provider: P,
pub(crate) range: RangeInclusive<BlockNumber>,
pub(crate) stream_parallelism: usize,
}
impl<E, P> Iterator for SingleBlockBackfillJob<E, P>
where
E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
P: HeaderProvider + BlockReader + StateProviderFactory,
{
type Item = BackfillJobResult<(
RecoveredBlock<P::Block>,
BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>,
)>;
fn next(&mut self) -> Option<Self::Item> {
self.range.next().map(|block_number| self.execute_block(block_number))
}
}
impl<E, P> SingleBlockBackfillJob<E, P>
where
E: ConfigureEvm<Primitives: NodePrimitives<Block = P::Block>> + 'static,
P: HeaderProvider + BlockReader + StateProviderFactory,
{
/// Converts the single block backfill job into a stream.
pub fn into_stream(
self,
) -> StreamBackfillJob<
E,
P,
(RecoveredBlock<reth_ethereum_primitives::Block>, BlockExecutionOutput<Receipt>),
> {
self.into()
}
#[expect(clippy::type_complexity)]
pub(crate) fn execute_block(
&self,
block_number: u64,
) -> BackfillJobResult<(
RecoveredBlock<P::Block>,
BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>,
)> {
// Fetch the block with senders for execution.
let block_with_senders = self
.provider
.recovered_block(block_number.into(), TransactionVariant::WithHash)
.map_err(BlockExecutionError::other)?
.ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))
.map_err(BlockExecutionError::other)?;
// Configure the executor to use the previous block's state.
let executor = self.evm_config.batch_executor(StateProviderDatabase::new(
self.provider
.history_by_block_number(block_number.saturating_sub(1))
.map_err(BlockExecutionError::other)?,
));
trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.body().transaction_count(), "Executing block");
let block_execution_output = executor.execute(&block_with_senders)?;
Ok((block_with_senders, block_execution_output))
}
}
impl<E, P> From<BackfillJob<E, P>> for SingleBlockBackfillJob<E, P> {
fn from(job: BackfillJob<E, P>) -> Self {
Self {
evm_config: job.evm_config,
provider: job.provider,
range: job.range,
stream_parallelism: job.stream_parallelism,
}
}
}
#[cfg(test)]
mod tests {
use crate::{
backfill::{
job::ExecutionStageThresholds,
test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome},
},
BackfillJobFactory,
};
use reth_db_common::init::init_genesis;
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::crypto::secp256k1::public_key_to_address;
use reth_provider::{
providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec,
};
use reth_testing_utils::generators;
#[test]
fn test_backfill() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Create a key pair for the sender
let key_pair = generators::generate_key(&mut generators::rng());
let address = public_key_to_address(key_pair.public_key());
let chain_spec = chain_spec(address);
let executor = EthEvmConfig::ethereum(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
init_genesis(&provider_factory)?;
let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
let blocks_and_execution_outputs =
blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?;
let (block, block_execution_output) = blocks_and_execution_outputs.first().unwrap();
let execution_outcome = to_execution_outcome(block.number, block_execution_output);
// Backfill the first block
let factory = BackfillJobFactory::new(executor, blockchain_db);
let job = factory.backfill(1..=1);
let chains = job.collect::<Result<Vec<_>, _>>()?;
// Assert that the backfill job produced the same chain as we got before when we were
// executing only the first block
assert_eq!(chains.len(), 1);
let mut chain = chains.into_iter().next().unwrap();
chain.execution_outcome_mut().bundle.reverts.sort();
assert_eq!(chain.blocks(), &[(1, block.clone())].into());
assert_eq!(chain.execution_outcome(), &execution_outcome);
Ok(())
}
#[test]
fn test_single_block_backfill() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Create a key pair for the sender
let key_pair = generators::generate_key(&mut generators::rng());
let address = public_key_to_address(key_pair.public_key());
let chain_spec = chain_spec(address);
let executor = EthEvmConfig::ethereum(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
init_genesis(&provider_factory)?;
let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
let blocks_and_execution_outcomes =
blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?;
// Backfill the first block
let factory = BackfillJobFactory::new(executor, blockchain_db);
let job = factory.backfill(1..=1);
let single_job = job.into_single_blocks();
let block_execution_it = single_job.into_iter();
// Assert that the backfill job only produces a single block
let blocks_and_outcomes = block_execution_it.collect::<Vec<_>>();
assert_eq!(blocks_and_outcomes.len(), 1);
// Assert that the backfill job single block iterator produces the expected output for each
// block
for (i, res) in blocks_and_outcomes.into_iter().enumerate() {
let (block, mut execution_output) = res?;
execution_output.state.reverts.sort();
let expected_block = blocks_and_execution_outcomes[i].0.clone();
let expected_output = &blocks_and_execution_outcomes[i].1;
assert_eq!(block, expected_block);
assert_eq!(&execution_output, expected_output);
}
Ok(())
}
#[test]
fn test_backfill_with_batch_threshold() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// Create a key pair for the sender
let key_pair = generators::generate_key(&mut generators::rng());
let address = public_key_to_address(key_pair.public_key());
let chain_spec = chain_spec(address);
let executor = EthEvmConfig::ethereum(chain_spec.clone());
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
init_genesis(&provider_factory)?;
let blockchain_db = BlockchainProvider::new(provider_factory.clone())?;
let blocks_and_execution_outputs =
blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?;
let (block1, output1) = blocks_and_execution_outputs[0].clone();
let (block2, output2) = blocks_and_execution_outputs[1].clone();
// Backfill with max_blocks=1, expect two separate chains
let factory = BackfillJobFactory::new(executor, blockchain_db).with_thresholds(
ExecutionStageThresholds { max_blocks: Some(1), ..Default::default() },
);
let job = factory.backfill(1..=2);
let chains = job.collect::<Result<Vec<_>, _>>()?;
// Assert two chains, each with one block
assert_eq!(chains.len(), 2);
let mut chain1 = chains[0].clone();
chain1.execution_outcome_mut().bundle.reverts.sort();
assert_eq!(chain1.blocks(), &[(1, block1)].into());
assert_eq!(chain1.execution_outcome(), &to_execution_outcome(1, &output1));
let mut chain2 = chains[1].clone();
chain2.execution_outcome_mut().bundle.reverts.sort();
assert_eq!(chain2.blocks(), &[(2, block2)].into());
assert_eq!(chain2.execution_outcome(), &to_execution_outcome(2, &output2));
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/backfill/factory.rs | crates/exex/exex/src/backfill/factory.rs | use crate::BackfillJob;
use std::{ops::RangeInclusive, time::Duration};
use alloy_primitives::BlockNumber;
use reth_node_api::FullNodeComponents;
use reth_prune_types::PruneModes;
use reth_stages_api::ExecutionStageThresholds;
use super::stream::DEFAULT_PARALLELISM;
/// Factory for creating new backfill jobs.
#[derive(Debug, Clone)]
pub struct BackfillJobFactory<E, P> {
evm_config: E,
provider: P,
prune_modes: PruneModes,
thresholds: ExecutionStageThresholds,
stream_parallelism: usize,
}
impl<E, P> BackfillJobFactory<E, P> {
/// Creates a new [`BackfillJobFactory`].
pub fn new(evm_config: E, provider: P) -> Self {
Self {
evm_config,
provider,
prune_modes: PruneModes::none(),
thresholds: ExecutionStageThresholds {
// Default duration for a database transaction to be considered long-lived is
// 60 seconds, so we limit the backfill job to the half of it to be sure we finish
// before the warning is logged.
//
// See `reth_db::implementation::mdbx::tx::LONG_TRANSACTION_DURATION`.
max_duration: Some(Duration::from_secs(30)),
..Default::default()
},
stream_parallelism: DEFAULT_PARALLELISM,
}
}
/// Sets the prune modes
pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self {
self.prune_modes = prune_modes;
self
}
/// Sets the thresholds
pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self {
self.thresholds = thresholds;
self
}
/// Sets the stream parallelism.
///
/// Configures the [`StreamBackfillJob`](super::stream::StreamBackfillJob) created via
/// [`BackfillJob::into_stream`].
pub const fn with_stream_parallelism(mut self, stream_parallelism: usize) -> Self {
self.stream_parallelism = stream_parallelism;
self
}
}
impl<E: Clone, P: Clone> BackfillJobFactory<E, P> {
/// Creates a new backfill job for the given range.
pub fn backfill(&self, range: RangeInclusive<BlockNumber>) -> BackfillJob<E, P> {
BackfillJob {
evm_config: self.evm_config.clone(),
provider: self.provider.clone(),
prune_modes: self.prune_modes.clone(),
range,
thresholds: self.thresholds.clone(),
stream_parallelism: self.stream_parallelism,
}
}
}
impl BackfillJobFactory<(), ()> {
/// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`].
pub fn new_from_components<Node: FullNodeComponents>(
components: Node,
) -> BackfillJobFactory<Node::Evm, Node::Provider> {
BackfillJobFactory::<_, _>::new(
components.evm_config().clone(),
components.provider().clone(),
)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/wal/storage.rs | crates/exex/exex/src/wal/storage.rs | use std::{
fs::File,
ops::RangeInclusive,
path::{Path, PathBuf},
};
use crate::wal::{WalError, WalResult};
use reth_ethereum_primitives::EthPrimitives;
use reth_exex_types::ExExNotification;
use reth_node_api::NodePrimitives;
use reth_tracing::tracing::debug;
use tracing::instrument;
static FILE_EXTENSION: &str = "wal";
/// The underlying WAL storage backed by a directory of files.
///
/// Each notification is represented by a single file that contains a MessagePack-encoded
/// notification.
#[derive(Debug, Clone)]
pub struct Storage<N: NodePrimitives = EthPrimitives> {
/// The path to the WAL file.
path: PathBuf,
_pd: std::marker::PhantomData<N>,
}
impl<N> Storage<N>
where
N: NodePrimitives,
{
/// Creates a new instance of [`Storage`] backed by the file at the given path and creates
/// it doesn't exist.
pub(super) fn new(path: impl AsRef<Path>) -> WalResult<Self> {
reth_fs_util::create_dir_all(&path)?;
Ok(Self { path: path.as_ref().to_path_buf(), _pd: std::marker::PhantomData })
}
fn file_path(&self, id: u32) -> PathBuf {
self.path.join(format!("{id}.{FILE_EXTENSION}"))
}
fn parse_filename(filename: &str) -> WalResult<u32> {
filename
.strip_suffix(".wal")
.and_then(|s| s.parse().ok())
.ok_or_else(|| WalError::Parse(filename.to_string()))
}
/// Removes notification for the given file ID from the storage.
///
/// # Returns
///
/// The size of the file that was removed in bytes, if any.
#[instrument(skip(self))]
fn remove_notification(&self, file_id: u32) -> Option<u64> {
let path = self.file_path(file_id);
let size = path.metadata().ok()?.len();
match reth_fs_util::remove_file(self.file_path(file_id)) {
Ok(()) => {
debug!(target: "exex::wal::storage", "Notification was removed from the storage");
Some(size)
}
Err(err) => {
debug!(target: "exex::wal::storage", ?err, "Failed to remove notification from the storage");
None
}
}
}
/// Returns the range of file IDs in the storage.
///
/// If there are no files in the storage, returns `None`.
pub(super) fn files_range(&self) -> WalResult<Option<RangeInclusive<u32>>> {
let mut min_id = None;
let mut max_id = None;
for entry in reth_fs_util::read_dir(&self.path)? {
let entry = entry.map_err(|err| WalError::DirEntry(self.path.clone(), err))?;
if entry.path().extension() == Some(FILE_EXTENSION.as_ref()) {
let file_name = entry.file_name();
let file_id = Self::parse_filename(&file_name.to_string_lossy())?;
min_id = min_id.map_or(Some(file_id), |min_id: u32| Some(min_id.min(file_id)));
max_id = max_id.map_or(Some(file_id), |max_id: u32| Some(max_id.max(file_id)));
}
}
Ok(min_id.zip(max_id).map(|(min_id, max_id)| min_id..=max_id))
}
/// Removes notifications from the storage according to the given list of file IDs.
///
/// # Returns
///
/// Number of removed notifications and the total size of the removed files in bytes.
pub(super) fn remove_notifications(
&self,
file_ids: impl IntoIterator<Item = u32>,
) -> WalResult<(usize, u64)> {
let mut deleted_total = 0;
let mut deleted_size = 0;
for id in file_ids {
if let Some(size) = self.remove_notification(id) {
deleted_total += 1;
deleted_size += size;
}
}
Ok((deleted_total, deleted_size))
}
pub(super) fn iter_notifications(
&self,
range: RangeInclusive<u32>,
) -> impl Iterator<Item = WalResult<(u32, u64, ExExNotification<N>)>> + '_ {
range.map(move |id| {
let (notification, size) =
self.read_notification(id)?.ok_or(WalError::FileNotFound(id))?;
Ok((id, size, notification))
})
}
/// Reads the notification from the file with the given ID.
#[instrument(skip(self))]
pub(super) fn read_notification(
&self,
file_id: u32,
) -> WalResult<Option<(ExExNotification<N>, u64)>> {
let file_path = self.file_path(file_id);
debug!(target: "exex::wal::storage", ?file_path, "Reading notification from WAL");
let mut file = match File::open(&file_path) {
Ok(file) => file,
Err(err) if err.kind() == std::io::ErrorKind::NotFound => return Ok(None),
Err(err) => return Err(reth_fs_util::FsPathError::open(err, &file_path).into()),
};
let size = file.metadata().map_err(|err| WalError::FileMetadata(file_id, err))?.len();
// Deserialize using the bincode- and msgpack-compatible serde wrapper
let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_, N> =
rmp_serde::decode::from_read(&mut file)
.map_err(|err| WalError::Decode(file_id, file_path, err))?;
Ok(Some((notification.into(), size)))
}
/// Writes the notification to the file with the given ID.
///
/// # Returns
///
/// The size of the file that was written in bytes.
#[instrument(skip(self, notification))]
pub(super) fn write_notification(
&self,
file_id: u32,
notification: &ExExNotification<N>,
) -> WalResult<u64> {
let file_path = self.file_path(file_id);
debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL");
// Serialize using the bincode- and msgpack-compatible serde wrapper
let notification =
reth_exex_types::serde_bincode_compat::ExExNotification::<N>::from(notification);
reth_fs_util::atomic_write_file(&file_path, |file| {
rmp_serde::encode::write(file, ¬ification)
})?;
Ok(file_path.metadata().map_err(|err| WalError::FileMetadata(file_id, err))?.len())
}
}
#[cfg(test)]
mod tests {
use super::Storage;
use reth_exex_types::ExExNotification;
use reth_provider::Chain;
use reth_testing_utils::generators::{self, random_block};
use std::{fs::File, sync::Arc};
// wal with 1 block and tx
// <https://github.com/paradigmxyz/reth/issues/15012>
#[test]
fn decode_notification_wal() {
let wal = include_bytes!("../../test-data/28.wal");
let notification: reth_exex_types::serde_bincode_compat::ExExNotification<
'_,
reth_ethereum_primitives::EthPrimitives,
> = rmp_serde::decode::from_slice(wal.as_slice()).unwrap();
let notification: ExExNotification = notification.into();
match notification {
ExExNotification::ChainCommitted { new } => {
assert_eq!(new.blocks().len(), 1);
assert_eq!(new.tip().transaction_count(), 1);
}
_ => panic!("unexpected notification"),
}
}
#[test]
fn test_roundtrip() -> eyre::Result<()> {
let mut rng = generators::rng();
let temp_dir = tempfile::tempdir()?;
let storage: Storage = Storage::new(&temp_dir)?;
let old_block = random_block(&mut rng, 0, Default::default()).try_recover()?;
let new_block = random_block(&mut rng, 0, Default::default()).try_recover()?;
let notification = ExExNotification::ChainReorged {
new: Arc::new(Chain::new(vec![new_block], Default::default(), None)),
old: Arc::new(Chain::new(vec![old_block], Default::default(), None)),
};
// Do a round trip serialization and deserialization
let file_id = 0;
storage.write_notification(file_id, ¬ification)?;
let deserialized_notification = storage.read_notification(file_id)?;
assert_eq!(
deserialized_notification.map(|(notification, _)| notification),
Some(notification)
);
Ok(())
}
#[test]
fn test_files_range() -> eyre::Result<()> {
let temp_dir = tempfile::tempdir()?;
let storage: Storage = Storage::new(&temp_dir)?;
// Create WAL files
File::create(storage.file_path(1))?;
File::create(storage.file_path(2))?;
File::create(storage.file_path(3))?;
// Create non-WAL files that should be ignored
File::create(temp_dir.path().join("0.tmp"))?;
File::create(temp_dir.path().join("4.tmp"))?;
// Check files range
assert_eq!(storage.files_range()?, Some(1..=3));
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/wal/error.rs | crates/exex/exex/src/wal/error.rs | //! Wal Errors
use std::path::PathBuf;
/// Wal Result type.
pub type WalResult<T> = Result<T, WalError>;
/// Wal Error types
#[derive(Debug, thiserror::Error)]
pub enum WalError {
/// Filesystem error at the path
#[error(transparent)]
FsPathError(#[from] reth_fs_util::FsPathError),
/// Directory entry reading error
#[error("failed to get {0} directory entry: {1}")]
DirEntry(PathBuf, std::io::Error),
/// Error when reading file metadata
#[error("failed to get metadata for file {0}: {1}")]
FileMetadata(u32, std::io::Error),
/// Parse error
#[error("failed to parse file name: {0}")]
Parse(String),
/// Notification not found error
#[error("notification {0} not found")]
FileNotFound(u32),
/// Decode error
#[error("failed to decode notification {0} from {1}: {2}")]
Decode(u32, PathBuf, rmp_serde::decode::Error),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/wal/mod.rs | crates/exex/exex/src/wal/mod.rs | #![allow(dead_code)]
mod cache;
pub use cache::BlockCache;
mod storage;
use reth_ethereum_primitives::EthPrimitives;
use reth_node_api::NodePrimitives;
pub use storage::Storage;
mod metrics;
use metrics::Metrics;
mod error;
pub use error::{WalError, WalResult};
use std::{
path::Path,
sync::{
atomic::{AtomicU32, Ordering},
Arc,
},
};
use alloy_eips::BlockNumHash;
use alloy_primitives::B256;
use parking_lot::{RwLock, RwLockReadGuard};
use reth_exex_types::ExExNotification;
use reth_tracing::tracing::{debug, instrument};
/// WAL is a write-ahead log (WAL) that stores the notifications sent to ExExes.
///
/// WAL is backed by a directory of binary files represented by [`Storage`] and a block cache
/// represented by [`BlockCache`]. The role of the block cache is to avoid walking the WAL directory
/// and decoding notifications every time we want to iterate or finalize the WAL.
///
/// The expected mode of operation is as follows:
/// 1. On every new canonical chain notification, call [`Wal::commit`].
/// 2. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the
/// WAL.
#[derive(Debug, Clone)]
pub struct Wal<N: NodePrimitives = EthPrimitives> {
inner: Arc<WalInner<N>>,
}
impl<N> Wal<N>
where
N: NodePrimitives,
{
/// Creates a new instance of [`Wal`].
pub fn new(directory: impl AsRef<Path>) -> WalResult<Self> {
Ok(Self { inner: Arc::new(WalInner::new(directory)?) })
}
/// Returns a read-only handle to the WAL.
pub fn handle(&self) -> WalHandle<N> {
WalHandle { wal: self.inner.clone() }
}
/// Commits the notification to WAL.
pub fn commit(&self, notification: &ExExNotification<N>) -> WalResult<()> {
self.inner.commit(notification)
}
/// Finalizes the WAL up to the given canonical block, inclusive.
///
/// The caller should check that all ExExes are on the canonical chain and will not need any
/// blocks from the WAL below the provided block, inclusive.
pub fn finalize(&self, to_block: BlockNumHash) -> WalResult<()> {
self.inner.finalize(to_block)
}
/// Returns an iterator over all notifications in the WAL.
pub fn iter_notifications(
&self,
) -> WalResult<Box<dyn Iterator<Item = WalResult<ExExNotification<N>>> + '_>> {
self.inner.iter_notifications()
}
/// Returns the number of blocks in the WAL.
pub fn num_blocks(&self) -> usize {
self.inner.block_cache().num_blocks()
}
}
/// Inner type for the WAL.
#[derive(Debug)]
struct WalInner<N: NodePrimitives> {
next_file_id: AtomicU32,
/// The underlying WAL storage backed by a file.
storage: Storage<N>,
/// WAL block cache. See [`cache::BlockCache`] docs for more details.
block_cache: RwLock<BlockCache>,
metrics: Metrics,
}
impl<N> WalInner<N>
where
N: NodePrimitives,
{
fn new(directory: impl AsRef<Path>) -> WalResult<Self> {
let wal = Self {
next_file_id: AtomicU32::new(0),
storage: Storage::new(directory)?,
block_cache: RwLock::new(BlockCache::default()),
metrics: Metrics::default(),
};
wal.fill_block_cache()?;
Ok(wal)
}
fn block_cache(&self) -> RwLockReadGuard<'_, BlockCache> {
self.block_cache.read()
}
/// Fills the block cache with the notifications from the storage.
#[instrument(skip(self))]
fn fill_block_cache(&self) -> WalResult<()> {
let Some(files_range) = self.storage.files_range()? else { return Ok(()) };
self.next_file_id.store(files_range.end() + 1, Ordering::Relaxed);
let mut block_cache = self.block_cache.write();
let mut notifications_size = 0;
for entry in self.storage.iter_notifications(files_range) {
let (file_id, size, notification) = entry?;
notifications_size += size;
let committed_chain = notification.committed_chain();
let reverted_chain = notification.reverted_chain();
debug!(
target: "exex::wal",
?file_id,
reverted_block_range = ?reverted_chain.as_ref().map(|chain| chain.range()),
committed_block_range = ?committed_chain.as_ref().map(|chain| chain.range()),
"Inserting block cache entries"
);
block_cache.insert_notification_blocks_with_file_id(file_id, ¬ification);
}
self.update_metrics(&block_cache, notifications_size as i64);
Ok(())
}
#[instrument(skip_all, fields(
reverted_block_range = ?notification.reverted_chain().as_ref().map(|chain| chain.range()),
committed_block_range = ?notification.committed_chain().as_ref().map(|chain| chain.range())
))]
fn commit(&self, notification: &ExExNotification<N>) -> WalResult<()> {
let mut block_cache = self.block_cache.write();
let file_id = self.next_file_id.fetch_add(1, Ordering::Relaxed);
let size = self.storage.write_notification(file_id, notification)?;
debug!(target: "exex::wal", ?file_id, "Inserting notification blocks into the block cache");
block_cache.insert_notification_blocks_with_file_id(file_id, notification);
self.update_metrics(&block_cache, size as i64);
Ok(())
}
#[instrument(skip(self))]
fn finalize(&self, to_block: BlockNumHash) -> WalResult<()> {
let mut block_cache = self.block_cache.write();
let file_ids = block_cache.remove_before(to_block.number);
// Remove notifications from the storage.
if file_ids.is_empty() {
debug!(target: "exex::wal", "No notifications were finalized from the storage");
return Ok(())
}
let (removed_notifications, removed_size) = self.storage.remove_notifications(file_ids)?;
debug!(target: "exex::wal", ?removed_notifications, ?removed_size, "Storage was finalized");
self.update_metrics(&block_cache, -(removed_size as i64));
Ok(())
}
fn update_metrics(&self, block_cache: &BlockCache, size_delta: i64) {
self.metrics.size_bytes.increment(size_delta as f64);
self.metrics.notifications_count.set(block_cache.notification_max_blocks.len() as f64);
self.metrics.committed_blocks_count.set(block_cache.committed_blocks.len() as f64);
if let Some(lowest_committed_block_height) = block_cache.lowest_committed_block_height {
self.metrics.lowest_committed_block_height.set(lowest_committed_block_height as f64);
}
if let Some(highest_committed_block_height) = block_cache.highest_committed_block_height {
self.metrics.highest_committed_block_height.set(highest_committed_block_height as f64);
}
}
/// Returns an iterator over all notifications in the WAL.
fn iter_notifications(
&self,
) -> WalResult<Box<dyn Iterator<Item = WalResult<ExExNotification<N>>> + '_>> {
let Some(range) = self.storage.files_range()? else {
return Ok(Box::new(std::iter::empty()))
};
Ok(Box::new(self.storage.iter_notifications(range).map(|entry| Ok(entry?.2))))
}
}
/// A read-only handle to the WAL that can be shared.
#[derive(Debug)]
pub struct WalHandle<N: NodePrimitives> {
wal: Arc<WalInner<N>>,
}
impl<N> WalHandle<N>
where
N: NodePrimitives,
{
/// Returns the notification for the given committed block hash if it exists.
pub fn get_committed_notification_by_block_hash(
&self,
block_hash: &B256,
) -> WalResult<Option<ExExNotification<N>>> {
let Some(file_id) = self.wal.block_cache().get_file_id_by_committed_block_hash(block_hash)
else {
return Ok(None)
};
self.wal
.storage
.read_notification(file_id)
.map(|entry| entry.map(|(notification, _)| notification))
}
}
#[cfg(test)]
mod tests {
use crate::wal::{cache::CachedBlock, error::WalResult, Wal};
use alloy_primitives::B256;
use itertools::Itertools;
use reth_exex_types::ExExNotification;
use reth_provider::Chain;
use reth_testing_utils::generators::{
self, random_block, random_block_range, BlockParams, BlockRangeParams,
};
use std::sync::Arc;
fn read_notifications(wal: &Wal) -> WalResult<Vec<ExExNotification>> {
wal.inner.storage.files_range()?.map_or(Ok(Vec::new()), |range| {
wal.inner
.storage
.iter_notifications(range)
.map(|entry| entry.map(|(_, _, n)| n))
.collect()
})
}
fn sort_committed_blocks(
committed_blocks: Vec<(B256, u32, CachedBlock)>,
) -> Vec<(B256, u32, CachedBlock)> {
committed_blocks
.into_iter()
.sorted_by_key(|(_, _, block)| (block.block.number, block.block.hash))
.collect()
}
#[test]
fn test_wal() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let mut rng = generators::rng();
// Create an instance of the WAL in a temporary directory
let temp_dir = tempfile::tempdir()?;
let wal = Wal::new(&temp_dir)?;
assert!(wal.inner.block_cache().is_empty());
// Create 4 canonical blocks and one reorged block with number 2
let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default())
.into_iter()
.map(|block| block.try_recover())
.collect::<Result<Vec<_>, _>>()?;
let block_1_reorged = random_block(
&mut rng,
1,
BlockParams { parent: Some(blocks[0].hash()), ..Default::default() },
)
.try_recover()?;
let block_2_reorged = random_block(
&mut rng,
2,
BlockParams { parent: Some(blocks[1].hash()), ..Default::default() },
)
.try_recover()?;
// Create notifications for the above blocks.
// 1. Committed notification for blocks with number 0 and 1
// 2. Reverted notification for block with number 1
// 3. Committed notification for block with number 1 and 2
// 4. Reorged notification for block with number 2 that was reverted, and blocks with number
// 2 and 3 that were committed
let committed_notification_1 = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![blocks[0].clone(), blocks[1].clone()],
Default::default(),
None,
)),
};
let reverted_notification = ExExNotification::ChainReverted {
old: Arc::new(Chain::new(vec![blocks[1].clone()], Default::default(), None)),
};
let committed_notification_2 = ExExNotification::ChainCommitted {
new: Arc::new(Chain::new(
vec![block_1_reorged.clone(), blocks[2].clone()],
Default::default(),
None,
)),
};
let reorged_notification = ExExNotification::ChainReorged {
old: Arc::new(Chain::new(vec![blocks[2].clone()], Default::default(), None)),
new: Arc::new(Chain::new(
vec![block_2_reorged.clone(), blocks[3].clone()],
Default::default(),
None,
)),
};
// Commit notifications, verify that the block cache is updated and the notifications are
// written to WAL.
// First notification (commit block 0, 1)
let file_id = 0;
let committed_notification_1_cache_blocks = (blocks[1].number, file_id);
let committed_notification_1_cache_committed_blocks = vec![
(
blocks[0].hash(),
file_id,
CachedBlock {
block: (blocks[0].number, blocks[0].hash()).into(),
parent_hash: blocks[0].parent_hash,
},
),
(
blocks[1].hash(),
file_id,
CachedBlock {
block: (blocks[1].number, blocks[1].hash()).into(),
parent_hash: blocks[1].parent_hash,
},
),
];
wal.commit(&committed_notification_1)?;
assert_eq!(
wal.inner.block_cache().blocks_sorted(),
[committed_notification_1_cache_blocks]
);
assert_eq!(
wal.inner.block_cache().committed_blocks_sorted(),
committed_notification_1_cache_committed_blocks
);
assert_eq!(read_notifications(&wal)?, vec![committed_notification_1.clone()]);
// Second notification (revert block 1)
wal.commit(&reverted_notification)?;
let file_id = 1;
let reverted_notification_cache_blocks = (blocks[1].number, file_id);
assert_eq!(
wal.inner.block_cache().blocks_sorted(),
[reverted_notification_cache_blocks, committed_notification_1_cache_blocks]
);
assert_eq!(
wal.inner.block_cache().committed_blocks_sorted(),
committed_notification_1_cache_committed_blocks
);
assert_eq!(
read_notifications(&wal)?,
vec![committed_notification_1.clone(), reverted_notification.clone()]
);
// Third notification (commit block 1, 2)
wal.commit(&committed_notification_2)?;
let file_id = 2;
let committed_notification_2_cache_blocks = (blocks[2].number, file_id);
let committed_notification_2_cache_committed_blocks = vec![
(
block_1_reorged.hash(),
file_id,
CachedBlock {
block: (block_1_reorged.number, block_1_reorged.hash()).into(),
parent_hash: block_1_reorged.parent_hash,
},
),
(
blocks[2].hash(),
file_id,
CachedBlock {
block: (blocks[2].number, blocks[2].hash()).into(),
parent_hash: blocks[2].parent_hash,
},
),
];
assert_eq!(
wal.inner.block_cache().blocks_sorted(),
[
committed_notification_2_cache_blocks,
reverted_notification_cache_blocks,
committed_notification_1_cache_blocks,
]
);
assert_eq!(
wal.inner.block_cache().committed_blocks_sorted(),
sort_committed_blocks(
[
committed_notification_1_cache_committed_blocks.clone(),
committed_notification_2_cache_committed_blocks.clone()
]
.concat()
)
);
assert_eq!(
read_notifications(&wal)?,
vec![
committed_notification_1.clone(),
reverted_notification.clone(),
committed_notification_2.clone()
]
);
// Fourth notification (revert block 2, commit block 2, 3)
wal.commit(&reorged_notification)?;
let file_id = 3;
let reorged_notification_cache_blocks = (blocks[3].number, file_id);
let reorged_notification_cache_committed_blocks = vec![
(
block_2_reorged.hash(),
file_id,
CachedBlock {
block: (block_2_reorged.number, block_2_reorged.hash()).into(),
parent_hash: block_2_reorged.parent_hash,
},
),
(
blocks[3].hash(),
file_id,
CachedBlock {
block: (blocks[3].number, blocks[3].hash()).into(),
parent_hash: blocks[3].parent_hash,
},
),
];
assert_eq!(
wal.inner.block_cache().blocks_sorted(),
[
reorged_notification_cache_blocks,
committed_notification_2_cache_blocks,
reverted_notification_cache_blocks,
committed_notification_1_cache_blocks,
]
);
assert_eq!(
wal.inner.block_cache().committed_blocks_sorted(),
sort_committed_blocks(
[
committed_notification_1_cache_committed_blocks,
committed_notification_2_cache_committed_blocks.clone(),
reorged_notification_cache_committed_blocks.clone()
]
.concat()
)
);
assert_eq!(
read_notifications(&wal)?,
vec![
committed_notification_1,
reverted_notification,
committed_notification_2.clone(),
reorged_notification.clone()
]
);
// Now, finalize the WAL up to the block 1. Block 1 was in the third notification that also
// had block 2 committed. In this case, we can't split the notification into two parts, so
// we preserve the whole notification in both the block cache and the storage, and delete
// the notifications before it.
wal.finalize((block_1_reorged.number, block_1_reorged.hash()).into())?;
assert_eq!(
wal.inner.block_cache().blocks_sorted(),
[reorged_notification_cache_blocks, committed_notification_2_cache_blocks]
);
assert_eq!(
wal.inner.block_cache().committed_blocks_sorted(),
sort_committed_blocks(
[
committed_notification_2_cache_committed_blocks.clone(),
reorged_notification_cache_committed_blocks.clone()
]
.concat()
)
);
assert_eq!(
read_notifications(&wal)?,
vec![committed_notification_2.clone(), reorged_notification.clone()]
);
// Re-open the WAL and verify that the cache population works correctly
let wal = Wal::new(&temp_dir)?;
assert_eq!(
wal.inner.block_cache().blocks_sorted(),
[reorged_notification_cache_blocks, committed_notification_2_cache_blocks]
);
assert_eq!(
wal.inner.block_cache().committed_blocks_sorted(),
sort_committed_blocks(
[
committed_notification_2_cache_committed_blocks,
reorged_notification_cache_committed_blocks
]
.concat()
)
);
assert_eq!(read_notifications(&wal)?, vec![committed_notification_2, reorged_notification]);
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/wal/metrics.rs | crates/exex/exex/src/wal/metrics.rs | use metrics::Gauge;
use reth_metrics::Metrics;
/// Metrics for the [WAL](`super::Wal`)
#[derive(Metrics)]
#[metrics(scope = "exex.wal")]
pub(super) struct Metrics {
/// Size of all notifications in WAL in bytes
pub size_bytes: Gauge,
/// Number of notifications in WAL
pub notifications_count: Gauge,
/// Number of committed blocks in WAL
pub committed_blocks_count: Gauge,
/// Lowest committed block height in WAL
pub lowest_committed_block_height: Gauge,
/// Highest committed block height in WAL
pub highest_committed_block_height: Gauge,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/exex/src/wal/cache.rs | crates/exex/exex/src/wal/cache.rs | use std::{
cmp::Reverse,
collections::{BinaryHeap, HashSet},
};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumHash;
use alloy_primitives::{map::FbHashMap, BlockNumber, B256};
use reth_exex_types::ExExNotification;
use reth_node_api::NodePrimitives;
/// The block cache of the WAL.
///
/// This cache is needed to avoid walking the WAL directory every time we want to find a
/// notification corresponding to a block or a block corresponding to a hash.
#[derive(Debug, Default)]
pub struct BlockCache {
    /// A min heap of `(Block Number, File ID)` tuples.
    ///
    /// Contains one highest block in notification. In a notification with both committed and
    /// reverted chain, the highest block is chosen between both chains.
    pub(super) notification_max_blocks: BinaryHeap<Reverse<(BlockNumber, u32)>>,
    /// A mapping of committed blocks `Block Hash -> Block`.
    ///
    /// For each [`ExExNotification::ChainCommitted`] notification, there will be an entry per
    /// block. The `u32` is the ID of the WAL file the notification was written to.
    pub(super) committed_blocks: FbHashMap<32, (u32, CachedBlock)>,
    /// Block height of the lowest committed block currently in the cache.
    pub(super) lowest_committed_block_height: Option<BlockNumber>,
    /// Block height of the highest committed block currently in the cache.
    pub(super) highest_committed_block_height: Option<BlockNumber>,
}
impl BlockCache {
    /// Returns `true` if the cache is empty.
    ///
    /// Emptiness is judged by the notification heap, which holds one entry per cached
    /// notification.
    pub(super) fn is_empty(&self) -> bool {
        self.notification_max_blocks.is_empty()
    }
    /// Returns the number of committed blocks in the cache.
    pub(super) fn num_blocks(&self) -> usize {
        self.committed_blocks.len()
    }
    /// Removes all files from the cache that have notifications with a tip block less than or
    /// equal to the given block number.
    ///
    /// # Returns
    ///
    /// A set of file IDs that were removed.
    pub(super) fn remove_before(&mut self, block_number: BlockNumber) -> HashSet<u32> {
        let mut file_ids = HashSet::default();
        // `Reverse` turns the max-heap into a min-heap over `(max block, file id)`, so we can
        // pop entries in ascending tip order and stop at the first tip above `block_number`.
        while let Some(block @ Reverse((max_block, file_id))) =
            self.notification_max_blocks.peek().copied()
        {
            if max_block <= block_number {
                let popped_block = self.notification_max_blocks.pop().unwrap();
                debug_assert_eq!(popped_block, block);
                file_ids.insert(file_id);
            } else {
                break
            }
        }
        // Drop committed blocks belonging to the removed files, recomputing the lowest/highest
        // committed heights from the surviving entries in the same pass.
        let (mut lowest_committed_block_height, mut highest_committed_block_height) = (None, None);
        self.committed_blocks.retain(|_, (file_id, block)| {
            let retain = !file_ids.contains(file_id);
            if retain {
                lowest_committed_block_height = Some(
                    lowest_committed_block_height
                        .map_or(block.block.number, |lowest| block.block.number.min(lowest)),
                );
                highest_committed_block_height = Some(
                    highest_committed_block_height
                        .map_or(block.block.number, |highest| block.block.number.max(highest)),
                );
            }
            retain
        });
        self.lowest_committed_block_height = lowest_committed_block_height;
        self.highest_committed_block_height = highest_committed_block_height;
        file_ids
    }
    /// Returns the file ID for the notification containing the given committed block hash, if it
    /// exists.
    pub(super) fn get_file_id_by_committed_block_hash(&self, block_hash: &B256) -> Option<u32> {
        self.committed_blocks.get(block_hash).map(|entry| entry.0)
    }
    /// Inserts the blocks from the notification into the cache with the given file ID.
    pub(super) fn insert_notification_blocks_with_file_id<N: NodePrimitives>(
        &mut self,
        file_id: u32,
        notification: &ExExNotification<N>,
    ) {
        let reverted_chain = notification.reverted_chain();
        let committed_chain = notification.committed_chain();
        // The heap entry records the highest tip across both chains (for a reorg notification,
        // the max of the reverted and committed tips).
        let max_block =
            reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number()).max();
        if let Some(max_block) = max_block {
            self.notification_max_blocks.push(Reverse((max_block, file_id)));
        }
        if let Some(committed_chain) = &committed_chain {
            for block in committed_chain.blocks().values() {
                let cached_block = CachedBlock {
                    block: (block.number(), block.hash()).into(),
                    parent_hash: block.parent_hash(),
                };
                self.committed_blocks.insert(block.hash(), (file_id, cached_block));
            }
            // NOTE(review): only the highest committed height is refreshed on insert;
            // `lowest_committed_block_height` is only recomputed in `remove_before` — confirm
            // this is intentional for a cache that starts empty.
            self.highest_committed_block_height = Some(committed_chain.tip().number());
        }
    }
    // Test helper: heap contents as a sorted `(block number, file id)` list.
    #[cfg(test)]
    pub(super) fn blocks_sorted(&self) -> Vec<(BlockNumber, u32)> {
        self.notification_max_blocks
            .clone()
            .into_sorted_vec()
            .into_iter()
            .map(|entry| entry.0)
            .collect()
    }
    // Test helper: committed blocks sorted by (number, hash) for stable comparisons.
    #[cfg(test)]
    pub(super) fn committed_blocks_sorted(&self) -> Vec<(B256, u32, CachedBlock)> {
        use itertools::Itertools;
        self.committed_blocks
            .iter()
            .map(|(hash, (file_id, block))| (*hash, *file_id, *block))
            .sorted_by_key(|(_, _, block)| (block.block.number, block.block.hash))
            .collect()
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(super) struct CachedBlock {
/// The block number and hash of the block.
pub(super) block: BlockNumHash,
/// The hash of the parent block.
pub(super) parent_hash: B256,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/types/src/lib.rs | crates/exex/types/src/lib.rs | //! Commonly used ExEx types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod finished_height;
mod head;
mod notification;
pub use finished_height::FinishedExExHeight;
pub use head::ExExHead;
pub use notification::ExExNotification;
/// Bincode-compatible serde implementations for commonly used ExEx types.
///
/// `bincode` crate doesn't work with optionally serializable serde fields, but some of the
/// ExEx types require optional serialization for RPC compatibility. This module makes so that
/// all fields are serialized.
///
/// Read more: <https://github.com/bincode-org/bincode/issues/326>
#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))]
pub mod serde_bincode_compat {
pub use super::notification::serde_bincode_compat::*;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/types/src/notification.rs | crates/exex/types/src/notification.rs | use std::sync::Arc;
use reth_chain_state::CanonStateNotification;
use reth_execution_types::Chain;
use reth_primitives_traits::NodePrimitives;
/// Notifications sent to an `ExEx`.
///
/// Each variant carries the affected chain segment(s) behind an [`Arc`] so notifications
/// can be fanned out to multiple `ExEx`es without copying chain data.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ExExNotification<N: NodePrimitives = reth_chain_state::EthPrimitives> {
    /// Chain got committed without a reorg, and only the new chain is returned.
    ChainCommitted {
        /// The new chain after commit.
        new: Arc<Chain<N>>,
    },
    /// Chain got reorged, and both the old and the new chains are returned.
    ChainReorged {
        /// The old chain before reorg.
        old: Arc<Chain<N>>,
        /// The new chain after reorg.
        new: Arc<Chain<N>>,
    },
    /// Chain got reverted, and only the old chain is returned.
    ChainReverted {
        /// The old chain before reversion.
        old: Arc<Chain<N>>,
    },
}
impl<N: NodePrimitives> ExExNotification<N> {
    /// Returns the committed chain carried by this notification, if any.
    ///
    /// Present for [`Self::ChainCommitted`] and [`Self::ChainReorged`], absent for
    /// [`Self::ChainReverted`].
    pub fn committed_chain(&self) -> Option<Arc<Chain<N>>> {
        match self {
            Self::ChainReverted { .. } => None,
            Self::ChainCommitted { new } | Self::ChainReorged { new, .. } => Some(Arc::clone(new)),
        }
    }
    /// Returns the reverted chain carried by this notification, if any.
    ///
    /// Present for [`Self::ChainReorged`] and [`Self::ChainReverted`], absent for
    /// [`Self::ChainCommitted`].
    pub fn reverted_chain(&self) -> Option<Arc<Chain<N>>> {
        match self {
            Self::ChainCommitted { .. } => None,
            Self::ChainReorged { old, .. } | Self::ChainReverted { old } => Some(Arc::clone(old)),
        }
    }
    /// Converts the notification into the one that undoes it:
    ///
    /// - [`Self::ChainCommitted`] becomes [`Self::ChainReverted`] and vice versa.
    /// - [`Self::ChainReorged`] swaps its old and new chains.
    pub fn into_inverted(self) -> Self {
        match self {
            Self::ChainReorged { old, new } => Self::ChainReorged { old: new, new: old },
            Self::ChainCommitted { new: chain } => Self::ChainReverted { old: chain },
            Self::ChainReverted { old: chain } => Self::ChainCommitted { new: chain },
        }
    }
}
impl<P: NodePrimitives> From<CanonStateNotification<P>> for ExExNotification<P> {
    /// Maps a canonical-state notification onto the equivalent `ExEx` notification.
    fn from(value: CanonStateNotification<P>) -> Self {
        match value {
            CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new },
            CanonStateNotification::Commit { new } => Self::ChainCommitted { new },
        }
    }
}
/// Bincode-compatible [`ExExNotification`] serde implementation.
#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))]
pub(super) mod serde_bincode_compat {
    use reth_execution_types::serde_bincode_compat::Chain;
    use reth_primitives_traits::NodePrimitives;
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{DeserializeAs, SerializeAs};
    use std::sync::Arc;
    /// Bincode-compatible [`super::ExExNotification`] serde implementation.
    ///
    /// Mirrors the outer enum, but borrows the chain data (`Chain<'a, N>`) instead of holding
    /// `Arc`s, so all fields serialize unconditionally — which is what bincode requires.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_exex_types::{serde_bincode_compat, ExExNotification};
    /// use reth_primitives_traits::NodePrimitives;
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data<N: NodePrimitives> {
    ///     #[serde_as(as = "serde_bincode_compat::ExExNotification<'_, N>")]
    ///     notification: ExExNotification<N>,
    /// }
    /// ```
    #[derive(Debug, Serialize, Deserialize)]
    #[expect(missing_docs)]
    #[serde(bound = "")]
    #[expect(clippy::large_enum_variant)]
    pub enum ExExNotification<'a, N>
    where
        N: NodePrimitives,
    {
        ChainCommitted { new: Chain<'a, N> },
        ChainReorged { old: Chain<'a, N>, new: Chain<'a, N> },
        ChainReverted { old: Chain<'a, N> },
    }
    // Borrowing conversion used on the serialization path.
    impl<'a, N> From<&'a super::ExExNotification<N>> for ExExNotification<'a, N>
    where
        N: NodePrimitives,
    {
        fn from(value: &'a super::ExExNotification<N>) -> Self {
            match value {
                super::ExExNotification::ChainCommitted { new } => {
                    ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) }
                }
                super::ExExNotification::ChainReorged { old, new } => {
                    ExExNotification::ChainReorged {
                        old: Chain::from(old.as_ref()),
                        new: Chain::from(new.as_ref()),
                    }
                }
                super::ExExNotification::ChainReverted { old } => {
                    ExExNotification::ChainReverted { old: Chain::from(old.as_ref()) }
                }
            }
        }
    }
    // Owning conversion used on the deserialization path; re-wraps chains in `Arc`.
    impl<'a, N> From<ExExNotification<'a, N>> for super::ExExNotification<N>
    where
        N: NodePrimitives,
    {
        fn from(value: ExExNotification<'a, N>) -> Self {
            match value {
                ExExNotification::ChainCommitted { new } => {
                    Self::ChainCommitted { new: Arc::new(new.into()) }
                }
                ExExNotification::ChainReorged { old, new } => {
                    Self::ChainReorged { old: Arc::new(old.into()), new: Arc::new(new.into()) }
                }
                ExExNotification::ChainReverted { old } => {
                    Self::ChainReverted { old: Arc::new(old.into()) }
                }
            }
        }
    }
    impl<N> SerializeAs<super::ExExNotification<N>> for ExExNotification<'_, N>
    where
        N: NodePrimitives,
    {
        fn serialize_as<S>(
            source: &super::ExExNotification<N>,
            serializer: S,
        ) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            ExExNotification::from(source).serialize(serializer)
        }
    }
    impl<'de, N> DeserializeAs<'de, super::ExExNotification<N>> for ExExNotification<'de, N>
    where
        N: NodePrimitives,
    {
        fn deserialize_as<D>(deserializer: D) -> Result<super::ExExNotification<N>, D::Error>
        where
            D: Deserializer<'de>,
        {
            ExExNotification::deserialize(deserializer).map(Into::into)
        }
    }
    #[cfg(test)]
    mod tests {
        use super::super::{serde_bincode_compat, ExExNotification};
        use arbitrary::Arbitrary;
        use rand::Rng;
        use reth_execution_types::Chain;
        use reth_primitives_traits::RecoveredBlock;
        use serde::{Deserialize, Serialize};
        use serde_with::serde_as;
        use std::sync::Arc;
        // Round-trips an arbitrary reorg notification through bincode and checks equality.
        #[test]
        fn test_exex_notification_bincode_roundtrip() {
            #[serde_as]
            #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
            struct Data {
                #[serde_as(
                    as = "serde_bincode_compat::ExExNotification<'_, reth_ethereum_primitives::EthPrimitives>"
                )]
                notification: ExExNotification,
            }
            let mut bytes = [0u8; 1024];
            rand::rng().fill(bytes.as_mut_slice());
            let data = Data {
                notification: ExExNotification::ChainReorged {
                    old: Arc::new(Chain::new(
                        vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes))
                            .unwrap()],
                        Default::default(),
                        None,
                    )),
                    new: Arc::new(Chain::new(
                        vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes))
                            .unwrap()],
                        Default::default(),
                        None,
                    )),
                },
            };
            let encoded = bincode::serialize(&data).unwrap();
            let decoded: Data = bincode::deserialize(&encoded).unwrap();
            assert_eq!(decoded, data);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/types/src/finished_height.rs | crates/exex/types/src/finished_height.rs | use alloy_primitives::BlockNumber;
/// The finished height of all `ExEx`'s.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FinishedExExHeight {
    /// No `ExEx`'s are installed, so there is no finished height.
    NoExExs,
    /// Not all `ExExs` have emitted a `FinishedHeight` event yet.
    NotReady,
    /// The finished height of all `ExEx`'s.
    ///
    /// This is the lowest common denominator between all `ExEx`'s.
    ///
    /// This block is used to (amongst other things) determine what blocks are safe to prune.
    ///
    /// The number is inclusive, i.e. all blocks `<= finished_height` are safe to prune.
    Height(BlockNumber),
}
impl FinishedExExHeight {
    /// Returns `true` while at least one `ExEx` has yet to emit a `FinishedHeight` event.
    pub const fn is_not_ready(&self) -> bool {
        match self {
            Self::NotReady => true,
            _ => false,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/types/src/head.rs | crates/exex/types/src/head.rs | use alloy_eips::BlockNumHash;
/// A head of the ExEx. It contains the highest host block committed to the
/// internal ExEx state. I.e. the latest block that the ExEx has fully
/// processed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ExExHead {
    /// The head block (number and hash).
    pub block: BlockNumHash,
}
impl ExExHead {
/// Creates a new instance for the given head block.
pub const fn new(block: BlockNumHash) -> Self {
Self { block }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/exex/test-utils/src/lib.rs | crates/exex/test-utils/src/lib.rs | //! Test helpers for `reth-exex`
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
use std::{
fmt::Debug,
future::{poll_fn, Future},
sync::Arc,
task::Poll,
};
use alloy_eips::BlockNumHash;
use futures_util::FutureExt;
use reth_chainspec::{ChainSpec, MAINNET};
use reth_consensus::test_utils::TestConsensus;
use reth_db::{
test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase},
DatabaseEnv,
};
use reth_db_common::init::init_genesis;
use reth_ethereum_primitives::{EthPrimitives, TransactionSigned};
use reth_evm_ethereum::MockEvmConfig;
use reth_execution_types::Chain;
use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal};
use reth_network::{config::rng_secret_key, NetworkConfigBuilder, NetworkManager};
use reth_node_api::{
FullNodeTypes, FullNodeTypesAdapter, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter,
};
use reth_node_builder::{
components::{
BasicPayloadServiceBuilder, Components, ComponentsBuilder, ConsensusBuilder,
ExecutorBuilder, PoolBuilder,
},
BuilderContext, Node, NodeAdapter, RethFullAdapter,
};
use reth_node_core::node_config::NodeConfig;
use reth_node_ethereum::{
node::{
EthereumAddOns, EthereumEngineValidatorBuilder, EthereumEthApiBuilder,
EthereumNetworkBuilder, EthereumPayloadBuilder,
},
EthEngineTypes,
};
use reth_payload_builder::noop::NoopPayloadBuilderService;
use reth_primitives_traits::{Block as _, RecoveredBlock};
use reth_provider::{
providers::{BlockchainProvider, StaticFileProvider},
BlockReader, EthStorage, ProviderFactory,
};
use reth_tasks::TaskManager;
use reth_transaction_pool::test_utils::{testing_pool, TestPool};
use tempfile::TempDir;
use thiserror::Error;
use tokio::sync::mpsc::{Sender, UnboundedReceiver};
/// A test [`PoolBuilder`] that builds a [`TestPool`].
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct TestPoolBuilder;
impl<Node> PoolBuilder<Node> for TestPoolBuilder
where
    Node: FullNodeTypes<Types: NodeTypes<Primitives: NodePrimitives<SignedTx = TransactionSigned>>>,
{
    type Pool = TestPool;
    // Builds the in-memory testing pool; ignores the builder context entirely.
    async fn build_pool(self, _ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
        Ok(testing_pool())
    }
}
/// A test [`ExecutorBuilder`] that wires a [`MockEvmConfig`] into the node under test.
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct TestExecutorBuilder;
impl<Node> ExecutorBuilder<Node> for TestExecutorBuilder
where
    Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = EthPrimitives>>,
{
    type EVM = MockEvmConfig;
    /// Produces the default mock EVM configuration; the builder context is unused.
    async fn build_evm(self, _ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
        Ok(MockEvmConfig::default())
    }
}
/// A test [`ConsensusBuilder`] that builds a [`TestConsensus`].
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct TestConsensusBuilder;
impl<Node> ConsensusBuilder<Node> for TestConsensusBuilder
where
    Node: FullNodeTypes,
{
    type Consensus = Arc<TestConsensus>;
    // Builds a fresh default test consensus; the builder context is unused.
    async fn build_consensus(self, _ctx: &BuilderContext<Node>) -> eyre::Result<Self::Consensus> {
        Ok(Arc::new(TestConsensus::default()))
    }
}
/// A test [`Node`].
///
/// Uses Ethereum mainnet-style types everywhere except the pool, executor, and consensus,
/// which are replaced with the test doubles defined above.
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct TestNode;
impl NodeTypes for TestNode {
    type Primitives = EthPrimitives;
    type ChainSpec = ChainSpec;
    type Storage = EthStorage;
    type Payload = EthEngineTypes;
}
impl<N> Node<N> for TestNode
where
    N: FullNodeTypes<Types = Self>,
{
    type ComponentsBuilder = ComponentsBuilder<
        N,
        TestPoolBuilder,
        BasicPayloadServiceBuilder<EthereumPayloadBuilder>,
        EthereumNetworkBuilder,
        TestExecutorBuilder,
        TestConsensusBuilder,
    >;
    type AddOns =
        EthereumAddOns<NodeAdapter<N>, EthereumEthApiBuilder, EthereumEngineValidatorBuilder>;
    fn components_builder(&self) -> Self::ComponentsBuilder {
        // The call order is fixed by the typed builder; each step records its builder type.
        ComponentsBuilder::default()
            .node_types::<N>()
            .pool(TestPoolBuilder::default())
            .executor(TestExecutorBuilder::default())
            .payload(BasicPayloadServiceBuilder::default())
            .network(EthereumNetworkBuilder::default())
            .consensus(TestConsensusBuilder::default())
    }
    fn add_ons(&self) -> Self::AddOns {
        EthereumAddOns::default()
    }
}
/// A shared [`TempDatabase`] used for testing.
pub type TmpDB = Arc<TempDatabase<DatabaseEnv>>;
/// The [`NodeAdapter`] for the [`TestExExContext`]. Contains the types necessary to
/// boot the testing environment.
pub type Adapter = NodeAdapter<RethFullAdapter<TmpDB, TestNode>>;
/// An [`ExExContext`] using the [`Adapter`] type.
pub type TestExExContext = ExExContext<Adapter>;
/// A helper type for testing Execution Extensions.
///
/// Owns the sending half of the notification channel, the receiving half of the event
/// channel, and keeps the WAL temp directory alive for the duration of the test.
#[derive(Debug)]
pub struct TestExExHandle {
    /// Genesis block that was inserted into the storage
    pub genesis: RecoveredBlock<reth_ethereum_primitives::Block>,
    /// Provider Factory for accessing the emphemeral storage of the host node
    pub provider_factory: ProviderFactory<NodeTypesWithDBAdapter<TestNode, TmpDB>>,
    /// Channel for receiving events from the Execution Extension
    pub events_rx: UnboundedReceiver<ExExEvent>,
    /// Channel for sending notifications to the Execution Extension
    pub notifications_tx: Sender<ExExNotification>,
    /// Node task manager
    pub tasks: TaskManager,
    /// WAL temp directory handle; dropped (and deleted) when the handle is dropped
    _wal_directory: TempDir,
}
impl TestExExHandle {
/// Send a notification to the Execution Extension that the chain has been committed
pub async fn send_notification_chain_committed(&self, chain: Chain) -> eyre::Result<()> {
self.notifications_tx
.send(ExExNotification::ChainCommitted { new: Arc::new(chain) })
.await?;
Ok(())
}
/// Send a notification to the Execution Extension that the chain has been reorged
pub async fn send_notification_chain_reorged(
&self,
old: Chain,
new: Chain,
) -> eyre::Result<()> {
self.notifications_tx
.send(ExExNotification::ChainReorged { old: Arc::new(old), new: Arc::new(new) })
.await?;
Ok(())
}
/// Send a notification to the Execution Extension that the chain has been reverted
pub async fn send_notification_chain_reverted(&self, chain: Chain) -> eyre::Result<()> {
self.notifications_tx
.send(ExExNotification::ChainReverted { old: Arc::new(chain) })
.await?;
Ok(())
}
/// Asserts that the Execution Extension did not emit any events.
#[track_caller]
pub fn assert_events_empty(&self) {
assert!(self.events_rx.is_empty());
}
/// Asserts that the Execution Extension emitted a `FinishedHeight` event with the correct
/// height.
#[track_caller]
pub fn assert_event_finished_height(&mut self, height: BlockNumHash) -> eyre::Result<()> {
let event = self.events_rx.try_recv()?;
assert_eq!(event, ExExEvent::FinishedHeight(height));
Ok(())
}
}
/// Creates a new [`ExExContext`].
///
/// This is a convenience function that does the following:
/// 1. Sets up an [`ExExContext`] with all dependencies.
/// 2. Inserts the genesis block of the provided (chain spec)[`ChainSpec`] into the storage.
/// 3. Creates a channel for receiving events from the Execution Extension.
/// 4. Creates a channel for sending notifications to the Execution Extension.
///
/// # Warning
/// The genesis block is not sent to the notifications channel. The caller is responsible for
/// doing this.
pub async fn test_exex_context_with_chain_spec(
    chain_spec: Arc<ChainSpec>,
) -> eyre::Result<(ExExContext<Adapter>, TestExExHandle)> {
    // Component test doubles: in-memory pool, mock EVM, permissive consensus.
    let transaction_pool = testing_pool();
    let evm_config = MockEvmConfig::default();
    let consensus = Arc::new(TestConsensus::default());
    // Ephemeral storage: temp static-file dir + temp database, initialized with genesis.
    let (static_dir, _) = create_test_static_files_dir();
    let db = create_test_rw_db();
    let provider_factory = ProviderFactory::<NodeTypesWithDBAdapter<TestNode, _>>::new(
        db,
        chain_spec.clone(),
        StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"),
    );
    let genesis_hash = init_genesis(&provider_factory)?;
    let provider = BlockchainProvider::new(provider_factory.clone())?;
    // Real network manager on unused ports, driven on a background task.
    let network_manager = NetworkManager::new(
        NetworkConfigBuilder::new(rng_secret_key())
            .with_unused_discovery_port()
            .with_unused_listener_port()
            .build(provider_factory.clone()),
    )
    .await?;
    let network = network_manager.handle().clone();
    let tasks = TaskManager::current();
    let task_executor = tasks.executor();
    tasks.executor().spawn(network_manager);
    let (_, payload_builder_handle) = NoopPayloadBuilderService::<EthEngineTypes>::new();
    let components = NodeAdapter::<FullNodeTypesAdapter<_, _, _>, _> {
        components: Components {
            transaction_pool,
            evm_config,
            consensus,
            network,
            payload_builder_handle,
        },
        task_executor,
        provider,
    };
    // Recover the genesis block we just wrote; its num/hash becomes the ExEx head.
    let genesis = provider_factory
        .block_by_hash(genesis_hash)?
        .ok_or_else(|| eyre::eyre!("genesis block not found"))?
        .seal_slow()
        .try_recover()?;
    let head = genesis.num_hash();
    let wal_directory = tempfile::tempdir()?;
    let wal = Wal::new(wal_directory.path())?;
    // Events flow ExEx -> test (unbounded); notifications flow test -> ExEx (capacity 1).
    let (events_tx, events_rx) = tokio::sync::mpsc::unbounded_channel();
    let (notifications_tx, notifications_rx) = tokio::sync::mpsc::channel(1);
    let notifications = ExExNotifications::new(
        head,
        components.provider.clone(),
        components.components.evm_config.clone(),
        notifications_rx,
        wal.handle(),
    );
    let ctx = ExExContext {
        head,
        config: NodeConfig::test(),
        reth_config: reth_config::Config::default(),
        events: events_tx,
        notifications,
        components,
    };
    Ok((
        ctx,
        TestExExHandle {
            genesis,
            provider_factory,
            events_rx,
            notifications_tx,
            tasks,
            _wal_directory: wal_directory,
        },
    ))
}
/// Creates a new [`ExExContext`] configured with the (mainnet)[`MAINNET`] chain spec.
///
/// See [`test_exex_context_with_chain_spec`] for details.
pub async fn test_exex_context() -> eyre::Result<(ExExContext<Adapter>, TestExExHandle)> {
    let chain_spec = MAINNET.clone();
    test_exex_context_with_chain_spec(chain_spec).await
}
/// An extension trait for polling an Execution Extension future.
pub trait PollOnce {
    /// Polls the given Execution Extension future __once__. The future should be
    /// (pinned)[`std::pin::pin`].
    ///
    /// # Returns
    /// - `Ok(())` if the future returned [`Poll::Pending`]. The future can be polled again.
    /// - `Err(PollOnceError::FutureIsReady)` if the future returned [`Poll::Ready`] without an
    ///   error. The future should never resolve.
    /// - `Err(PollOnceError::FutureError(err))` if the future returned [`Poll::Ready`] with an
    ///   error. Something went wrong.
    fn poll_once(&mut self) -> impl Future<Output = Result<(), PollOnceError>> + Send;
}
/// An Execution Extension future polling error.
///
/// Returned by [`PollOnce::poll_once`].
#[derive(Error, Debug)]
pub enum PollOnceError {
    /// The future returned [`Poll::Ready`] without an error, but it should never resolve.
    #[error("Execution Extension future returned Ready, but it should never resolve")]
    FutureIsReady,
    /// The future returned [`Poll::Ready`] with an error.
    #[error(transparent)]
    FutureError(#[from] eyre::Error),
}
impl<F: Future<Output = eyre::Result<()>> + Unpin + Send> PollOnce for F {
    /// Single-poll adapter: `Pending` is success, `Ready` is always an error.
    async fn poll_once(&mut self) -> Result<(), PollOnceError> {
        poll_fn(|ctx| {
            let outcome = match self.poll_unpin(ctx) {
                Poll::Pending => Ok(()),
                Poll::Ready(Ok(())) => Err(PollOnceError::FutureIsReady),
                Poll::Ready(Err(err)) => Err(PollOnceError::FutureError(err)),
            };
            Poll::Ready(outcome)
        })
        .await
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: building the full test context must succeed end-to-end.
    #[tokio::test]
    async fn check_test_context_creation() {
        let _ = test_exex_context().await.unwrap();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/src/lib.rs | crates/rpc/rpc-engine-api/src/lib.rs | //! The implementation of Engine API.
//! [Read more](https://github.com/ethereum/execution-apis/tree/main/src/engine).
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// The Engine API implementation.
mod engine_api;
/// Engine API capabilities.
pub mod capabilities;
pub use capabilities::EngineCapabilities;
/// Engine API error.
mod error;
/// Engine API metrics.
mod metrics;
pub use engine_api::{EngineApi, EngineApiSender};
pub use error::*;
// re-export server trait for convenience
pub use reth_rpc_api::EngineApiServer;
#[cfg(test)]
mod tests {
// silence unused import warning
use alloy_rlp as _;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/src/error.rs | crates/rpc/rpc-engine-api/src/error.rs | use alloy_primitives::{B256, U256};
use alloy_rpc_types_engine::{
ForkchoiceUpdateError, INVALID_FORK_CHOICE_STATE_ERROR, INVALID_FORK_CHOICE_STATE_ERROR_MSG,
INVALID_PAYLOAD_ATTRIBUTES_ERROR, INVALID_PAYLOAD_ATTRIBUTES_ERROR_MSG,
};
use jsonrpsee_types::error::{
INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG,
};
use reth_engine_primitives::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError};
use reth_payload_builder_primitives::PayloadBuilderError;
use reth_payload_primitives::EngineObjectValidationError;
use thiserror::Error;
/// The Engine API result type
pub type EngineApiResult<Ok> = Result<Ok, EngineApiError>;
// The negative codes below are defined by the Engine API spec.
/// Invalid payload attributes code.
pub const INVALID_PAYLOAD_ATTRIBUTES: i32 = -38003;
/// Payload unsupported fork code.
pub const UNSUPPORTED_FORK_CODE: i32 = -38005;
/// Payload unknown error code.
pub const UNKNOWN_PAYLOAD_CODE: i32 = -38001;
/// Request too large error code.
pub const REQUEST_TOO_LARGE_CODE: i32 = -38004;
/// Error message for the request too large error.
const REQUEST_TOO_LARGE_MESSAGE: &str = "Too large request";
/// Error message for the invalid payload attributes error.
const INVALID_PAYLOAD_ATTRIBUTES_MSG: &str = "Invalid payload attributes";
/// Error returned by [`EngineApi`][crate::EngineApi]
///
/// Note: This is a high-fidelity error type which can be converted to an RPC error that adheres to
/// the [Engine API spec](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md#errors).
#[derive(Error, Debug)]
pub enum EngineApiError {
    // **IMPORTANT**: keep error messages in sync with the Engine API spec linked above.
    /// Payload does not exist / is not available.
    #[error("Unknown payload")]
    UnknownPayload,
    /// The payload body request length is too large.
    #[error("requested count too large: {len}")]
    PayloadRequestTooLarge {
        /// The length that was requested.
        len: u64,
    },
    /// Too many requested versioned hashes for blobs request
    #[error("requested blob count too large: {len}")]
    BlobRequestTooLarge {
        /// The length that was requested.
        len: usize,
    },
    /// Thrown if `engine_getPayloadBodiesByRangeV1` contains an invalid range
    #[error("invalid start ({start}) or count ({count})")]
    InvalidBodiesRange {
        /// Start of the range
        start: u64,
        /// Requested number of items
        count: u64,
    },
    /// Terminal total difficulty mismatch during transition configuration exchange.
    #[error(
        "invalid transition terminal total difficulty: \
        execution: {execution}, consensus: {consensus}"
    )]
    TerminalTD {
        /// Execution terminal total difficulty value.
        execution: U256,
        /// Consensus terminal total difficulty value.
        consensus: U256,
    },
    /// Terminal block hash mismatch during transition configuration exchange.
    #[error(
        "invalid transition terminal block hash: \
        execution: {execution:?}, consensus: {consensus}"
    )]
    TerminalBlockHash {
        /// Execution terminal block hash. `None` if block number is not found in the database.
        execution: Option<B256>,
        /// Consensus terminal block hash.
        consensus: B256,
    },
    /// An error occurred while processing the fork choice update in the beacon consensus engine.
    #[error(transparent)]
    ForkChoiceUpdate(#[from] BeaconForkChoiceUpdateError),
    /// An error occurred while processing a new payload in the beacon consensus engine.
    #[error(transparent)]
    NewPayload(#[from] BeaconOnNewPayloadError),
    /// Encountered an internal error.
    #[error(transparent)]
    Internal(#[from] Box<dyn core::error::Error + Send + Sync>),
    /// Fetching the payload failed
    #[error(transparent)]
    GetPayloadError(#[from] PayloadBuilderError),
    /// The payload or attributes are known to be malformed before processing.
    #[error(transparent)]
    EngineObjectValidationError(#[from] EngineObjectValidationError),
    /// Requests hash provided, but can't be accepted by the API.
    #[error("requests hash cannot be accepted by the API without `--engine.accept-execution-requests-hash` flag")]
    UnexpectedRequestsHash,
    /// Any other rpc error
    #[error("{0}")]
    Other(jsonrpsee_types::ErrorObject<'static>),
}
impl EngineApiError {
    /// Creates a new [`EngineApiError::Other`] variant.
    pub const fn other(err: jsonrpsee_types::ErrorObject<'static>) -> Self {
        Self::Other(err)
    }
}
/// Helper type to represent the `error` field in the error response:
/// <https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md#errors>
#[derive(serde::Serialize)]
struct ErrorData {
    /// Human-readable rendering of the underlying error.
    err: String,
}
impl ErrorData {
    /// Wraps any displayable error into the serializable payload.
    #[inline]
    fn new(err: impl std::fmt::Display) -> Self {
        Self { err: err.to_string() }
    }
}
// Maps each high-fidelity error onto the RPC error code/message mandated by the Engine API
// spec (see the doc link on `EngineApiError`).
impl From<EngineApiError> for jsonrpsee_types::error::ErrorObject<'static> {
    fn from(error: EngineApiError) -> Self {
        match error {
            // Malformed params -> generic invalid-params code.
            EngineApiError::InvalidBodiesRange { .. } |
            EngineApiError::EngineObjectValidationError(
                EngineObjectValidationError::Payload(_) |
                EngineObjectValidationError::InvalidParams(_),
            ) |
            EngineApiError::UnexpectedRequestsHash => {
                // Note: the data field is not required by the spec, but is also included by other
                // clients
                jsonrpsee_types::error::ErrorObject::owned(
                    INVALID_PARAMS_CODE,
                    INVALID_PARAMS_MSG,
                    Some(ErrorData::new(error)),
                )
            }
            EngineApiError::EngineObjectValidationError(
                EngineObjectValidationError::PayloadAttributes(_),
            ) => {
                // Note: the data field is not required by the spec, but is also included by other
                // clients
                jsonrpsee_types::error::ErrorObject::owned(
                    INVALID_PAYLOAD_ATTRIBUTES,
                    INVALID_PAYLOAD_ATTRIBUTES_MSG,
                    Some(ErrorData::new(error)),
                )
            }
            EngineApiError::UnknownPayload => jsonrpsee_types::error::ErrorObject::owned(
                UNKNOWN_PAYLOAD_CODE,
                error.to_string(),
                None::<()>,
            ),
            EngineApiError::PayloadRequestTooLarge { .. } |
            EngineApiError::BlobRequestTooLarge { .. } => {
                jsonrpsee_types::error::ErrorObject::owned(
                    REQUEST_TOO_LARGE_CODE,
                    REQUEST_TOO_LARGE_MESSAGE,
                    Some(ErrorData::new(error)),
                )
            }
            EngineApiError::EngineObjectValidationError(
                EngineObjectValidationError::UnsupportedFork,
            ) => jsonrpsee_types::error::ErrorObject::owned(
                UNSUPPORTED_FORK_CODE,
                error.to_string(),
                None::<()>,
            ),
            // Error responses from the consensus engine
            EngineApiError::ForkChoiceUpdate(ref err) => match err {
                BeaconForkChoiceUpdateError::ForkchoiceUpdateError(err) => match err {
                    ForkchoiceUpdateError::UpdatedInvalidPayloadAttributes => {
                        jsonrpsee_types::error::ErrorObject::owned(
                            INVALID_PAYLOAD_ATTRIBUTES_ERROR,
                            INVALID_PAYLOAD_ATTRIBUTES_ERROR_MSG,
                            None::<()>,
                        )
                    }
                    ForkchoiceUpdateError::InvalidState |
                    ForkchoiceUpdateError::UnknownFinalBlock => {
                        jsonrpsee_types::error::ErrorObject::owned(
                            INVALID_FORK_CHOICE_STATE_ERROR,
                            INVALID_FORK_CHOICE_STATE_ERROR_MSG,
                            None::<()>,
                        )
                    }
                },
                BeaconForkChoiceUpdateError::EngineUnavailable |
                BeaconForkChoiceUpdateError::Internal(_) => {
                    jsonrpsee_types::error::ErrorObject::owned(
                        INTERNAL_ERROR_CODE,
                        SERVER_ERROR_MSG,
                        Some(ErrorData::new(error)),
                    )
                }
            },
            // Any other server error
            EngineApiError::TerminalTD { .. } |
            EngineApiError::TerminalBlockHash { .. } |
            EngineApiError::NewPayload(_) |
            EngineApiError::Internal(_) |
            EngineApiError::GetPayloadError(_) => jsonrpsee_types::error::ErrorObject::owned(
                INTERNAL_ERROR_CODE,
                SERVER_ERROR_MSG,
                Some(ErrorData::new(error)),
            ),
            // Pre-built RPC error objects pass through unchanged.
            EngineApiError::Other(err) => err,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_rpc_types_engine::ForkchoiceUpdateError;
    /// Asserts that converting `err` into a JSON-RPC error object yields the expected
    /// code and message.
    #[track_caller]
    fn ensure_engine_rpc_error(
        code: i32,
        message: &str,
        err: impl Into<jsonrpsee_types::error::ErrorObject<'static>>,
    ) {
        let err = err.into();
        assert_eq!(err.code(), code);
        assert_eq!(err.message(), message);
    }
    // Tests that engine errors are formatted correctly according to the engine API spec
    // <https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md#errors>
    #[test]
    fn engine_error_rpc_error_test() {
        ensure_engine_rpc_error(
            UNSUPPORTED_FORK_CODE,
            "Unsupported fork",
            EngineApiError::EngineObjectValidationError(
                EngineObjectValidationError::UnsupportedFork,
            ),
        );
        ensure_engine_rpc_error(
            REQUEST_TOO_LARGE_CODE,
            "Too large request",
            EngineApiError::PayloadRequestTooLarge { len: 0 },
        );
        ensure_engine_rpc_error(
            -38002,
            "Invalid forkchoice state",
            EngineApiError::ForkChoiceUpdate(BeaconForkChoiceUpdateError::ForkchoiceUpdateError(
                ForkchoiceUpdateError::InvalidState,
            )),
        );
        ensure_engine_rpc_error(
            -38003,
            "Invalid payload attributes",
            EngineApiError::ForkChoiceUpdate(BeaconForkChoiceUpdateError::ForkchoiceUpdateError(
                ForkchoiceUpdateError::UpdatedInvalidPayloadAttributes,
            )),
        );
        ensure_engine_rpc_error(
            UNKNOWN_PAYLOAD_CODE,
            "Unknown payload",
            EngineApiError::UnknownPayload,
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/src/metrics.rs | crates/rpc/rpc-engine-api/src/metrics.rs | use std::time::Duration;
use crate::EngineApiError;
use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum};
use metrics::{Counter, Gauge, Histogram};
use reth_metrics::Metrics;
/// All beacon consensus engine metrics
///
/// Container grouping the latency, response-type, and blob metrics recorded by the engine API.
#[derive(Default)]
pub(crate) struct EngineApiMetrics {
    /// Engine API latency metrics
    pub(crate) latency: EngineApiLatencyMetrics,
    /// Engine API forkchoiceUpdated response type metrics
    pub(crate) fcu_response: ForkchoiceUpdatedResponseMetrics,
    /// Engine API newPayload response type metrics
    pub(crate) new_payload_response: NewPayloadStatusResponseMetrics,
    /// Blob-related metrics
    pub(crate) blob_metrics: BlobMetrics,
}
/// Beacon consensus engine latency metrics.
///
/// NOTE(review): with `#[derive(Metrics)]` the field names presumably become the exported
/// metric names under the `engine.rpc` scope — confirm before renaming any field.
#[derive(Metrics)]
#[metrics(scope = "engine.rpc")]
pub(crate) struct EngineApiLatencyMetrics {
    /// Latency for `engine_newPayloadV1`
    pub(crate) new_payload_v1: Histogram,
    /// Latency for `engine_newPayloadV2`
    pub(crate) new_payload_v2: Histogram,
    /// Latency for `engine_newPayloadV3`
    pub(crate) new_payload_v3: Histogram,
    /// Latency for `engine_newPayloadV4`
    pub(crate) new_payload_v4: Histogram,
    /// Latency for `engine_forkchoiceUpdatedV1`
    pub(crate) fork_choice_updated_v1: Histogram,
    /// Latency for `engine_forkchoiceUpdatedV2`
    pub(crate) fork_choice_updated_v2: Histogram,
    /// Latency for `engine_forkchoiceUpdatedV3`
    pub(crate) fork_choice_updated_v3: Histogram,
    /// Time diff between `engine_newPayloadV*` and the next FCU
    pub(crate) new_payload_forkchoice_updated_time_diff: Histogram,
    /// Latency for `engine_getPayloadV1`
    pub(crate) get_payload_v1: Histogram,
    /// Latency for `engine_getPayloadV2`
    pub(crate) get_payload_v2: Histogram,
    /// Latency for `engine_getPayloadV3`
    pub(crate) get_payload_v3: Histogram,
    /// Latency for `engine_getPayloadV4`
    pub(crate) get_payload_v4: Histogram,
    /// Latency for `engine_getPayloadV5`
    pub(crate) get_payload_v5: Histogram,
    /// Latency for `engine_getPayloadBodiesByRangeV1`
    pub(crate) get_payload_bodies_by_range_v1: Histogram,
    /// Latency for `engine_getPayloadBodiesByHashV1`
    pub(crate) get_payload_bodies_by_hash_v1: Histogram,
    /// Latency for `engine_getBlobsV1`
    pub(crate) get_blobs_v1: Histogram,
    /// Latency for `engine_getBlobsV2`
    pub(crate) get_blobs_v2: Histogram,
}
/// Metrics for engine API forkchoiceUpdated responses.
///
/// Exactly one of the per-status counters plus the total counter is bumped per request;
/// see [`ForkchoiceUpdatedResponseMetrics::update_response_metrics`].
#[derive(Metrics)]
#[metrics(scope = "engine.rpc")]
pub(crate) struct ForkchoiceUpdatedResponseMetrics {
    /// The total count of forkchoice updated messages received.
    pub(crate) forkchoice_updated_messages: Counter,
    /// The total count of forkchoice updated messages that we responded to with
    /// [`Invalid`](alloy_rpc_types_engine::PayloadStatusEnum#Invalid).
    pub(crate) forkchoice_updated_invalid: Counter,
    /// The total count of forkchoice updated messages that we responded to with
    /// [`Valid`](alloy_rpc_types_engine::PayloadStatusEnum#Valid).
    pub(crate) forkchoice_updated_valid: Counter,
    /// The total count of forkchoice updated messages that we responded to with
    /// [`Syncing`](alloy_rpc_types_engine::PayloadStatusEnum#Syncing).
    pub(crate) forkchoice_updated_syncing: Counter,
    /// The total count of forkchoice updated messages that we responded to with
    /// [`Accepted`](alloy_rpc_types_engine::PayloadStatusEnum#Accepted).
    pub(crate) forkchoice_updated_accepted: Counter,
    /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded
    /// with an error type that is not a [`PayloadStatusEnum`].
    pub(crate) forkchoice_updated_error: Counter,
}
/// Metrics for engine API newPayload responses.
///
/// Gas throughput histograms are only recorded for `Valid` responses;
/// see [`NewPayloadStatusResponseMetrics::update_response_metrics`].
#[derive(Metrics)]
#[metrics(scope = "engine.rpc")]
pub(crate) struct NewPayloadStatusResponseMetrics {
    /// The total count of new payload messages received.
    pub(crate) new_payload_messages: Counter,
    /// The total count of new payload messages that we responded to with
    /// [Invalid](alloy_rpc_types_engine::PayloadStatusEnum#Invalid).
    pub(crate) new_payload_invalid: Counter,
    /// The total count of new payload messages that we responded to with
    /// [Valid](alloy_rpc_types_engine::PayloadStatusEnum#Valid).
    pub(crate) new_payload_valid: Counter,
    /// The total count of new payload messages that we responded to with
    /// [Syncing](alloy_rpc_types_engine::PayloadStatusEnum#Syncing).
    pub(crate) new_payload_syncing: Counter,
    /// The total count of new payload messages that we responded to with
    /// [Accepted](alloy_rpc_types_engine::PayloadStatusEnum#Accepted).
    pub(crate) new_payload_accepted: Counter,
    /// The total count of new payload messages that were unsuccessful, i.e. we responded with an
    /// error type that is not a [`PayloadStatusEnum`].
    pub(crate) new_payload_error: Counter,
    /// The total gas of valid new payload messages received.
    pub(crate) new_payload_total_gas: Histogram,
    /// The gas per second of valid new payload messages received.
    pub(crate) new_payload_gas_per_second: Histogram,
    /// Latency for the last `engine_newPayloadV*` call
    pub(crate) new_payload_last: Gauge,
}
/// Blob-related engine API metrics (`engine_getBlobsV1` / `engine_getBlobsV2`).
#[derive(Metrics)]
#[metrics(scope = "engine.rpc.blobs")]
pub(crate) struct BlobMetrics {
    /// Count of blobs successfully retrieved
    pub(crate) blob_count: Counter,
    /// Count of blob misses
    pub(crate) blob_misses: Counter,
    /// Number of blobs requested via getBlobsV2
    pub(crate) get_blobs_requests_blobs_total: Counter,
    /// Number of blobs requested via getBlobsV2 that are present in the blobpool
    pub(crate) get_blobs_requests_blobs_in_blobpool_total: Counter,
    /// Number of times getBlobsV2 responded with “hit”
    pub(crate) get_blobs_requests_success_total: Counter,
    /// Number of times getBlobsV2 responded with “miss”
    pub(crate) get_blobs_requests_failure_total: Counter,
}
impl NewPayloadStatusResponseMetrics {
    /// Increment the newPayload counter based on the given rpc result
    ///
    /// For `Valid` responses this additionally records the payload's total gas and the
    /// gas-per-second throughput; the latency of the last call is stored in the
    /// `new_payload_last` gauge.
    pub(crate) fn update_response_metrics(
        &self,
        result: &Result<PayloadStatus, EngineApiError>,
        gas_used: u64,
        time: Duration,
    ) {
        self.new_payload_last.set(time);
        match result {
            Ok(status) => match status.status {
                PayloadStatusEnum::Valid => {
                    self.new_payload_valid.increment(1);
                    self.new_payload_total_gas.record(gas_used as f64);
                    // Guard against a zero elapsed duration: dividing by zero would
                    // record `inf` and poison the gas-per-second histogram.
                    if !time.is_zero() {
                        self.new_payload_gas_per_second
                            .record(gas_used as f64 / time.as_secs_f64());
                    }
                }
                PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1),
                PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1),
                PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1),
            },
            Err(_) => self.new_payload_error.increment(1),
        }
        // The total message counter is bumped regardless of outcome.
        self.new_payload_messages.increment(1);
    }
}
impl ForkchoiceUpdatedResponseMetrics {
    /// Increment the forkchoiceUpdated counter based on the given rpc result
    ///
    /// Bumps exactly one per-status (or error) counter plus the total message counter.
    pub(crate) fn update_response_metrics(
        &self,
        result: &Result<ForkchoiceUpdated, EngineApiError>,
    ) {
        match result {
            Ok(status) => match status.payload_status.status {
                PayloadStatusEnum::Valid => self.forkchoice_updated_valid.increment(1),
                PayloadStatusEnum::Syncing => self.forkchoice_updated_syncing.increment(1),
                PayloadStatusEnum::Accepted => self.forkchoice_updated_accepted.increment(1),
                PayloadStatusEnum::Invalid { .. } => self.forkchoice_updated_invalid.increment(1),
            },
            Err(_) => self.forkchoice_updated_error.increment(1),
        }
        // Total count is incremented regardless of outcome.
        self.forkchoice_updated_messages.increment(1);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/src/capabilities.rs | crates/rpc/rpc-engine-api/src/capabilities.rs | use std::collections::HashSet;
/// The list of all supported Engine capabilities available over the engine endpoint.
///
/// Used to build the default capability set (see [`EngineCapabilities::default`]).
pub const CAPABILITIES: &[&str] = &[
    "engine_forkchoiceUpdatedV1",
    "engine_forkchoiceUpdatedV2",
    "engine_forkchoiceUpdatedV3",
    "engine_getClientVersionV1",
    "engine_getPayloadV1",
    "engine_getPayloadV2",
    "engine_getPayloadV3",
    "engine_getPayloadV4",
    "engine_getPayloadV5",
    "engine_newPayloadV1",
    "engine_newPayloadV2",
    "engine_newPayloadV3",
    "engine_newPayloadV4",
    "engine_getPayloadBodiesByHashV1",
    "engine_getPayloadBodiesByRangeV1",
    "engine_getBlobsV1",
    "engine_getBlobsV2",
];
/// The list of all supported Engine capabilities available over the engine endpoint.
///
/// Latest spec: Prague
#[derive(Debug, Clone)]
pub struct EngineCapabilities {
    /// Deduplicated set of capability method names.
    inner: HashSet<String>,
}
impl EngineCapabilities {
    /// Creates a new `EngineCapabilities` instance with the given capabilities.
    pub fn new(capabilities: impl IntoIterator<Item: Into<String>>) -> Self {
        let inner = capabilities.into_iter().map(Into::into).collect();
        Self { inner }
    }
    /// Returns the list of all supported Engine capabilities for Prague spec.
    fn prague() -> Self {
        Self::new(CAPABILITIES.iter().copied())
    }
    /// Returns the list of all supported Engine capabilities.
    pub fn list(&self) -> Vec<String> {
        Vec::from_iter(self.inner.iter().cloned())
    }
    /// Inserts a new capability.
    pub fn add_capability(&mut self, capability: impl Into<String>) {
        self.inner.insert(capability.into());
    }
    /// Removes a capability, returning it if it was present.
    pub fn remove_capability(&mut self, capability: &str) -> Option<String> {
        self.inner.take(capability)
    }
}
impl Default for EngineCapabilities {
    /// Defaults to the full Prague capability set.
    fn default() -> Self {
        Self::prague()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/src/engine_api.rs | crates/rpc/rpc-engine-api/src/engine_api.rs | use crate::{
capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult,
};
use alloy_eips::{
eip1898::BlockHashOrNumber,
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip4895::Withdrawals,
eip7685::RequestsOrHash,
};
use alloy_primitives::{BlockHash, BlockNumber, B256, U64};
use alloy_rpc_types_engine::{
CancunPayloadFields, ClientVersionV1, ExecutionData, ExecutionPayloadBodiesV1,
ExecutionPayloadBodyV1, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1,
ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus,
PraguePayloadFields,
};
use async_trait::async_trait;
use jsonrpsee_core::{server::RpcModule, RpcResult};
use parking_lot::Mutex;
use reth_chainspec::EthereumHardforks;
use reth_engine_primitives::{ConsensusEngineHandle, EngineApiValidator, EngineTypes};
use reth_payload_builder::PayloadStore;
use reth_payload_primitives::{
validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, PayloadOrAttributes,
PayloadTypes,
};
use reth_primitives_traits::{Block, BlockBody};
use reth_rpc_api::{EngineApiServer, IntoEngineApiRpcModule};
use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory};
use reth_tasks::TaskSpawner;
use reth_transaction_pool::TransactionPool;
use std::{sync::Arc, time::Instant};
use tokio::sync::oneshot;
use tracing::{debug, trace, warn};
/// The Engine API response sender.
pub type EngineApiSender<Ok> = oneshot::Sender<EngineApiResult<Ok>>;
/// The upper limit for payload bodies request.
///
/// Requests with a larger `count` are rejected with `PayloadRequestTooLarge`.
const MAX_PAYLOAD_BODIES_LIMIT: u64 = 1024;
/// The upper limit for blobs in `engine_getBlobsVx`.
const MAX_BLOB_LIMIT: usize = 128;
/// The Engine API implementation that grants the Consensus layer access to data and
/// functions in the Execution layer that are crucial for the consensus process.
///
/// This type is generic over [`EngineTypes`] and intended to be used as the entrypoint for engine
/// API processing. It can be reused by other non L1 engine APIs that deviate from the L1 spec but
/// still follow the engine API model.
///
/// ## Implementers
///
/// Implementing support for an engine API jsonrpsee RPC handler is done by defining the engine API
/// server trait and implementing it on a type that can either wrap this [`EngineApi`] type or
/// use a custom [`EngineTypes`] implementation if it mirrors ethereum's versioned engine API
/// endpoints (e.g. opstack).
/// See also [`EngineApiServer`] implementation for this type which is the
/// L1 implementation.
pub struct EngineApi<Provider, PayloadT: PayloadTypes, Pool, Validator, ChainSpec> {
    // Shared state; cheap to clone into spawned tasks.
    inner: Arc<EngineApiInner<Provider, PayloadT, Pool, Validator, ChainSpec>>,
}
impl<Provider, PayloadT: PayloadTypes, Pool, Validator, ChainSpec>
    EngineApi<Provider, PayloadT, Pool, Validator, ChainSpec>
{
    /// Returns a reference to the configured chainspec [`Arc`].
    pub fn chain_spec(&self) -> &Arc<ChainSpec> {
        &self.inner.chain_spec
    }
}
impl<Provider, PayloadT, Pool, Validator, ChainSpec>
    EngineApi<Provider, PayloadT, Pool, Validator, ChainSpec>
where
    Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static,
    PayloadT: PayloadTypes,
    Pool: TransactionPool + 'static,
    Validator: EngineApiValidator<PayloadT>,
    ChainSpec: EthereumHardforks + Send + Sync + 'static,
{
    /// Create new instance of [`EngineApi`].
    #[expect(clippy::too_many_arguments)]
    pub fn new(
        provider: Provider,
        chain_spec: Arc<ChainSpec>,
        beacon_consensus: ConsensusEngineHandle<PayloadT>,
        payload_store: PayloadStore<PayloadT>,
        tx_pool: Pool,
        task_spawner: Box<dyn TaskSpawner>,
        client: ClientVersionV1,
        capabilities: EngineCapabilities,
        validator: Validator,
        accept_execution_requests_hash: bool,
    ) -> Self {
        // All state is wrapped in a single Arc so handlers can be spawned cheaply.
        let inner = Arc::new(EngineApiInner {
            provider,
            chain_spec,
            beacon_consensus,
            payload_store,
            task_spawner,
            metrics: EngineApiMetrics::default(),
            client,
            capabilities,
            tx_pool,
            validator,
            latest_new_payload_response: Mutex::new(None),
            accept_execution_requests_hash,
        });
        Self { inner }
    }
    /// Fetches the client version.
    ///
    /// The CL-supplied `_client` version is ignored; only our own client version is returned.
    pub fn get_client_version_v1(
        &self,
        _client: ClientVersionV1,
    ) -> EngineApiResult<Vec<ClientVersionV1>> {
        Ok(vec![self.inner.client.clone()])
    }
    /// Fetches the timestamp of the payload with the given id.
    ///
    /// Returns [`EngineApiError::UnknownPayload`] if the payload store has no entry for the id.
    async fn get_payload_timestamp(&self, payload_id: PayloadId) -> EngineApiResult<u64> {
        Ok(self
            .inner
            .payload_store
            .payload_timestamp(payload_id)
            .await
            .ok_or(EngineApiError::UnknownPayload)??)
    }
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/paris.md#engine_newpayloadv1>
    /// Caution: This should not accept the `withdrawals` field
    pub async fn new_payload_v1(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> EngineApiResult<PayloadStatus> {
        let payload_or_attrs = PayloadOrAttributes::<
            '_,
            PayloadT::ExecutionData,
            PayloadT::PayloadAttributes,
        >::from_execution_payload(&payload);
        // Perform V1-specific field validation before forwarding to the engine.
        self.inner
            .validator
            .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?;
        Ok(self
            .inner
            .beacon_consensus
            .new_payload(payload)
            .await
            .inspect(|_| self.inner.on_new_payload_response())?)
    }
    /// Metered version of `new_payload_v1`.
    pub async fn new_payload_v1_metered(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> EngineApiResult<PayloadStatus> {
        let start = Instant::now();
        let gas_used = payload.gas_used();
        let res = Self::new_payload_v1(self, payload).await;
        let elapsed = start.elapsed();
        // Latency and response-type metrics are recorded regardless of outcome.
        self.inner.metrics.latency.new_payload_v1.record(elapsed);
        self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed);
        res
    }
    /// See also <https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/shanghai.md#engine_newpayloadv2>
    pub async fn new_payload_v2(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> EngineApiResult<PayloadStatus> {
        let payload_or_attrs = PayloadOrAttributes::<
            '_,
            PayloadT::ExecutionData,
            PayloadT::PayloadAttributes,
        >::from_execution_payload(&payload);
        // Perform V2-specific field validation before forwarding to the engine.
        self.inner
            .validator
            .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?;
        Ok(self
            .inner
            .beacon_consensus
            .new_payload(payload)
            .await
            .inspect(|_| self.inner.on_new_payload_response())?)
    }
    /// Metered version of `new_payload_v2`.
    pub async fn new_payload_v2_metered(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> EngineApiResult<PayloadStatus> {
        let start = Instant::now();
        let gas_used = payload.gas_used();
        let res = Self::new_payload_v2(self, payload).await;
        let elapsed = start.elapsed();
        self.inner.metrics.latency.new_payload_v2.record(elapsed);
        self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed);
        res
    }
    /// See also <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#engine_newpayloadv3>
    pub async fn new_payload_v3(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> EngineApiResult<PayloadStatus> {
        let payload_or_attrs = PayloadOrAttributes::<
            '_,
            PayloadT::ExecutionData,
            PayloadT::PayloadAttributes,
        >::from_execution_payload(&payload);
        // Perform V3-specific field validation before forwarding to the engine.
        self.inner
            .validator
            .validate_version_specific_fields(EngineApiMessageVersion::V3, payload_or_attrs)?;
        Ok(self
            .inner
            .beacon_consensus
            .new_payload(payload)
            .await
            .inspect(|_| self.inner.on_new_payload_response())?)
    }
    /// Metrics version of `new_payload_v3`
    ///
    /// NOTE(review): unlike the v1/v2 metered variants this returns [`RpcResult`] — confirm
    /// whether the asymmetry is intentional.
    pub async fn new_payload_v3_metered(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> RpcResult<PayloadStatus> {
        let start = Instant::now();
        let gas_used = payload.gas_used();
        let res = Self::new_payload_v3(self, payload).await;
        let elapsed = start.elapsed();
        self.inner.metrics.latency.new_payload_v3.record(elapsed);
        self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed);
        Ok(res?)
    }
    /// See also <https://github.com/ethereum/execution-apis/blob/7907424db935b93c2fe6a3c0faab943adebe8557/src/engine/prague.md#engine_newpayloadv4>
    pub async fn new_payload_v4(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> EngineApiResult<PayloadStatus> {
        let payload_or_attrs = PayloadOrAttributes::<
            '_,
            PayloadT::ExecutionData,
            PayloadT::PayloadAttributes,
        >::from_execution_payload(&payload);
        // Perform V4-specific field validation before forwarding to the engine.
        self.inner
            .validator
            .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?;
        Ok(self
            .inner
            .beacon_consensus
            .new_payload(payload)
            .await
            .inspect(|_| self.inner.on_new_payload_response())?)
    }
    /// Metrics version of `new_payload_v4`
    pub async fn new_payload_v4_metered(
        &self,
        payload: PayloadT::ExecutionData,
    ) -> RpcResult<PayloadStatus> {
        let start = Instant::now();
        let gas_used = payload.gas_used();
        let res = Self::new_payload_v4(self, payload).await;
        let elapsed = start.elapsed();
        self.inner.metrics.latency.new_payload_v4.record(elapsed);
        self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed);
        Ok(res?)
    }
    /// Returns whether the engine accepts execution requests hash.
    pub fn accept_execution_requests_hash(&self) -> bool {
        self.inner.accept_execution_requests_hash
    }
}
impl<Provider, EngineT, Pool, Validator, ChainSpec>
EngineApi<Provider, EngineT, Pool, Validator, ChainSpec>
where
Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static,
EngineT: EngineTypes,
Pool: TransactionPool + 'static,
Validator: EngineApiValidator<EngineT>,
ChainSpec: EthereumHardforks + Send + Sync + 'static,
{
    /// Sends a message to the beacon consensus engine to update the fork choice _without_
    /// withdrawals.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/paris.md#engine_forkchoiceUpdatedV1>
    ///
    /// Caution: This should not accept the `withdrawals` field
    pub async fn fork_choice_updated_v1(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<EngineT::PayloadAttributes>,
    ) -> EngineApiResult<ForkchoiceUpdated> {
        // Shared validation + dispatch path, parameterized by message version.
        self.validate_and_execute_forkchoice(EngineApiMessageVersion::V1, state, payload_attrs)
            .await
    }
/// Metrics version of `fork_choice_updated_v1`
pub async fn fork_choice_updated_v1_metered(
&self,
state: ForkchoiceState,
payload_attrs: Option<EngineT::PayloadAttributes>,
) -> EngineApiResult<ForkchoiceUpdated> {
let start = Instant::now();
let res = Self::fork_choice_updated_v1(self, state, payload_attrs).await;
self.inner.metrics.latency.fork_choice_updated_v1.record(start.elapsed());
self.inner.metrics.fcu_response.update_response_metrics(&res);
res
}
    /// Sends a message to the beacon consensus engine to update the fork choice _with_ withdrawals,
    /// but only _after_ shanghai.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/shanghai.md#engine_forkchoiceupdatedv2>
    pub async fn fork_choice_updated_v2(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<EngineT::PayloadAttributes>,
    ) -> EngineApiResult<ForkchoiceUpdated> {
        // Shared validation + dispatch path, parameterized by message version.
        self.validate_and_execute_forkchoice(EngineApiMessageVersion::V2, state, payload_attrs)
            .await
    }
    /// Metrics version of `fork_choice_updated_v2`
    ///
    /// Records latency and response-type metrics around the call.
    pub async fn fork_choice_updated_v2_metered(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<EngineT::PayloadAttributes>,
    ) -> EngineApiResult<ForkchoiceUpdated> {
        let start = Instant::now();
        let res = Self::fork_choice_updated_v2(self, state, payload_attrs).await;
        self.inner.metrics.latency.fork_choice_updated_v2.record(start.elapsed());
        self.inner.metrics.fcu_response.update_response_metrics(&res);
        res
    }
    /// Sends a message to the beacon consensus engine to update the fork choice _with_ withdrawals,
    /// but only _after_ cancun.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_forkchoiceupdatedv3>
    pub async fn fork_choice_updated_v3(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<EngineT::PayloadAttributes>,
    ) -> EngineApiResult<ForkchoiceUpdated> {
        // Shared validation + dispatch path, parameterized by message version.
        self.validate_and_execute_forkchoice(EngineApiMessageVersion::V3, state, payload_attrs)
            .await
    }
    /// Metrics version of `fork_choice_updated_v3`
    ///
    /// Records latency and response-type metrics around the call.
    pub async fn fork_choice_updated_v3_metered(
        &self,
        state: ForkchoiceState,
        payload_attrs: Option<EngineT::PayloadAttributes>,
    ) -> EngineApiResult<ForkchoiceUpdated> {
        let start = Instant::now();
        let res = Self::fork_choice_updated_v3(self, state, payload_attrs).await;
        self.inner.metrics.latency.fork_choice_updated_v3.record(start.elapsed());
        self.inner.metrics.fcu_response.update_response_metrics(&res);
        res
    }
    /// Helper function for retrieving the build payload by id.
    ///
    /// NOTE(review): a resolve error is collapsed into [`EngineApiError::UnknownPayload`],
    /// discarding the underlying payload-builder error — confirm this is intentional.
    async fn get_built_payload(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::BuiltPayload> {
        self.inner
            .payload_store
            .resolve(payload_id)
            .await
            .ok_or(EngineApiError::UnknownPayload)?
            .map_err(|_| EngineApiError::UnknownPayload)
    }
    /// Helper function for validating the payload timestamp and retrieving & converting the payload
    /// into desired envelope.
    ///
    /// The timestamp check runs _before_ resolving the payload, so version-mismatched
    /// requests fail without triggering a payload build.
    async fn get_payload_inner<R>(
        &self,
        payload_id: PayloadId,
        version: EngineApiMessageVersion,
    ) -> EngineApiResult<R>
    where
        EngineT::BuiltPayload: TryInto<R>,
    {
        // validate timestamp according to engine rules
        let timestamp = self.get_payload_timestamp(payload_id).await?;
        validate_payload_timestamp(&self.inner.chain_spec, version, timestamp)?;
        // Now resolve the payload
        self.get_built_payload(payload_id).await?.try_into().map_err(|_| {
            warn!(?version, "could not transform built payload");
            EngineApiError::UnknownPayload
        })
    }
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/paris.md#engine_getPayloadV1>
    ///
    /// Caution: This should not return the `withdrawals` field
    ///
    /// Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    pub async fn get_payload_v1(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV1> {
        // Unlike V2+, V1 does not perform a payload-timestamp validation step.
        self.get_built_payload(payload_id).await?.try_into().map_err(|_| {
            warn!(version = ?EngineApiMessageVersion::V1, "could not transform built payload");
            EngineApiError::UnknownPayload
        })
    }
/// Metrics version of `get_payload_v1`
pub async fn get_payload_v1_metered(
&self,
payload_id: PayloadId,
) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV1> {
let start = Instant::now();
let res = Self::get_payload_v1(self, payload_id).await;
self.inner.metrics.latency.get_payload_v1.record(start.elapsed());
res
}
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/3d627c95a4d3510a8187dd02e0250ecb4331d27e/src/engine/shanghai.md#engine_getpayloadv2>
    ///
    /// Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    pub async fn get_payload_v2(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV2> {
        // Timestamp validation and envelope conversion are shared in `get_payload_inner`.
        self.get_payload_inner(payload_id, EngineApiMessageVersion::V2).await
    }
    /// Metrics version of `get_payload_v2`
    ///
    /// Records the call latency regardless of outcome.
    pub async fn get_payload_v2_metered(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV2> {
        let start = Instant::now();
        let res = Self::get_payload_v2(self, payload_id).await;
        self.inner.metrics.latency.get_payload_v2.record(start.elapsed());
        res
    }
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#engine_getpayloadv3>
    ///
    /// Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    pub async fn get_payload_v3(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV3> {
        // Timestamp validation and envelope conversion are shared in `get_payload_inner`.
        self.get_payload_inner(payload_id, EngineApiMessageVersion::V3).await
    }
    /// Metrics version of `get_payload_v3`
    ///
    /// Records the call latency regardless of outcome.
    pub async fn get_payload_v3_metered(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV3> {
        let start = Instant::now();
        let res = Self::get_payload_v3(self, payload_id).await;
        self.inner.metrics.latency.get_payload_v3.record(start.elapsed());
        res
    }
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/7907424db935b93c2fe6a3c0faab943adebe8557/src/engine/prague.md#engine_getpayloadv4>
    ///
    /// Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    pub async fn get_payload_v4(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV4> {
        // Timestamp validation and envelope conversion are shared in `get_payload_inner`.
        self.get_payload_inner(payload_id, EngineApiMessageVersion::V4).await
    }
    /// Metrics version of `get_payload_v4`
    ///
    /// Records the call latency regardless of outcome.
    pub async fn get_payload_v4_metered(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV4> {
        let start = Instant::now();
        let res = Self::get_payload_v4(self, payload_id).await;
        self.inner.metrics.latency.get_payload_v4.record(start.elapsed());
        res
    }
    /// Handler for `engine_getPayloadV5`
    ///
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/15399c2e2f16a5f800bf3f285640357e2c245ad9/src/engine/osaka.md#engine_getpayloadv5>
    ///
    /// Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    pub async fn get_payload_v5(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV5> {
        // Timestamp validation and envelope conversion are shared in `get_payload_inner`.
        self.get_payload_inner(payload_id, EngineApiMessageVersion::V5).await
    }
    /// Metrics version of `get_payload_v5`
    ///
    /// Records the call latency regardless of outcome.
    pub async fn get_payload_v5_metered(
        &self,
        payload_id: PayloadId,
    ) -> EngineApiResult<EngineT::ExecutionPayloadEnvelopeV5> {
        let start = Instant::now();
        let res = Self::get_payload_v5(self, payload_id).await;
        self.inner.metrics.latency.get_payload_v5.record(start.elapsed());
        res
    }
    /// Fetches all the blocks for the provided range starting at `start`, containing `count`
    /// blocks and returns the mapped payload bodies.
    ///
    /// Validation and block lookups run on a spawned blocking task; the first provider
    /// error aborts the whole request. Missing blocks inside the (truncated) range are
    /// returned as `None` entries.
    pub async fn get_payload_bodies_by_range_with<F, R>(
        &self,
        start: BlockNumber,
        count: u64,
        f: F,
    ) -> EngineApiResult<Vec<Option<R>>>
    where
        F: Fn(Provider::Block) -> R + Send + 'static,
        R: Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        let inner = self.inner.clone();
        // Spawned as a blocking task: block lookups via the provider are potentially expensive.
        self.inner.task_spawner.spawn_blocking(Box::pin(async move {
            // Reject oversized requests before touching the provider.
            if count > MAX_PAYLOAD_BODIES_LIMIT {
                tx.send(Err(EngineApiError::PayloadRequestTooLarge { len: count })).ok();
                return;
            }
            // Block number 0 (genesis) and empty ranges are invalid per spec.
            if start == 0 || count == 0 {
                tx.send(Err(EngineApiError::InvalidBodiesRange { start, count })).ok();
                return;
            }
            let mut result = Vec::with_capacity(count as usize);
            // -1 so range is inclusive
            let mut end = start.saturating_add(count - 1);
            // > Client software MUST NOT return trailing null values if the request extends past the current latest known block.
            // truncate the end if it's greater than the last block
            if let Ok(best_block) = inner.provider.best_block_number() {
                if end > best_block {
                    end = best_block;
                }
            }
            for num in start..=end {
                let block_result = inner.provider.block(BlockHashOrNumber::Number(num));
                match block_result {
                    Ok(block) => {
                        result.push(block.map(&f));
                    }
                    Err(err) => {
                        // Provider failure: abort and surface an internal error.
                        tx.send(Err(EngineApiError::Internal(Box::new(err)))).ok();
                        return;
                    }
                };
            }
            tx.send(Ok(result)).ok();
        }));
        // A dropped sender (task panicked / never sent) also maps to an internal error.
        rx.await.map_err(|err| EngineApiError::Internal(Box::new(err)))?
    }
/// Returns the execution payload bodies by the range starting at `start`, containing `count`
/// blocks.
///
/// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus
/// layer p2p specification, meaning the input should be treated as untrusted or potentially
/// adversarial.
///
/// Implementers should take care when acting on the input to this method, specifically
/// ensuring that the range is limited properly, and that the range boundaries are computed
/// correctly and without panics.
pub async fn get_payload_bodies_by_range_v1(
&self,
start: BlockNumber,
count: u64,
) -> EngineApiResult<ExecutionPayloadBodiesV1> {
self.get_payload_bodies_by_range_with(start, count, |block| ExecutionPayloadBodyV1 {
transactions: block.body().encoded_2718_transactions(),
withdrawals: block.body().withdrawals().cloned().map(Withdrawals::into_inner),
})
.await
}
/// Metrics version of `get_payload_bodies_by_range_v1`
pub async fn get_payload_bodies_by_range_v1_metered(
&self,
start: BlockNumber,
count: u64,
) -> EngineApiResult<ExecutionPayloadBodiesV1> {
let start_time = Instant::now();
let res = Self::get_payload_bodies_by_range_v1(self, start, count).await;
self.inner.metrics.latency.get_payload_bodies_by_range_v1.record(start_time.elapsed());
res
}
/// Called to retrieve execution payload bodies by hashes.
pub async fn get_payload_bodies_by_hash_with<F, R>(
&self,
hashes: Vec<BlockHash>,
f: F,
) -> EngineApiResult<Vec<Option<R>>>
where
F: Fn(Provider::Block) -> R + Send + 'static,
R: Send + 'static,
{
let len = hashes.len() as u64;
if len > MAX_PAYLOAD_BODIES_LIMIT {
return Err(EngineApiError::PayloadRequestTooLarge { len });
}
let (tx, rx) = oneshot::channel();
let inner = self.inner.clone();
self.inner.task_spawner.spawn_blocking(Box::pin(async move {
let mut result = Vec::with_capacity(hashes.len());
for hash in hashes {
let block_result = inner.provider.block(BlockHashOrNumber::Hash(hash));
match block_result {
Ok(block) => {
result.push(block.map(&f));
}
Err(err) => {
let _ = tx.send(Err(EngineApiError::Internal(Box::new(err))));
return;
}
}
}
tx.send(Ok(result)).ok();
}));
rx.await.map_err(|err| EngineApiError::Internal(Box::new(err)))?
}
/// Called to retrieve execution payload bodies by hashes.
pub async fn get_payload_bodies_by_hash_v1(
&self,
hashes: Vec<BlockHash>,
) -> EngineApiResult<ExecutionPayloadBodiesV1> {
self.get_payload_bodies_by_hash_with(hashes, |block| ExecutionPayloadBodyV1 {
transactions: block.body().encoded_2718_transactions(),
withdrawals: block.body().withdrawals().cloned().map(Withdrawals::into_inner),
})
.await
}
/// Metrics version of `get_payload_bodies_by_hash_v1`
pub async fn get_payload_bodies_by_hash_v1_metered(
&self,
hashes: Vec<BlockHash>,
) -> EngineApiResult<ExecutionPayloadBodiesV1> {
let start = Instant::now();
let res = Self::get_payload_bodies_by_hash_v1(self, hashes);
self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed());
res.await
}
/// Validates the `engine_forkchoiceUpdated` payload attributes and executes the forkchoice
/// update.
///
/// The payload attributes will be validated according to the engine API rules for the given
/// message version:
/// * If the version is [`EngineApiMessageVersion::V1`], then the payload attributes will be
/// validated according to the Paris rules.
/// * If the version is [`EngineApiMessageVersion::V2`], then the payload attributes will be
/// validated according to the Shanghai rules, as well as the validity changes from cancun:
/// <https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/cancun.md#update-the-methods-of-previous-forks>
///
/// * If the version above [`EngineApiMessageVersion::V3`], then the payload attributes will be
/// validated according to the Cancun rules.
async fn validate_and_execute_forkchoice(
&self,
version: EngineApiMessageVersion,
state: ForkchoiceState,
payload_attrs: Option<EngineT::PayloadAttributes>,
) -> EngineApiResult<ForkchoiceUpdated> {
self.inner.record_elapsed_time_on_fcu();
if let Some(ref attrs) = payload_attrs {
let attr_validation_res =
self.inner.validator.ensure_well_formed_attributes(version, attrs);
// From the engine API spec:
//
// Client software MUST ensure that payloadAttributes.timestamp is greater than
// timestamp of a block referenced by forkchoiceState.headBlockHash. If this condition
// isn't held client software MUST respond with -38003: Invalid payload attributes and
// MUST NOT begin a payload build process. In such an event, the forkchoiceState
// update MUST NOT be rolled back.
//
// NOTE: This will also apply to the validation result for the cancun or
// shanghai-specific fields provided in the payload attributes.
//
// To do this, we set the payload attrs to `None` if attribute validation failed, but
// we still apply the forkchoice update.
if let Err(err) = attr_validation_res {
let fcu_res =
self.inner.beacon_consensus.fork_choice_updated(state, None, version).await?;
// TODO: decide if we want this branch - the FCU INVALID response might be more
// useful than the payload attributes INVALID response
if fcu_res.is_invalid() {
return Ok(fcu_res)
}
return Err(err.into())
}
}
Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs, version).await?)
}
/// Returns reference to supported capabilities.
pub fn capabilities(&self) -> &EngineCapabilities {
&self.inner.capabilities
}
fn get_blobs_v1(
&self,
versioned_hashes: Vec<B256>,
) -> EngineApiResult<Vec<Option<BlobAndProofV1>>> {
if versioned_hashes.len() > MAX_BLOB_LIMIT {
return Err(EngineApiError::BlobRequestTooLarge { len: versioned_hashes.len() })
}
self.inner
.tx_pool
.get_blobs_for_versioned_hashes_v1(&versioned_hashes)
.map_err(|err| EngineApiError::Internal(Box::new(err)))
}
/// Metered version of `get_blobs_v1`.
pub fn get_blobs_v1_metered(
&self,
versioned_hashes: Vec<B256>,
) -> EngineApiResult<Vec<Option<BlobAndProofV1>>> {
let hashes_len = versioned_hashes.len();
let start = Instant::now();
let res = Self::get_blobs_v1(self, versioned_hashes);
self.inner.metrics.latency.get_blobs_v1.record(start.elapsed());
if let Ok(blobs) = &res {
let blobs_found = blobs.iter().flatten().count();
let blobs_missed = hashes_len - blobs_found;
self.inner.metrics.blob_metrics.blob_count.increment(blobs_found as u64);
self.inner.metrics.blob_metrics.blob_misses.increment(blobs_missed as u64);
}
res
}
fn get_blobs_v2(
&self,
versioned_hashes: Vec<B256>,
) -> EngineApiResult<Option<Vec<BlobAndProofV2>>> {
if versioned_hashes.len() > MAX_BLOB_LIMIT {
return Err(EngineApiError::BlobRequestTooLarge { len: versioned_hashes.len() })
}
self.inner
.tx_pool
.get_blobs_for_versioned_hashes_v2(&versioned_hashes)
.map_err(|err| EngineApiError::Internal(Box::new(err)))
}
/// Metered version of `get_blobs_v2`.
pub fn get_blobs_v2_metered(
&self,
versioned_hashes: Vec<B256>,
) -> EngineApiResult<Option<Vec<BlobAndProofV2>>> {
let hashes_len = versioned_hashes.len();
let start = Instant::now();
let res = Self::get_blobs_v2(self, versioned_hashes);
self.inner.metrics.latency.get_blobs_v2.record(start.elapsed());
if let Ok(blobs) = &res {
let blobs_found = blobs.iter().flatten().count();
self.inner
.metrics
.blob_metrics
.get_blobs_requests_blobs_total
.increment(hashes_len as u64);
self.inner
.metrics
.blob_metrics
.get_blobs_requests_blobs_in_blobpool_total
.increment(blobs_found as u64);
if blobs_found == hashes_len {
self.inner.metrics.blob_metrics.get_blobs_requests_success_total.increment(1);
} else {
self.inner.metrics.blob_metrics.get_blobs_requests_failure_total.increment(1);
}
} else {
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/tests/it/payload.rs | crates/rpc/rpc-engine-api/tests/it/payload.rs | //! Some payload tests
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::Bytes;
use alloy_rlp::Decodable;
use alloy_rpc_types_engine::{
ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1,
PayloadError,
};
use assert_matches::assert_matches;
use reth_ethereum_primitives::{Block, TransactionSigned};
use reth_primitives_traits::{proofs, SealedBlock};
use reth_testing_utils::generators::{
self, random_block, random_block_range, BlockParams, BlockRangeParams, Rng,
};
fn transform_block<F: FnOnce(Block) -> Block>(src: SealedBlock<Block>, f: F) -> ExecutionPayload {
let unsealed = src.into_block();
let mut transformed: Block = f(unsealed);
// Recalculate roots
transformed.header.transactions_root =
proofs::calculate_transaction_root(&transformed.body.transactions);
transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.body.ommers);
ExecutionPayload::from_block_slow(&transformed).0
}
#[test]
fn payload_body_roundtrip() {
let mut rng = generators::rng();
for block in random_block_range(
&mut rng,
0..=99,
BlockRangeParams { tx_count: 0..2, ..Default::default() },
) {
let payload_body: ExecutionPayloadBodyV1 =
ExecutionPayloadBodyV1::from_block(block.clone().into_block());
assert_eq!(
Ok(block.body().transactions.clone()),
payload_body
.transactions
.iter()
.map(|x| TransactionSigned::decode(&mut &x[..]))
.collect::<Result<Vec<_>, _>>(),
);
let withdraw = payload_body.withdrawals.map(Withdrawals::new);
assert_eq!(block.body().withdrawals.clone(), withdraw);
}
}
#[test]
fn payload_validation_conversion() {
let mut rng = generators::rng();
let parent = rng.random();
let block = random_block(
&mut rng,
100,
BlockParams {
parent: Some(parent),
tx_count: Some(3),
ommers_count: Some(0),
..Default::default()
},
);
// Valid extra data
let block_with_valid_extra_data = transform_block(block.clone(), |mut b| {
b.header.extra_data = Bytes::from_static(&[0; 32]);
b
});
assert_matches!(
block_with_valid_extra_data
.try_into_block_with_sidecar::<TransactionSigned>(&ExecutionPayloadSidecar::none()),
Ok(_)
);
// Invalid extra data
let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]);
let invalid_extra_data_block = transform_block(block.clone(), |mut b| {
b.header.extra_data = block_with_invalid_extra_data.clone();
b
});
assert_matches!(
invalid_extra_data_block.try_into_block_with_sidecar::<TransactionSigned>(&ExecutionPayloadSidecar::none()),
Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data
);
// Invalid encoded transactions
let mut payload_with_invalid_txs =
ExecutionPayloadV1::from_block_unchecked(block.hash(), &block.into_block());
payload_with_invalid_txs.transactions.iter_mut().for_each(|tx| {
*tx = Bytes::new();
});
let payload_with_invalid_txs = payload_with_invalid_txs.try_into_block::<TransactionSigned>();
assert_matches!(payload_with_invalid_txs, Err(PayloadError::Decode(_)));
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-engine-api/tests/it/main.rs | crates/rpc/rpc-engine-api/tests/it/main.rs | #![allow(missing_docs)]
mod payload;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-testing-util/src/lib.rs | crates/rpc/rpc-testing-util/src/lib.rs | //! Reth RPC testing utilities.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod debug;
pub mod trace;
pub mod utils;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-testing-util/src/trace.rs | crates/rpc/rpc-testing-util/src/trace.rs | //! Helpers for testing trace calls.
use alloy_eips::BlockId;
use alloy_primitives::{map::HashSet, Bytes, TxHash, B256};
use alloy_rpc_types_eth::{transaction::TransactionRequest, Index};
use alloy_rpc_types_trace::{
filter::TraceFilter,
opcode::BlockOpcodeGas,
parity::{LocalizedTransactionTrace, TraceResults, TraceType},
tracerequest::TraceCallRequest,
};
use futures::{Stream, StreamExt};
use jsonrpsee::core::client::Error as RpcError;
use reth_rpc_api::clients::TraceApiClient;
use std::{
pin::Pin,
task::{Context, Poll},
};
/// A type alias that represents the result of a raw transaction trace stream.
type RawTransactionTraceResult<'a> =
Pin<Box<dyn Stream<Item = Result<(TraceResults, Bytes), (RpcError, Bytes)>> + 'a>>;
/// A result type for the `trace_block` method that also captures the requested block.
pub type TraceBlockResult = Result<(Vec<LocalizedTransactionTrace>, BlockId), (RpcError, BlockId)>;
/// A result type for the `trace_blockOpcodeGas` method that also captures the requested block.
pub type TraceBlockOpCodeGasResult = Result<(BlockOpcodeGas, BlockId), (RpcError, BlockId)>;
/// Type alias representing the result of replaying a transaction.
pub type ReplayTransactionResult = Result<(TraceResults, TxHash), (RpcError, TxHash)>;
/// A type representing the result of calling `trace_call_many` method.
pub type CallManyTraceResult = Result<
(Vec<TraceResults>, Vec<(TransactionRequest, HashSet<TraceType>)>),
(RpcError, Vec<(TransactionRequest, HashSet<TraceType>)>),
>;
/// Result type for the `trace_get` method that also captures the requested transaction hash and
/// index.
pub type TraceGetResult =
Result<(Option<LocalizedTransactionTrace>, B256, Vec<Index>), (RpcError, B256, Vec<Index>)>;
/// Represents a result type for the `trace_filter` stream extension.
pub type TraceFilterResult =
Result<(Vec<LocalizedTransactionTrace>, TraceFilter), (RpcError, TraceFilter)>;
/// Represents the result of a single trace call.
pub type TraceCallResult = Result<TraceResults, (RpcError, TraceCallRequest)>;
/// An extension trait for the Trace API.
pub trait TraceApiExt {
/// The provider type that is used to make the requests.
type Provider;
/// Returns a new stream that yields the traces for the given blocks.
///
/// See also [`StreamExt::buffered`].
fn trace_block_buffered<I, B>(&self, params: I, n: usize) -> TraceBlockStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId>;
/// Returns a new stream that yields the traces for the given blocks.
///
/// See also [`StreamExt::buffer_unordered`].
fn trace_block_buffered_unordered<I, B>(&self, params: I, n: usize) -> TraceBlockStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId>;
/// Returns a new stream that yields the traces the opcodes for the given blocks.
///
/// See also [`StreamExt::buffered`].
fn trace_block_opcode_gas_unordered<I, B>(
&self,
params: I,
n: usize,
) -> TraceBlockOpcodeGasStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId>;
/// Returns a new stream that replays the transactions for the given transaction hashes.
///
/// This returns all results in order.
fn replay_transactions<I>(
&self,
tx_hashes: I,
trace_types: HashSet<TraceType>,
) -> ReplayTransactionStream<'_>
where
I: IntoIterator<Item = TxHash>;
/// Returns a new stream that traces the provided raw transaction data.
fn trace_raw_transaction_stream(
&self,
data: Bytes,
trace_types: HashSet<TraceType>,
block_id: Option<BlockId>,
) -> RawTransactionTraceStream<'_>;
/// Creates a stream of results for multiple dependent transaction calls on top of the same
/// block.
fn trace_call_many_stream<I>(
&self,
calls: I,
block_id: Option<BlockId>,
) -> CallManyTraceStream<'_>
where
I: IntoIterator<Item = (TransactionRequest, HashSet<TraceType>)>;
/// Returns a new stream that yields the traces for the given transaction hash and indices.
fn trace_get_stream<I>(&self, hash: B256, indices: I) -> TraceGetStream<'_>
where
I: IntoIterator<Item = Index>;
/// Returns a new stream that yields traces for given filters.
fn trace_filter_stream<I>(&self, filters: I) -> TraceFilterStream<'_>
where
I: IntoIterator<Item = TraceFilter>;
/// Returns a new stream that yields the trace results for the given call requests.
fn trace_call_stream(&self, request: TraceCallRequest) -> TraceCallStream<'_>;
}
/// `TraceCallStream` provides an asynchronous stream of tracing results.
#[must_use = "streams do nothing unless polled"]
pub struct TraceCallStream<'a> {
stream: Pin<Box<dyn Stream<Item = TraceCallResult> + 'a>>,
}
impl Stream for TraceCallStream<'_> {
type Item = TraceCallResult;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for TraceCallStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TraceCallStream").finish()
}
}
/// Represents a stream that asynchronously yields the results of the `trace_filter` method.
#[must_use = "streams do nothing unless polled"]
pub struct TraceFilterStream<'a> {
stream: Pin<Box<dyn Stream<Item = TraceFilterResult> + 'a>>,
}
impl Stream for TraceFilterStream<'_> {
type Item = TraceFilterResult;
/// Attempts to pull out the next value of the stream.
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for TraceFilterStream<'_> {
/// Provides a debug representation of the `TraceFilterStream`.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TraceFilterStream").finish_non_exhaustive()
}
}
/// A stream that asynchronously yields the results of the `trace_get` method for a given
/// transaction hash and a series of indices.
#[must_use = "streams do nothing unless polled"]
pub struct TraceGetStream<'a> {
stream: Pin<Box<dyn Stream<Item = TraceGetResult> + 'a>>,
}
impl Stream for TraceGetStream<'_> {
type Item = TraceGetResult;
/// Attempts to pull out the next item of the stream
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for TraceGetStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TraceGetStream").finish_non_exhaustive()
}
}
/// A stream that provides asynchronous iteration over results from the `trace_call_many` function.
///
/// The stream yields items of type `CallManyTraceResult`.
#[must_use = "streams do nothing unless polled"]
pub struct CallManyTraceStream<'a> {
stream: Pin<Box<dyn Stream<Item = CallManyTraceResult> + 'a>>,
}
impl Stream for CallManyTraceStream<'_> {
type Item = CallManyTraceResult;
/// Polls for the next item from the stream.
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for CallManyTraceStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("CallManyTraceStream").finish()
}
}
/// A stream that traces the provided raw transaction data.
#[must_use = "streams do nothing unless polled"]
pub struct RawTransactionTraceStream<'a> {
stream: RawTransactionTraceResult<'a>,
}
impl Stream for RawTransactionTraceStream<'_> {
type Item = Result<(TraceResults, Bytes), (RpcError, Bytes)>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for RawTransactionTraceStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("RawTransactionTraceStream").finish()
}
}
/// A stream that replays the transactions for the requested hashes.
#[must_use = "streams do nothing unless polled"]
pub struct ReplayTransactionStream<'a> {
stream: Pin<Box<dyn Stream<Item = ReplayTransactionResult> + 'a>>,
}
impl Stream for ReplayTransactionStream<'_> {
type Item = ReplayTransactionResult;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for ReplayTransactionStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("ReplayTransactionStream").finish()
}
}
impl<T: TraceApiClient<TransactionRequest> + Sync> TraceApiExt for T {
type Provider = T;
fn trace_block_buffered<I, B>(&self, params: I, n: usize) -> TraceBlockStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId>,
{
let blocks = params.into_iter().map(|b| b.into()).collect::<Vec<_>>();
let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move {
match self.trace_block(block).await {
Ok(result) => Ok((result.unwrap_or_default(), block)),
Err(err) => Err((err, block)),
}
}))
.buffered(n);
TraceBlockStream { stream: Box::pin(stream) }
}
fn trace_block_buffered_unordered<I, B>(&self, params: I, n: usize) -> TraceBlockStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId>,
{
let blocks = params.into_iter().map(|b| b.into()).collect::<Vec<_>>();
let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move {
match self.trace_block(block).await {
Ok(result) => Ok((result.unwrap_or_default(), block)),
Err(err) => Err((err, block)),
}
}))
.buffer_unordered(n);
TraceBlockStream { stream: Box::pin(stream) }
}
fn trace_block_opcode_gas_unordered<I, B>(
&self,
params: I,
n: usize,
) -> TraceBlockOpcodeGasStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId>,
{
let blocks = params.into_iter().map(|b| b.into()).collect::<Vec<_>>();
let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move {
match self.trace_block_opcode_gas(block).await {
Ok(result) => Ok((result.unwrap(), block)),
Err(err) => Err((err, block)),
}
}))
.buffered(n);
TraceBlockOpcodeGasStream { stream: Box::pin(stream) }
}
fn replay_transactions<I>(
&self,
tx_hashes: I,
trace_types: HashSet<TraceType>,
) -> ReplayTransactionStream<'_>
where
I: IntoIterator<Item = TxHash>,
{
let hashes = tx_hashes.into_iter().collect::<Vec<_>>();
let stream = futures::stream::iter(hashes.into_iter().map(move |hash| {
let trace_types_clone = trace_types.clone(); // Clone outside of the async block
async move {
match self.replay_transaction(hash, trace_types_clone).await {
Ok(result) => Ok((result, hash)),
Err(err) => Err((err, hash)),
}
}
}))
.buffered(10);
ReplayTransactionStream { stream: Box::pin(stream) }
}
fn trace_raw_transaction_stream(
&self,
data: Bytes,
trace_types: HashSet<TraceType>,
block_id: Option<BlockId>,
) -> RawTransactionTraceStream<'_> {
let stream = futures::stream::once(async move {
match self.trace_raw_transaction(data.clone(), trace_types, block_id).await {
Ok(result) => Ok((result, data)),
Err(err) => Err((err, data)),
}
});
RawTransactionTraceStream { stream: Box::pin(stream) }
}
fn trace_call_many_stream<I>(
&self,
calls: I,
block_id: Option<BlockId>,
) -> CallManyTraceStream<'_>
where
I: IntoIterator<Item = (TransactionRequest, HashSet<TraceType>)>,
{
let call_set = calls.into_iter().collect::<Vec<_>>();
let stream = futures::stream::once(async move {
match self.trace_call_many(call_set.clone(), block_id).await {
Ok(results) => Ok((results, call_set)),
Err(err) => Err((err, call_set)),
}
});
CallManyTraceStream { stream: Box::pin(stream) }
}
fn trace_get_stream<I>(&self, hash: B256, indices: I) -> TraceGetStream<'_>
where
I: IntoIterator<Item = Index>,
{
let index_list = indices.into_iter().collect::<Vec<_>>();
let stream = futures::stream::iter(index_list.into_iter().map(move |index| async move {
match self.trace_get(hash, vec![index]).await {
Ok(result) => Ok((result, hash, vec![index])),
Err(err) => Err((err, hash, vec![index])),
}
}))
.buffered(10);
TraceGetStream { stream: Box::pin(stream) }
}
fn trace_filter_stream<I>(&self, filters: I) -> TraceFilterStream<'_>
where
I: IntoIterator<Item = TraceFilter>,
{
let filter_list = filters.into_iter().collect::<Vec<_>>();
let stream = futures::stream::iter(filter_list.into_iter().map(move |filter| async move {
match self.trace_filter(filter.clone()).await {
Ok(result) => Ok((result, filter)),
Err(err) => Err((err, filter)),
}
}))
.buffered(10);
TraceFilterStream { stream: Box::pin(stream) }
}
fn trace_call_stream(&self, request: TraceCallRequest) -> TraceCallStream<'_> {
let stream = futures::stream::once(async move {
match self
.trace_call(
request.call.clone(),
request.trace_types.clone(),
request.block_id,
request.state_overrides.clone(),
request.block_overrides.clone(),
)
.await
{
Ok(result) => Ok(result),
Err(err) => Err((err, request)),
}
});
TraceCallStream { stream: Box::pin(stream) }
}
}
/// A stream that yields the traces for the requested blocks.
#[must_use = "streams do nothing unless polled"]
pub struct TraceBlockStream<'a> {
stream: Pin<Box<dyn Stream<Item = TraceBlockResult> + 'a>>,
}
impl TraceBlockStream<'_> {
/// Returns the next error result of the stream.
pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> {
loop {
match self.next().await? {
Ok(_) => {}
Err(err) => return Some(err),
}
}
}
}
impl Stream for TraceBlockStream<'_> {
type Item = TraceBlockResult;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for TraceBlockStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TraceBlockStream").finish_non_exhaustive()
}
}
/// A stream that yields the opcodes for the requested blocks.
#[must_use = "streams do nothing unless polled"]
pub struct TraceBlockOpcodeGasStream<'a> {
stream: Pin<Box<dyn Stream<Item = TraceBlockOpCodeGasResult> + 'a>>,
}
impl TraceBlockOpcodeGasStream<'_> {
/// Returns the next error result of the stream.
pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> {
loop {
match self.next().await? {
Ok(_) => {}
Err(err) => return Some(err),
}
}
}
}
impl Stream for TraceBlockOpcodeGasStream<'_> {
type Item = TraceBlockOpCodeGasResult;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for TraceBlockOpcodeGasStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TraceBlockOpcodeGasStream").finish_non_exhaustive()
}
}
/// A utility to compare RPC responses from two different clients.
///
/// The `RpcComparer` is designed to perform comparisons between two RPC clients.
/// It is useful in scenarios where there's a need to ensure that two different RPC clients
/// return consistent responses. This can be particularly valuable in testing environments
/// where one might want to compare a test client's responses against a production client
/// or compare two different Ethereum client implementations.
#[derive(Debug)]
pub struct RpcComparer<C1, C2>
where
C1: TraceApiExt,
C2: TraceApiExt,
{
client1: C1,
client2: C2,
}
impl<C1, C2> RpcComparer<C1, C2>
where
C1: TraceApiExt,
C2: TraceApiExt,
{
/// Constructs a new `RpcComparer`.
///
/// Initializes the comparer with two clients that will be used for fetching
/// and comparison.
///
/// # Arguments
///
/// * `client1` - The first RPC client.
/// * `client2` - The second RPC client.
pub const fn new(client1: C1, client2: C2) -> Self {
Self { client1, client2 }
}
/// Compares the `trace_block` responses from the two RPC clients.
///
/// Fetches the `trace_block` responses for the provided block IDs from both clients
/// and compares them. If there are inconsistencies between the two responses, this
/// method will panic with a relevant message indicating the difference.
pub async fn compare_trace_block_responses(&self, block_ids: Vec<BlockId>) {
let stream1 = self.client1.trace_block_buffered(block_ids.clone(), 2);
let stream2 = self.client2.trace_block_buffered(block_ids, 2);
let mut zipped_streams = stream1.zip(stream2);
while let Some((result1, result2)) = zipped_streams.next().await {
match (result1, result2) {
(Ok((ref traces1_data, ref block1)), Ok((ref traces2_data, ref block2))) => {
similar_asserts::assert_eq!(
traces1_data,
traces2_data,
"Mismatch in traces for block: {:?}",
block1
);
assert_eq!(block1, block2, "Mismatch in block ids.");
}
(Err((ref err1, ref block1)), Err((ref err2, ref block2))) => {
assert_eq!(
format!("{err1:?}"),
format!("{err2:?}"),
"Different errors for block: {block1:?}"
);
assert_eq!(block1, block2, "Mismatch in block ids.");
}
_ => panic!("One client returned Ok while the other returned Err."),
}
}
}
/// Compares the `replay_transactions` responses from the two RPC clients.
pub async fn compare_replay_transaction_responses(
&self,
transaction_hashes: Vec<TxHash>,
trace_types: HashSet<TraceType>,
) {
let stream1 =
self.client1.replay_transactions(transaction_hashes.clone(), trace_types.clone());
let stream2 = self.client2.replay_transactions(transaction_hashes, trace_types);
let mut zipped_streams = stream1.zip(stream2);
while let Some((result1, result2)) = zipped_streams.next().await {
match (result1, result2) {
(Ok((ref trace1_data, ref tx_hash1)), Ok((ref trace2_data, ref tx_hash2))) => {
similar_asserts::assert_eq!(
trace1_data,
trace2_data,
"Mismatch in trace results for transaction: {tx_hash1:?}",
);
assert_eq!(tx_hash1, tx_hash2, "Mismatch in transaction hashes.");
}
(Err((ref err1, ref tx_hash1)), Err((ref err2, ref tx_hash2))) => {
assert_eq!(
format!("{err1:?}"),
format!("{err2:?}"),
"Different errors for transaction: {tx_hash1:?}",
);
assert_eq!(tx_hash1, tx_hash2, "Mismatch in transaction hashes.");
}
_ => panic!("One client returned Ok while the other returned Err."),
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_eips::BlockNumberOrTag;
use alloy_rpc_types_trace::filter::TraceFilterMode;
use jsonrpsee::http_client::HttpClientBuilder;
const fn assert_is_stream<St: Stream>(_: &St) {}
#[tokio::test]
async fn can_create_block_stream() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
let block = vec![BlockId::Number(5u64.into()), BlockNumberOrTag::Latest.into()];
let stream = client.trace_block_buffered(block, 2);
assert_is_stream(&stream);
}
#[tokio::test]
#[ignore]
async fn can_create_replay_transaction_stream() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
// Assuming you have some transactions you want to test, replace with actual hashes.
let transactions = vec![
"0x4e08fe36db723a338e852f89f613e606b0c9a17e649b18b01251f86236a2cef3".parse().unwrap(),
"0xea2817f1aeeb587b82f4ab87a6dbd3560fc35ed28de1be280cb40b2a24ab48bb".parse().unwrap(),
];
let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]);
let mut stream = client.replay_transactions(transactions, trace_types);
let mut successes = 0;
let mut failures = 0;
assert_is_stream(&stream);
while let Some(result) = stream.next().await {
match result {
Ok((trace_result, tx_hash)) => {
println!("Success for tx_hash {tx_hash:?}: {trace_result:?}");
successes += 1;
}
Err((error, tx_hash)) => {
println!("Error for tx_hash {tx_hash:?}: {error:?}");
failures += 1;
}
}
}
println!("Total successes: {successes}");
println!("Total failures: {failures}");
}
#[tokio::test]
#[ignore]
async fn can_create_trace_call_many_stream() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
let call_request_1 = TransactionRequest::default();
let call_request_2 = TransactionRequest::default();
let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]);
let calls = vec![(call_request_1, trace_types.clone()), (call_request_2, trace_types)];
let mut stream = client.trace_call_many_stream(calls, None);
assert_is_stream(&stream);
while let Some(result) = stream.next().await {
match result {
Ok(trace_result) => {
println!("Success: {trace_result:?}");
}
Err(error) => {
println!("Error: {error:?}");
}
}
}
}
#[tokio::test]
#[ignore]
async fn can_create_trace_get_stream() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
let tx_hash: B256 = "".parse().unwrap();
let indices: Vec<Index> = vec![Index::from(0)];
let mut stream = client.trace_get_stream(tx_hash, indices);
while let Some(result) = stream.next().await {
match result {
Ok(trace) => {
println!("Received trace: {trace:?}");
}
Err(e) => {
println!("Error fetching trace: {e:?}");
}
}
}
}
#[tokio::test]
#[ignore]
async fn can_create_trace_filter() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
let filter = TraceFilter {
from_block: None,
to_block: None,
from_address: Vec::new(),
to_address: Vec::new(),
mode: TraceFilterMode::Union,
after: None,
count: None,
};
let filters = vec![filter];
let mut stream = client.trace_filter_stream(filters);
while let Some(result) = stream.next().await {
match result {
Ok(trace) => {
println!("Received trace: {trace:?}");
}
Err(e) => {
println!("Error fetching trace: {e:?}");
}
}
}
}
#[tokio::test]
#[ignore]
async fn can_create_trace_call_stream() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
let trace_call_request = TraceCallRequest::default();
let mut stream = client.trace_call_stream(trace_call_request);
let mut successes = 0;
let mut failures = 0;
assert_is_stream(&stream);
while let Some(result) = stream.next().await {
match result {
Ok(trace_result) => {
println!("Success: {trace_result:?}");
successes += 1;
}
Err((error, request)) => {
println!("Error for request {request:?}: {error:?}");
failures += 1;
}
}
}
println!("Total successes: {successes}");
println!("Total failures: {failures}");
}
#[tokio::test]
#[ignore]
async fn block_opcode_gas_stream() {
let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
let block = vec![BlockNumberOrTag::Latest];
let mut stream = client.trace_block_opcode_gas_unordered(block, 2);
assert_is_stream(&stream);
let _opcodes = stream.next().await.unwrap();
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-testing-util/src/debug.rs | crates/rpc/rpc-testing-util/src/debug.rs | //! Helpers for testing debug trace calls.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use alloy_eips::BlockId;
use alloy_primitives::{TxHash, B256};
use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Header, Transaction};
use alloy_rpc_types_trace::{
common::TraceResult,
geth::{GethDebugTracerType, GethDebugTracingOptions, GethTrace},
};
use futures::{Stream, StreamExt};
use jsonrpsee::core::client::Error as RpcError;
use reth_ethereum_primitives::Receipt;
use reth_rpc_api::{clients::DebugApiClient, EthApiClient};
const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js");
const JS_TRACER_TEMPLATE: &str = include_str!("../assets/tracer-template.js");
/// A result type for the `debug_trace_transaction` method that also captures the requested hash.
pub type TraceTransactionResult = Result<(serde_json::Value, TxHash), (RpcError, TxHash)>;
/// A result type for the `debug_trace_block` method that also captures the requested block.
pub type DebugTraceBlockResult =
Result<(Vec<TraceResult<GethTrace, String>>, BlockId), (RpcError, BlockId)>;
/// An extension trait for the Trace API.
pub trait DebugApiExt {
/// The provider type that is used to make the requests.
type Provider;
/// Same as [`DebugApiClient::debug_trace_transaction`] but returns the result as json.
fn debug_trace_transaction_json(
&self,
hash: B256,
opts: GethDebugTracingOptions,
) -> impl Future<Output = Result<serde_json::Value, RpcError>> + Send;
/// Trace all transactions in a block individually with the given tracing opts.
fn debug_trace_transactions_in_block<B>(
&self,
block: B,
opts: GethDebugTracingOptions,
) -> impl Future<Output = Result<DebugTraceTransactionsStream<'_>, RpcError>> + Send
where
B: Into<BlockId> + Send;
/// Trace all given blocks with the given tracing opts, returning a stream.
fn debug_trace_block_buffered_unordered<I, B>(
&self,
params: I,
opts: Option<GethDebugTracingOptions>,
n: usize,
) -> DebugTraceBlockStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId> + Send;
/// method for `debug_traceCall`
fn debug_trace_call_json(
&self,
request: TransactionRequest,
opts: GethDebugTracingOptions,
) -> impl Future<Output = Result<serde_json::Value, RpcError>> + Send;
/// method for `debug_traceCall` using raw JSON strings for the request and options.
fn debug_trace_call_raw_json(
&self,
request_json: String,
opts_json: String,
) -> impl Future<Output = Result<serde_json::Value, RpcError>> + Send;
}
impl<T> DebugApiExt for T
where
T: EthApiClient<TransactionRequest, Transaction, Block, Receipt, Header>
+ DebugApiClient<TransactionRequest>
+ Sync,
{
type Provider = T;
async fn debug_trace_transaction_json(
&self,
hash: B256,
opts: GethDebugTracingOptions,
) -> Result<serde_json::Value, RpcError> {
let mut params = jsonrpsee::core::params::ArrayParams::new();
params.insert(hash).unwrap();
params.insert(opts).unwrap();
self.request("debug_traceTransaction", params).await
}
async fn debug_trace_transactions_in_block<B>(
&self,
block: B,
opts: GethDebugTracingOptions,
) -> Result<DebugTraceTransactionsStream<'_>, RpcError>
where
B: Into<BlockId> + Send,
{
let block = match block.into() {
BlockId::Hash(hash) => self.block_by_hash(hash.block_hash, false).await,
BlockId::Number(tag) => self.block_by_number(tag, false).await,
}?
.ok_or_else(|| RpcError::Custom("block not found".to_string()))?;
let hashes = block.transactions.hashes().map(|tx| (tx, opts.clone())).collect::<Vec<_>>();
let stream = futures::stream::iter(hashes.into_iter().map(move |(tx, opts)| async move {
match self.debug_trace_transaction_json(tx, opts).await {
Ok(result) => Ok((result, tx)),
Err(err) => Err((err, tx)),
}
}))
.buffered(10);
Ok(DebugTraceTransactionsStream { stream: Box::pin(stream) })
}
fn debug_trace_block_buffered_unordered<I, B>(
&self,
params: I,
opts: Option<GethDebugTracingOptions>,
n: usize,
) -> DebugTraceBlockStream<'_>
where
I: IntoIterator<Item = B>,
B: Into<BlockId> + Send,
{
let blocks =
params.into_iter().map(|block| (block.into(), opts.clone())).collect::<Vec<_>>();
let stream =
futures::stream::iter(blocks.into_iter().map(move |(block, opts)| async move {
let trace_future = match block {
BlockId::Hash(hash) => {
self.debug_trace_block_by_hash(hash.block_hash, opts).await
}
BlockId::Number(tag) => self.debug_trace_block_by_number(tag, opts).await,
};
match trace_future {
Ok(result) => Ok((result, block)),
Err(err) => Err((err, block)),
}
}))
.buffer_unordered(n);
DebugTraceBlockStream { stream: Box::pin(stream) }
}
async fn debug_trace_call_json(
&self,
request: TransactionRequest,
opts: GethDebugTracingOptions,
) -> Result<serde_json::Value, RpcError> {
let mut params = jsonrpsee::core::params::ArrayParams::new();
params.insert(request).unwrap();
params.insert(opts).unwrap();
self.request("debug_traceCall", params).await
}
async fn debug_trace_call_raw_json(
&self,
request_json: String,
opts_json: String,
) -> Result<serde_json::Value, RpcError> {
let request = serde_json::from_str::<TransactionRequest>(&request_json)
.map_err(|e| RpcError::Custom(e.to_string()))?;
let opts = serde_json::from_str::<GethDebugTracingOptions>(&opts_json)
.map_err(|e| RpcError::Custom(e.to_string()))?;
self.debug_trace_call_json(request, opts).await
}
}
/// A helper type that can be used to build a javascript tracer.
#[derive(Debug, Clone, Default)]
pub struct JsTracerBuilder {
/// `setup_body` is invoked once at the beginning, during the construction of a given
/// transaction.
setup_body: Option<String>,
/// `fault_body` is invoked when an error happens during the execution of an opcode which
/// wasn't reported in step.
fault_body: Option<String>,
/// `result_body` returns a JSON-serializable value to the RPC caller.
result_body: Option<String>,
/// `enter_body` is invoked on stepping in of an internal call.
enter_body: Option<String>,
/// `step_body` is called for each step of the EVM, or when an error occurs, as the specified
/// transaction is traced.
step_body: Option<String>,
/// `exit_body` is invoked on stepping out of an internal call.
exit_body: Option<String>,
}
impl JsTracerBuilder {
/// Sets the body of the fault function
///
/// The body code has access to the `log` and `db` variables.
pub fn fault_body(mut self, body: impl Into<String>) -> Self {
self.fault_body = Some(body.into());
self
}
/// Sets the body of the setup function
///
/// This body includes the `cfg` object variable
pub fn setup_body(mut self, body: impl Into<String>) -> Self {
self.setup_body = Some(body.into());
self
}
/// Sets the body of the result function
///
/// The body code has access to the `ctx` and `db` variables.
///
/// ```
/// use reth_rpc_api_testing_util::debug::JsTracerBuilder;
/// let code = JsTracerBuilder::default().result_body("return {};").code();
/// ```
pub fn result_body(mut self, body: impl Into<String>) -> Self {
self.result_body = Some(body.into());
self
}
/// Sets the body of the enter function
///
/// The body code has access to the `frame` variable.
pub fn enter_body(mut self, body: impl Into<String>) -> Self {
self.enter_body = Some(body.into());
self
}
/// Sets the body of the step function
///
/// The body code has access to the `log` and `db` variables.
pub fn step_body(mut self, body: impl Into<String>) -> Self {
self.step_body = Some(body.into());
self
}
/// Sets the body of the exit function
///
/// The body code has access to the `res` variable.
pub fn exit_body(mut self, body: impl Into<String>) -> Self {
self.exit_body = Some(body.into());
self
}
/// Returns the tracers JS code
pub fn code(self) -> String {
let mut template = JS_TRACER_TEMPLATE.to_string();
template = template.replace("//<setup>", self.setup_body.as_deref().unwrap_or_default());
template = template.replace("//<fault>", self.fault_body.as_deref().unwrap_or_default());
template =
template.replace("//<result>", self.result_body.as_deref().unwrap_or("return {};"));
template = template.replace("//<step>", self.step_body.as_deref().unwrap_or_default());
template = template.replace("//<enter>", self.enter_body.as_deref().unwrap_or_default());
template = template.replace("//<exit>", self.exit_body.as_deref().unwrap_or_default());
template
}
}
impl std::fmt::Display for JsTracerBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.clone().code())
}
}
impl From<JsTracerBuilder> for GethDebugTracingOptions {
fn from(b: JsTracerBuilder) -> Self {
Self {
tracer: Some(GethDebugTracerType::JsTracer(b.code())),
tracer_config: serde_json::Value::Object(Default::default()).into(),
..Default::default()
}
}
}
impl From<JsTracerBuilder> for Option<GethDebugTracingOptions> {
fn from(b: JsTracerBuilder) -> Self {
Some(b.into())
}
}
/// A stream that yields the traces for the requested blocks.
#[must_use = "streams do nothing unless polled"]
pub struct DebugTraceTransactionsStream<'a> {
stream: Pin<Box<dyn Stream<Item = TraceTransactionResult> + 'a>>,
}
impl DebugTraceTransactionsStream<'_> {
/// Returns the next error result of the stream.
pub async fn next_err(&mut self) -> Option<(RpcError, TxHash)> {
loop {
match self.next().await? {
Ok(_) => {}
Err(err) => return Some(err),
}
}
}
}
impl Stream for DebugTraceTransactionsStream<'_> {
type Item = TraceTransactionResult;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for DebugTraceTransactionsStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DebugTraceTransactionsStream").finish_non_exhaustive()
}
}
/// A stream that yields the `debug_` traces for the requested blocks.
#[must_use = "streams do nothing unless polled"]
pub struct DebugTraceBlockStream<'a> {
stream: Pin<Box<dyn Stream<Item = DebugTraceBlockResult> + 'a>>,
}
impl DebugTraceBlockStream<'_> {
/// Returns the next error result of the stream.
pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> {
loop {
match self.next().await? {
Ok(_) => {}
Err(err) => return Some(err),
}
}
}
}
impl Stream for DebugTraceBlockStream<'_> {
type Item = DebugTraceBlockResult;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.stream.as_mut().poll_next(cx)
}
}
impl std::fmt::Debug for DebugTraceBlockStream<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DebugTraceBlockStream").finish_non_exhaustive()
}
}
/// A javascript tracer that does nothing
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct NoopJsTracer;
impl From<NoopJsTracer> for GethDebugTracingOptions {
fn from(_: NoopJsTracer) -> Self {
Self {
tracer: Some(GethDebugTracerType::JsTracer(NOOP_TRACER.to_string())),
tracer_config: serde_json::Value::Object(Default::default()).into(),
..Default::default()
}
}
}
impl From<NoopJsTracer> for Option<GethDebugTracingOptions> {
fn from(_: NoopJsTracer) -> Self {
Some(NoopJsTracer.into())
}
}
#[cfg(test)]
mod tests {
use crate::{
debug::{DebugApiExt, JsTracerBuilder, NoopJsTracer},
utils::parse_env_url,
};
use alloy_rpc_types_trace::geth::{CallConfig, GethDebugTracingOptions};
use futures::StreamExt;
use jsonrpsee::http_client::HttpClientBuilder;
// random tx <https://sepolia.etherscan.io/tx/0x5525c63a805df2b83c113ebcc8c7672a3b290673c4e81335b410cd9ebc64e085>
const TX_1: &str = "0x5525c63a805df2b83c113ebcc8c7672a3b290673c4e81335b410cd9ebc64e085";
#[tokio::test]
#[ignore]
async fn can_trace_noop_sepolia() {
let tx = TX_1.parse().unwrap();
let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let res =
client.debug_trace_transaction_json(tx, NoopJsTracer::default().into()).await.unwrap();
assert_eq!(res, serde_json::Value::Object(Default::default()));
}
#[tokio::test]
#[ignore]
async fn can_trace_default_template() {
let tx = TX_1.parse().unwrap();
let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let res = client
.debug_trace_transaction_json(tx, JsTracerBuilder::default().into())
.await
.unwrap();
assert_eq!(res, serde_json::Value::Object(Default::default()));
}
#[tokio::test]
#[ignore]
async fn can_debug_trace_block_transactions() {
let block = 11_117_104u64;
let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let opts = GethDebugTracingOptions::default()
.with_call_config(CallConfig::default().only_top_call());
let mut stream = client.debug_trace_transactions_in_block(block, opts).await.unwrap();
while let Some(res) = stream.next().await {
if let Err((err, tx)) = res {
println!("failed to trace {tx:?} {err}");
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-testing-util/src/utils.rs | crates/rpc/rpc-testing-util/src/utils.rs | //! Utils for testing RPC.
/// This will read the value of the given environment variable and parse it as a URL.
///
/// If the value has no http(s) scheme, it will be appended: `http://{var}`.
pub fn parse_env_url(var: &str) -> Result<String, std::env::VarError> {
let var = std::env::var(var)?;
if var.starts_with("http") {
Ok(var)
} else {
Ok(format!("http://{var}"))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-testing-util/tests/it/trace.rs | crates/rpc/rpc-testing-util/tests/it/trace.rs | //! Integration tests for the trace API.
use alloy_primitives::map::HashSet;
use alloy_rpc_types_eth::{Block, Header, Transaction, TransactionRequest};
use alloy_rpc_types_trace::{
filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest,
};
use futures::StreamExt;
use jsonrpsee::http_client::HttpClientBuilder;
use jsonrpsee_http_client::HttpClient;
use reth_ethereum_primitives::Receipt;
use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url};
use reth_rpc_eth_api::EthApiClient;
use std::time::Instant;
/// This is intended to be run locally against a running node.
///
/// This is a noop of env var `RETH_RPC_TEST_NODE_URL` is not set.
#[tokio::test(flavor = "multi_thread")]
async fn trace_many_blocks() {
let url = parse_env_url("RETH_RPC_TEST_NODE_URL");
if url.is_err() {
return
}
let url = url.unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let mut stream = client.trace_block_buffered_unordered(15_000_000..=16_000_100, 20);
let now = Instant::now();
while let Some((err, block)) = stream.next_err().await {
eprintln!("Error tracing block {block:?}: {err}");
}
println!("Traced all blocks in {:?}", now.elapsed());
}
/// Tests the replaying of transactions on a local Ethereum node.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn replay_transactions() {
let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let tx_hashes = vec![
"0x4e08fe36db723a338e852f89f613e606b0c9a17e649b18b01251f86236a2cef3".parse().unwrap(),
"0xea2817f1aeeb587b82f4ab87a6dbd3560fc35ed28de1be280cb40b2a24ab48bb".parse().unwrap(),
];
let trace_types = HashSet::from_iter([TraceType::StateDiff, TraceType::VmTrace]);
let mut stream = client.replay_transactions(tx_hashes, trace_types);
let now = Instant::now();
while let Some(replay_txs) = stream.next().await {
println!("Transaction: {replay_txs:?}");
println!("Replayed transactions in {:?}", now.elapsed());
}
}
/// Tests the tracers filters on a local Ethereum node
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn trace_filters() {
// Parse the node URL from environment variable and create an HTTP client.
let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
// Set up trace filters.
let filter = TraceFilter::default();
let filters = vec![filter];
// Initialize a stream for the trace filters.
let mut stream = client.trace_filter_stream(filters);
let start_time = Instant::now();
while let Some(trace) = stream.next().await {
println!("Transaction Trace: {trace:?}");
println!("Duration since test start: {:?}", start_time.elapsed());
}
}
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn trace_call() {
let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let trace_call_request = TraceCallRequest::default();
let mut stream = client.trace_call_stream(trace_call_request);
let start_time = Instant::now();
while let Some(result) = stream.next().await {
match result {
Ok(trace_result) => {
println!("Trace Result: {trace_result:?}");
}
Err((error, request)) => {
eprintln!("Error for request {request:?}: {error:?}");
}
}
}
println!("Completed in {:?}", start_time.elapsed());
}
/// This is intended to be run locally against a running node. This traces all blocks for a given
/// chain.
///
/// This is a noop of env var `RETH_RPC_TEST_NODE_URL` is not set.
#[tokio::test(flavor = "multi_thread")]
async fn debug_trace_block_entire_chain() {
let url = parse_env_url("RETH_RPC_TEST_NODE_URL");
if url.is_err() {
return
}
let url = url.unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let current_block: u64 = <HttpClient as EthApiClient<
TransactionRequest,
Transaction,
Block,
Receipt,
Header,
>>::block_number(&client)
.await
.unwrap()
.try_into()
.unwrap();
let range = 0..=current_block;
let mut stream = client.debug_trace_block_buffered_unordered(range, None, 20);
let now = Instant::now();
while let Some((err, block)) = stream.next_err().await {
eprintln!("Error tracing block {block:?}: {err}");
}
println!("Traced all blocks in {:?}", now.elapsed());
}
/// This is intended to be run locally against a running node. This traces all blocks for a given
/// chain.
///
/// This is a noop of env var `RETH_RPC_TEST_NODE_URL` is not set.
#[tokio::test(flavor = "multi_thread")]
async fn debug_trace_block_opcodes_entire_chain() {
let opcodes7702 = ["EXTCODESIZE", "EXTCODECOPY", "EXTCODEHASH"];
let url = parse_env_url("RETH_RPC_TEST_NODE_URL");
if url.is_err() {
return
}
let url = url.unwrap();
let client = HttpClientBuilder::default().build(url).unwrap();
let current_block: u64 = <HttpClient as EthApiClient<
TransactionRequest,
Transaction,
Block,
Receipt,
Header,
>>::block_number(&client)
.await
.unwrap()
.try_into()
.unwrap();
let range = 0..=current_block;
println!("Tracing blocks {range:?} for opcodes");
let mut stream = client.trace_block_opcode_gas_unordered(range, 2).enumerate();
let now = Instant::now();
while let Some((num, next)) = stream.next().await {
match next {
Ok((block_opcodes, block)) => {
for opcode in opcodes7702 {
if block_opcodes.contains(opcode) {
eprintln!("Found opcode {opcode}: in {block}");
}
}
}
Err((err, block)) => {
eprintln!("Error tracing block {block:?}: {err}");
}
};
if num % 10000 == 0 {
println!("Traced {num} blocks");
}
}
println!("Traced all blocks in {:?}", now.elapsed());
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-testing-util/tests/it/main.rs | crates/rpc/rpc-testing-util/tests/it/main.rs | #![allow(missing_docs)]
mod trace;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/lib.rs | crates/rpc/ipc/src/lib.rs | //! Reth IPC transport implementation
//!
//! ## Feature Flags
//!
//! - `client`: Enables JSON-RPC client support.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod client;
pub mod server;
/// Json codec implementation
pub mod stream_codec;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/stream_codec.rs | crates/rpc/ipc/src/stream_codec.rs | // Copyright (c) 2015-2017 Parity Technologies Limited
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
// This basis of this file has been taken from the deprecated jsonrpc codebase:
// https://github.com/paritytech/jsonrpc
use bytes::BytesMut;
use std::{io, str};
/// Separator for enveloping messages in streaming codecs
#[derive(Debug, Clone)]
pub enum Separator {
/// No envelope is expected between messages. Decoder will try to figure out
/// message boundaries by accumulating incoming bytes until valid JSON is formed.
/// Encoder will send messages without any boundaries between requests.
Empty,
/// Byte is used as a sentinel between messages
Byte(u8),
}
impl Default for Separator {
fn default() -> Self {
Self::Byte(b'\n')
}
}
/// Stream codec for streaming protocols (ipc, tcp)
#[derive(Debug, Default)]
pub struct StreamCodec {
incoming_separator: Separator,
outgoing_separator: Separator,
}
impl StreamCodec {
/// Default codec with streaming input data. Input can be both enveloped and not.
pub fn stream_incoming() -> Self {
Self::new(Separator::Empty, Default::default())
}
/// New custom stream codec
pub const fn new(incoming_separator: Separator, outgoing_separator: Separator) -> Self {
Self { incoming_separator, outgoing_separator }
}
}
#[inline]
const fn is_whitespace(byte: u8) -> bool {
matches!(byte, 0x0D | 0x0A | 0x20 | 0x09)
}
impl tokio_util::codec::Decoder for StreamCodec {
type Item = String;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<Self::Item>> {
if let Separator::Byte(separator) = self.incoming_separator {
if let Some(i) = buf.as_ref().iter().position(|&b| b == separator) {
let line = buf.split_to(i);
let _ = buf.split_to(1);
match str::from_utf8(line.as_ref()) {
Ok(s) => Ok(Some(s.to_string())),
Err(_) => Err(io::Error::other("invalid UTF-8")),
}
} else {
Ok(None)
}
} else {
let mut depth = 0;
let mut in_str = false;
let mut is_escaped = false;
let mut start_idx = 0;
let mut whitespaces = 0;
for idx in 0..buf.as_ref().len() {
let byte = buf.as_ref()[idx];
if (byte == b'{' || byte == b'[') && !in_str {
if depth == 0 {
start_idx = idx;
}
depth += 1;
} else if (byte == b'}' || byte == b']') && !in_str {
depth -= 1;
} else if byte == b'"' && !is_escaped {
in_str = !in_str;
} else if is_whitespace(byte) {
whitespaces += 1;
}
is_escaped = byte == b'\\' && !is_escaped && in_str;
if depth == 0 && idx != start_idx && idx - start_idx + 1 > whitespaces {
let bts = buf.split_to(idx + 1);
return match String::from_utf8(bts.into()) {
Ok(val) => Ok(Some(val)),
Err(_) => Ok(None),
}
}
}
Ok(None)
}
}
}
impl tokio_util::codec::Encoder<String> for StreamCodec {
type Error = io::Error;
fn encode(&mut self, msg: String, buf: &mut BytesMut) -> io::Result<()> {
let mut payload = msg.into_bytes();
if let Separator::Byte(separator) = self.outgoing_separator {
payload.push(separator);
}
buf.extend_from_slice(&payload);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bytes::BufMut;
use tokio_util::codec::Decoder;
#[test]
fn simple_encode() {
let mut buf = BytesMut::with_capacity(2048);
buf.put_slice(b"{ test: 1 }{ test: 2 }{ test: 3 }");
let mut codec = StreamCodec::stream_incoming();
let request = codec
.decode(&mut buf)
.expect("There should be no error in simple test")
.expect("There should be at least one request in simple test");
assert_eq!(request, "{ test: 1 }");
}
#[test]
fn escape() {
let mut buf = BytesMut::with_capacity(2048);
buf.put_slice(br#"{ test: "\"\\" }{ test: "\ " }{ test: "\}" }[ test: "\]" ]"#);
let mut codec = StreamCodec::stream_incoming();
let request = codec
.decode(&mut buf)
.expect("There should be no error in first escape test")
.expect("There should be a request in first escape test");
assert_eq!(request, r#"{ test: "\"\\" }"#);
let request2 = codec
.decode(&mut buf)
.expect("There should be no error in 2nd escape test")
.expect("There should be a request in 2nd escape test");
assert_eq!(request2, r#"{ test: "\ " }"#);
let request3 = codec
.decode(&mut buf)
.expect("There should be no error in 3rd escape test")
.expect("There should be a request in 3rd escape test");
assert_eq!(request3, r#"{ test: "\}" }"#);
let request4 = codec
.decode(&mut buf)
.expect("There should be no error in 4th escape test")
.expect("There should be a request in 4th escape test");
assert_eq!(request4, r#"[ test: "\]" ]"#);
}
#[test]
fn whitespace() {
let mut buf = BytesMut::with_capacity(2048);
buf.put_slice(b"{ test: 1 }\n\n\n\n{ test: 2 }\n\r{\n test: 3 } ");
let mut codec = StreamCodec::stream_incoming();
let request = codec
.decode(&mut buf)
.expect("There should be no error in first whitespace test")
.expect("There should be a request in first whitespace test");
assert_eq!(request, "{ test: 1 }");
let request2 = codec
.decode(&mut buf)
.expect("There should be no error in first 2nd test")
.expect("There should be a request in 2nd whitespace test");
// TODO: maybe actually trim it out
assert_eq!(request2, "\n\n\n\n{ test: 2 }");
let request3 = codec
.decode(&mut buf)
.expect("There should be no error in first 3rd test")
.expect("There should be a request in 3rd whitespace test");
assert_eq!(request3, "\n\r{\n test: 3 }");
let request4 = codec.decode(&mut buf).expect("There should be no error in first 4th test");
assert!(
request4.is_none(),
"There should be no 4th request because it contains only whitespaces"
);
}
#[test]
fn fragmented_encode() {
let mut buf = BytesMut::with_capacity(2048);
buf.put_slice(b"{ test: 1 }{ test: 2 }{ tes");
let mut codec = StreamCodec::stream_incoming();
let request = codec
.decode(&mut buf)
.expect("There should be no error in first fragmented test")
.expect("There should be at least one request in first fragmented test");
assert_eq!(request, "{ test: 1 }");
codec
.decode(&mut buf)
.expect("There should be no error in second fragmented test")
.expect("There should be at least one request in second fragmented test");
assert_eq!(String::from_utf8(buf.as_ref().to_vec()).unwrap(), "{ tes");
buf.put_slice(b"t: 3 }");
let request = codec
.decode(&mut buf)
.expect("There should be no error in third fragmented test")
.expect("There should be at least one request in third fragmented test");
assert_eq!(request, "{ test: 3 }");
}
#[test]
fn huge() {
let request = r#"
{
"jsonrpc":"2.0",
"method":"say_hello",
"params": [
42,
0,
{
"from":"0xb60e8dd61c5d32be8058bb8eb970870f07233155",
"gas":"0x2dc6c0",
"data":"0x606060405260003411156010576002565b6001805433600160a060020a0319918216811790925560028054909116909117905561291f806100406000396000f3606060405236156100e55760e060020a600035046304029f2381146100ed5780630a1273621461015f57806317c1dd87146102335780631f9ea25d14610271578063266fa0e91461029357806349593f5314610429578063569aa0d8146104fc57806359a4669f14610673578063647a4d5f14610759578063656104f5146108095780636e9febfe1461082b57806370de8c6e1461090d57806371bde852146109ed5780638f30435d14610ab4578063916dbc1714610da35780639f5a7cd414610eef578063c91540f614610fe6578063eae99e1c146110b5578063fedc2a281461115a575b61122d610002565b61122d6004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650509335935050604435915050606435600154600090600160a060020a03908116339091161461233357610002565b61122f6004808035906020019082018035906020019191908080601f016020809104026020016040519081016040528093929190818152602001838380828437509496505093359350506044359150506064355b60006000600060005086604051808280519060200190808383829060006004602084601f0104600f02600301f1509050019150509081526020016040518091039020600050905042816005016000508560ff1660028110156100025760040201835060010154604060020a90046001604060020a0316116115df576115d6565b6112416004355b604080516001604060020a038316408152606060020a33600160a060020a031602602082015290519081900360340190205b919050565b61122d600435600254600160a060020a0390811633909116146128e357610002565b61125e6004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650509335935050505060006000600060006000600060005087604051808280519060200190808383829060006004602084601f0104600f02600301f1509050019150509081526020016040518091039020600050905080600001600050600087600160a060020a0316815260200190815260200160002060005060000160059054906101000a90046001604060020a03169450845080600001600050600087600160a060020a03168152602001908152602001600020600050600001600d9054906101000a90046001604060
020a03169350835080600001600050600087600160a060020a0316815260200190815260200160002060005060000160009054906101000a900460ff169250825080600001600050600087600160a060020a0316815260200190815260200160002060005060000160019054906101000a900463ffffffff16915081505092959194509250565b61122d6004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650509335935050604435915050606435608435600060006000600060005088604051808280519060200190808383829060006004602084601f0104600f02600301f15090500191505090815260200160405180910390206000509250346000141515611c0e5760405133600160a060020a0316908290349082818181858883f193505050501515611c1a57610002565b6112996004808035906020019082018035906020019191908080601f01602080910402602001604051908101604052809392919081815260200183838082843750949650509335935050604435915050600060006000600060006000600060006000508a604051808280519060200190808383829060006004602084601f0104600f02600301f15090500191505090815260200160405180910390206000509050806001016000508960ff16600281101561000257600160a060020a038a168452828101600101602052604084205463ffffffff1698506002811015610002576040842054606060020a90046001604060020a031697506002811015610002576040842054640100000000900463ffffffff169650600281101561000257604084206001015495506002811015610002576040842054604060020a900463ffffffff169450600281101561000257505060409091205495999498509296509094509260a060020a90046001604060020a0316919050565b61122d6004808035906020019082018035906020019191908080601f016020809104026020016040519081016040528093929190818152602001838380828437509496505050505050506000600060005082604051808280519060200190808383829060006004602084601f0104600f02600301f15090500191505090815260200160405180910390206000509050348160050160005082600d0160009054906101000a900460ff1660ff16600281101561000257600402830160070180546001608060020a0381169093016001608060020a03199390931692909217909155505b5050565b6112e26004808035906020019082018035906020019191908080601f016020809100034234230947349871034987120938
47102938740192387401349857109487501938475"
}
]
}"#;
let mut buf = BytesMut::with_capacity(65536);
buf.put_slice(request.as_bytes());
let mut codec = StreamCodec::stream_incoming();
let parsed_request = codec
.decode(&mut buf)
.expect("There should be no error in huge test")
.expect("There should be at least one request huge test");
assert_eq!(request, parsed_request);
}
#[test]
fn simple_line_codec() {
let mut buf = BytesMut::with_capacity(2048);
buf.put_slice(b"{ test: 1 }\n{ test: 2 }\n{ test: 3 }");
let mut codec = StreamCodec::default();
let request = codec
.decode(&mut buf)
.expect("There should be no error in simple test")
.expect("There should be at least one request in simple test");
let request2 = codec
.decode(&mut buf)
.expect("There should be no error in simple test")
.expect("There should be at least one request in simple test");
assert_eq!(request, "{ test: 1 }");
assert_eq!(request2, "{ test: 2 }");
}
#[test]
fn serde_json_accepts_whitespace_wrapped_json() {
let json = " { \"key\": \"value\" } ";
#[derive(serde::Deserialize, Debug, PartialEq)]
struct Obj {
key: String,
}
let parsed: Result<Obj, _> = serde_json::from_str(json);
assert!(parsed.is_ok(), "serde_json should accept whitespace-wrapped JSON");
assert_eq!(parsed.unwrap(), Obj { key: "value".into() });
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/server/connection.rs | crates/rpc/ipc/src/server/connection.rs | //! An IPC connection.
use crate::stream_codec::StreamCodec;
use futures::{stream::FuturesUnordered, FutureExt, Sink, Stream};
use std::{
collections::VecDeque,
future::Future,
io,
pin::Pin,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::codec::Framed;
use tower::Service;
pub(crate) type JsonRpcStream<T> = Framed<T, StreamCodec>;
#[pin_project::pin_project]
pub(crate) struct IpcConn<T>(#[pin] pub(crate) T);
impl<T> Stream for IpcConn<JsonRpcStream<T>>
where
T: AsyncRead + AsyncWrite,
{
type Item = io::Result<String>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.project().0.poll_next(cx)
}
}
impl<T> Sink<String> for IpcConn<JsonRpcStream<T>>
where
T: AsyncRead + AsyncWrite,
{
type Error = io::Error;
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// NOTE: we always flush here this prevents buffering in the underlying
// `Framed` impl that would cause stalled requests
self.project().0.poll_flush(cx)
}
fn start_send(self: Pin<&mut Self>, item: String) -> Result<(), Self::Error> {
self.project().0.start_send(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.project().0.poll_flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.project().0.poll_close(cx)
}
}
/// Drives an [`IpcConn`] forward.
///
/// This forwards received requests from the connection to the service and sends responses to the
/// connection.
///
/// This future terminates when the connection is closed.
#[pin_project::pin_project]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub(crate) struct IpcConnDriver<T, S, Fut> {
#[pin]
pub(crate) conn: IpcConn<JsonRpcStream<T>>,
pub(crate) service: S,
/// rpc requests in progress
#[pin]
pub(crate) pending_calls: FuturesUnordered<Fut>,
pub(crate) items: VecDeque<String>,
}
impl<T, S, Fut> IpcConnDriver<T, S, Fut> {
/// Add a new item to the send queue.
pub(crate) fn push_back(&mut self, item: String) {
self.items.push_back(item);
}
}
impl<T, S> Future for IpcConnDriver<T, S, S::Future>
where
S: Service<String, Response = Option<String>> + Send + 'static,
S::Error: Into<Box<dyn core::error::Error + Send + Sync>>,
S::Future: Send + Unpin,
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
// items are also pushed from external
// this will act as a manual yield point to reduce latencies of the polling future that may
// submit items from an additional source (subscription)
let mut budget = 5;
// ensure we still have enough budget for another iteration
'outer: loop {
budget -= 1;
if budget == 0 {
// make sure we're woken up again
cx.waker().wake_by_ref();
return Poll::Pending
}
// write all responses to the sink
while this.conn.as_mut().poll_ready(cx).is_ready() {
if let Some(item) = this.items.pop_front() {
if let Err(err) = this.conn.as_mut().start_send(item) {
tracing::warn!("IPC response failed: {:?}", err);
return Poll::Ready(())
}
} else {
break
}
}
'inner: loop {
// drain all calls that are ready and put them in the output item queue
let drained = if this.pending_calls.is_empty() {
false
} else {
if let Poll::Ready(Some(res)) = this.pending_calls.as_mut().poll_next(cx) {
let item = match res {
Ok(Some(resp)) => resp,
Ok(None) => continue 'inner,
Err(err) => err.into().to_string(),
};
this.items.push_back(item);
continue 'outer;
}
true
};
// read from the stream
match this.conn.as_mut().poll_next(cx) {
Poll::Ready(res) => match res {
Some(Ok(item)) => {
let mut call = this.service.call(item);
match call.poll_unpin(cx) {
Poll::Ready(res) => {
let item = match res {
Ok(Some(resp)) => resp,
Ok(None) => continue 'inner,
Err(err) => err.into().to_string(),
};
this.items.push_back(item);
continue 'outer
}
Poll::Pending => {
this.pending_calls.push(call);
}
}
}
Some(Err(err)) => {
// this can happen if the client closes the connection
tracing::debug!("IPC request failed: {:?}", err);
return Poll::Ready(())
}
None => return Poll::Ready(()),
},
Poll::Pending => {
if drained || this.pending_calls.is_empty() {
// at this point all things are pending
return Poll::Pending
}
}
}
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/server/mod.rs | crates/rpc/ipc/src/server/mod.rs | //! JSON-RPC IPC server implementation
use crate::server::connection::{IpcConn, JsonRpcStream};
use futures::StreamExt;
use futures_util::future::Either;
use interprocess::local_socket::{
tokio::prelude::{LocalSocketListener, LocalSocketStream},
traits::tokio::{Listener, Stream},
GenericFilePath, ListenerOptions, ToFsName,
};
use jsonrpsee::{
core::{middleware::layer::RpcLoggerLayer, JsonRawValue, TEN_MB_SIZE_BYTES},
server::{
middleware::rpc::RpcServiceT, stop_channel, ConnectionGuard, ConnectionPermit, IdProvider,
RandomIntegerIdProvider, ServerHandle, StopHandle,
},
BoundedSubscriptions, MethodResponse, MethodSink, Methods,
};
use std::{
future::Future,
io,
pin::{pin, Pin},
sync::Arc,
task::{Context, Poll},
};
use tokio::{
io::{AsyncRead, AsyncWrite, AsyncWriteExt},
sync::oneshot,
};
use tower::{layer::util::Identity, Layer, Service};
use tracing::{debug, instrument, trace, warn, Instrument};
// re-export so can be used during builder setup
use crate::{
server::{connection::IpcConnDriver, rpc_service::RpcServiceCfg},
stream_codec::StreamCodec,
};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tower::layer::{util::Stack, LayerFn};
mod connection;
mod ipc;
mod rpc_service;
pub use rpc_service::RpcService;
/// Ipc Server implementation
///
/// This is an adapted `jsonrpsee` Server, but for `Ipc` connections.
pub struct IpcServer<HttpMiddleware = Identity, RpcMiddleware = Identity> {
/// The endpoint we listen for incoming transactions
endpoint: String,
id_provider: Arc<dyn IdProvider>,
cfg: Settings,
rpc_middleware: RpcServiceBuilder<RpcMiddleware>,
http_middleware: tower::ServiceBuilder<HttpMiddleware>,
}
impl<HttpMiddleware, RpcMiddleware> IpcServer<HttpMiddleware, RpcMiddleware> {
/// Returns the configured endpoint
pub fn endpoint(&self) -> String {
self.endpoint.clone()
}
}
impl<HttpMiddleware, RpcMiddleware> IpcServer<HttpMiddleware, RpcMiddleware>
where
RpcMiddleware: for<'a> Layer<RpcService, Service: RpcServiceT> + Clone + Send + 'static,
HttpMiddleware: Layer<
TowerServiceNoHttp<RpcMiddleware>,
Service: Service<
String,
Response = Option<String>,
Error = Box<dyn core::error::Error + Send + Sync + 'static>,
Future: Send + Unpin,
> + Send,
> + Send
+ 'static,
{
/// Start responding to connections requests.
///
/// This will run on the tokio runtime until the server is stopped or the `ServerHandle` is
/// dropped.
///
/// ```
/// use jsonrpsee::RpcModule;
/// use reth_ipc::server::Builder;
/// async fn run_server() -> Result<(), Box<dyn core::error::Error + Send + Sync>> {
/// let server = Builder::default().build("/tmp/my-uds".into());
/// let mut module = RpcModule::new(());
/// module.register_method("say_hello", |_, _, _| "lo")?;
/// let handle = server.start(module).await?;
///
/// // In this example we don't care about doing shutdown so let's it run forever.
/// // You may use the `ServerHandle` to shut it down or manage it yourself.
/// let server = tokio::spawn(handle.stopped());
/// server.await.unwrap();
/// Ok(())
/// }
/// ```
pub async fn start(
mut self,
methods: impl Into<Methods>,
) -> Result<ServerHandle, IpcServerStartError> {
let methods = methods.into();
let (stop_handle, server_handle) = stop_channel();
// use a signal channel to wait until we're ready to accept connections
let (tx, rx) = oneshot::channel();
match self.cfg.tokio_runtime.take() {
Some(rt) => rt.spawn(self.start_inner(methods, stop_handle, tx)),
None => tokio::spawn(self.start_inner(methods, stop_handle, tx)),
};
rx.await.expect("channel is open")?;
Ok(server_handle)
}
async fn start_inner(
self,
methods: Methods,
stop_handle: StopHandle,
on_ready: oneshot::Sender<Result<(), IpcServerStartError>>,
) {
trace!(endpoint = ?self.endpoint, "starting ipc server");
if cfg!(unix) {
// ensure the file does not exist
if std::fs::remove_file(&self.endpoint).is_ok() {
debug!(endpoint = ?self.endpoint, "removed existing IPC endpoint file");
}
}
let listener = match self
.endpoint
.as_str()
.to_fs_name::<GenericFilePath>()
.and_then(|name| ListenerOptions::new().name(name).create_tokio())
{
Ok(listener) => {
#[cfg(unix)]
{
// set permissions only on unix
use std::os::unix::fs::PermissionsExt;
if let Some(perms_str) = &self.cfg.ipc_socket_permissions {
if let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8) {
let perms = std::fs::Permissions::from_mode(mode);
let _ = std::fs::set_permissions(&self.endpoint, perms);
}
}
}
listener
}
Err(err) => {
on_ready
.send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err }))
.ok();
return;
}
};
// signal that we're ready to accept connections
on_ready.send(Ok(())).ok();
let mut id: u32 = 0;
let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize);
let stopped = stop_handle.clone().shutdown();
let mut stopped = pin!(stopped);
let (drop_on_completion, mut process_connection_awaiter) = mpsc::channel::<()>(1);
trace!("accepting ipc connections");
loop {
match try_accept_conn(&listener, stopped).await {
AcceptConnection::Established { local_socket_stream, stop } => {
let Some(conn_permit) = connection_guard.try_acquire() else {
let (_reader, mut writer) = local_socket_stream.split();
let _ = writer
.write_all(b"Too many connections. Please try again later.")
.await;
stopped = stop;
continue;
};
let max_conns = connection_guard.max_connections();
let curr_conns = max_conns - connection_guard.available_connections();
trace!("Accepting new connection {}/{}", curr_conns, max_conns);
let conn_permit = Arc::new(conn_permit);
process_connection(ProcessConnection {
http_middleware: &self.http_middleware,
rpc_middleware: self.rpc_middleware.clone(),
conn_permit,
conn_id: id,
server_cfg: self.cfg.clone(),
stop_handle: stop_handle.clone(),
drop_on_completion: drop_on_completion.clone(),
methods: methods.clone(),
id_provider: self.id_provider.clone(),
local_socket_stream,
});
id = id.wrapping_add(1);
stopped = stop;
}
AcceptConnection::Shutdown => {
break;
}
AcceptConnection::Err((err, stop)) => {
tracing::error!(%err, "Failed accepting a new IPC connection");
stopped = stop;
}
}
}
// Drop the last Sender
drop(drop_on_completion);
// Once this channel is closed it is safe to assume that all connections have been
// gracefully shutdown
while process_connection_awaiter.recv().await.is_some() {
// Generally, messages should not be sent across this channel,
// but we'll loop here to wait for `None` just to be on the safe side
}
}
}
enum AcceptConnection<S> {
Shutdown,
Established { local_socket_stream: LocalSocketStream, stop: S },
Err((io::Error, S)),
}
async fn try_accept_conn<S>(listener: &LocalSocketListener, stopped: S) -> AcceptConnection<S>
where
S: Future + Unpin,
{
match futures_util::future::select(pin!(listener.accept()), stopped).await {
Either::Left((res, stop)) => match res {
Ok(local_socket_stream) => AcceptConnection::Established { local_socket_stream, stop },
Err(e) => AcceptConnection::Err((e, stop)),
},
Either::Right(_) => AcceptConnection::Shutdown,
}
}
impl std::fmt::Debug for IpcServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("IpcServer")
.field("endpoint", &self.endpoint)
.field("cfg", &self.cfg)
.field("id_provider", &self.id_provider)
.finish()
}
}
/// Error thrown when server couldn't be started.
#[derive(Debug, thiserror::Error)]
#[error("failed to listen on ipc endpoint `{endpoint}`: {source}")]
pub struct IpcServerStartError {
endpoint: String,
#[source]
source: io::Error,
}
/// Data required by the server to handle requests received via an IPC connection
#[derive(Debug, Clone)]
#[allow(dead_code)]
pub(crate) struct ServiceData {
/// Registered server methods.
pub(crate) methods: Methods,
/// Subscription ID provider.
pub(crate) id_provider: Arc<dyn IdProvider>,
/// Stop handle.
pub(crate) stop_handle: StopHandle,
/// Connection ID
pub(crate) conn_id: u32,
/// Connection Permit.
pub(crate) conn_permit: Arc<ConnectionPermit>,
/// Limits the number of subscriptions for this connection
pub(crate) bounded_subscriptions: BoundedSubscriptions,
/// Sink that is used to send back responses to the connection.
///
/// This is used for subscriptions.
pub(crate) method_sink: MethodSink,
/// `ServerConfig`
pub(crate) server_cfg: Settings,
}
/// Similar to [`tower::ServiceBuilder`] but doesn't
/// support any tower middleware implementations.
#[derive(Debug, Clone)]
pub struct RpcServiceBuilder<L>(tower::ServiceBuilder<L>);
impl Default for RpcServiceBuilder<Identity> {
fn default() -> Self {
Self(tower::ServiceBuilder::new())
}
}
impl RpcServiceBuilder<Identity> {
/// Create a new [`RpcServiceBuilder`].
pub const fn new() -> Self {
Self(tower::ServiceBuilder::new())
}
}
impl<L> RpcServiceBuilder<L> {
/// Optionally add a new layer `T` to the [`RpcServiceBuilder`].
///
/// See the documentation for [`tower::ServiceBuilder::option_layer`] for more details.
pub fn option_layer<T>(
self,
layer: Option<T>,
) -> RpcServiceBuilder<Stack<Either<T, Identity>, L>> {
let layer = if let Some(layer) = layer {
Either::Left(layer)
} else {
Either::Right(Identity::new())
};
self.layer(layer)
}
/// Add a new layer `T` to the [`RpcServiceBuilder`].
///
/// See the documentation for [`tower::ServiceBuilder::layer`] for more details.
pub fn layer<T>(self, layer: T) -> RpcServiceBuilder<Stack<T, L>> {
RpcServiceBuilder(self.0.layer(layer))
}
/// Add a [`tower::Layer`] built from a function that accepts a service and returns another
/// service.
///
/// See the documentation for [`tower::ServiceBuilder::layer_fn`] for more details.
pub fn layer_fn<F>(self, f: F) -> RpcServiceBuilder<Stack<LayerFn<F>, L>> {
RpcServiceBuilder(self.0.layer_fn(f))
}
/// Add a logging layer to [`RpcServiceBuilder`]
///
/// This logs each request and response for every call.
pub fn rpc_logger(self, max_log_len: u32) -> RpcServiceBuilder<Stack<RpcLoggerLayer, L>> {
RpcServiceBuilder(self.0.layer(RpcLoggerLayer::new(max_log_len)))
}
/// Wrap the service `S` with the middleware.
pub(crate) fn service<S>(&self, service: S) -> L::Service
where
L: tower::Layer<S>,
{
self.0.service(service)
}
}
/// `JsonRPSee` service compatible with `tower`.
///
/// # Note
/// This is similar to [`hyper::service::service_fn`](https://docs.rs/hyper/latest/hyper/service/fn.service_fn.html).
#[derive(Debug, Clone)]
pub struct TowerServiceNoHttp<L> {
inner: ServiceData,
rpc_middleware: RpcServiceBuilder<L>,
}
impl<RpcMiddleware> Service<String> for TowerServiceNoHttp<RpcMiddleware>
where
RpcMiddleware: for<'a> Layer<RpcService>,
for<'a> <RpcMiddleware as Layer<RpcService>>::Service:
Send + Sync + 'static + RpcServiceT<MethodResponse = MethodResponse>,
{
/// The response of a handled RPC call
///
/// This is an `Option` because subscriptions and call responses are handled differently.
/// This will be `Some` for calls, and `None` for subscriptions, because the subscription
/// response will be emitted via the `method_sink`.
type Response = Option<String>;
type Error = Box<dyn core::error::Error + Send + Sync + 'static>;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
/// Opens door for back pressure implementation.
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, request: String) -> Self::Future {
trace!("{:?}", request);
let cfg = RpcServiceCfg::CallsAndSubscriptions {
bounded_subscriptions: BoundedSubscriptions::new(
self.inner.server_cfg.max_subscriptions_per_connection,
),
id_provider: self.inner.id_provider.clone(),
sink: self.inner.method_sink.clone(),
};
let max_response_body_size = self.inner.server_cfg.max_response_body_size as usize;
let max_request_body_size = self.inner.server_cfg.max_request_body_size as usize;
let conn = self.inner.conn_permit.clone();
let rpc_service = self.rpc_middleware.service(RpcService::new(
self.inner.methods.clone(),
max_response_body_size,
self.inner.conn_id.into(),
cfg,
));
// an ipc connection needs to handle read+write concurrently
// even if the underlying rpc handler spawns the actual work or is does a lot of async any
// additional overhead performed by `handle_request` can result in I/O latencies, for
// example tracing calls are relatively CPU expensive on serde::serialize alone, moving this
// work to a separate task takes the pressure off the connection so all concurrent responses
// are also serialized concurrently and the connection can focus on read+write
let f = tokio::task::spawn(async move {
ipc::call_with_service(
request,
rpc_service,
max_response_body_size,
max_request_body_size,
conn,
)
.await
});
Box::pin(async move { f.await.map_err(|err| err.into()) })
}
}
struct ProcessConnection<'a, HttpMiddleware, RpcMiddleware> {
http_middleware: &'a tower::ServiceBuilder<HttpMiddleware>,
rpc_middleware: RpcServiceBuilder<RpcMiddleware>,
conn_permit: Arc<ConnectionPermit>,
conn_id: u32,
server_cfg: Settings,
stop_handle: StopHandle,
drop_on_completion: mpsc::Sender<()>,
methods: Methods,
id_provider: Arc<dyn IdProvider>,
local_socket_stream: LocalSocketStream,
}
/// Spawns the IPC connection onto a new task
#[instrument(name = "connection", skip_all, fields(conn_id = %params.conn_id), level = "INFO")]
fn process_connection<RpcMiddleware, HttpMiddleware>(
params: ProcessConnection<'_, HttpMiddleware, RpcMiddleware>,
) where
RpcMiddleware: Layer<RpcService> + Clone + Send + 'static,
for<'a> <RpcMiddleware as Layer<RpcService>>::Service: RpcServiceT,
HttpMiddleware: Layer<TowerServiceNoHttp<RpcMiddleware>> + Send + 'static,
<HttpMiddleware as Layer<TowerServiceNoHttp<RpcMiddleware>>>::Service: Send
+ Service<
String,
Response = Option<String>,
Error = Box<dyn core::error::Error + Send + Sync + 'static>,
>,
<<HttpMiddleware as Layer<TowerServiceNoHttp<RpcMiddleware>>>::Service as Service<String>>::Future:
Send + Unpin,
{
let ProcessConnection {
http_middleware,
rpc_middleware,
conn_permit,
conn_id,
server_cfg,
stop_handle,
drop_on_completion,
id_provider,
methods,
local_socket_stream,
} = params;
let ipc = IpcConn(tokio_util::codec::Decoder::framed(
StreamCodec::stream_incoming(),
local_socket_stream,
));
let (tx, rx) = mpsc::channel::<Box<JsonRawValue>>(server_cfg.message_buffer_capacity as usize);
let method_sink = MethodSink::new_with_limit(tx, server_cfg.max_response_body_size);
let tower_service = TowerServiceNoHttp {
inner: ServiceData {
methods,
id_provider,
stop_handle: stop_handle.clone(),
server_cfg: server_cfg.clone(),
conn_id,
conn_permit,
bounded_subscriptions: BoundedSubscriptions::new(
server_cfg.max_subscriptions_per_connection,
),
method_sink,
},
rpc_middleware,
};
let service = http_middleware.service(tower_service);
tokio::spawn(async {
to_ipc_service(ipc, service, stop_handle, rx).in_current_span().await;
drop(drop_on_completion)
});
}
async fn to_ipc_service<S, T>(
ipc: IpcConn<JsonRpcStream<T>>,
service: S,
stop_handle: StopHandle,
rx: mpsc::Receiver<Box<JsonRawValue>>,
) where
S: Service<String, Response = Option<String>> + Send + 'static,
S::Error: Into<Box<dyn core::error::Error + Send + Sync>>,
S::Future: Send + Unpin,
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
let rx_item = ReceiverStream::new(rx);
let conn = IpcConnDriver {
conn: ipc,
service,
pending_calls: Default::default(),
items: Default::default(),
};
let stopped = stop_handle.shutdown();
let mut conn = pin!(conn);
let mut rx_item = pin!(rx_item);
let mut stopped = pin!(stopped);
loop {
tokio::select! {
_ = &mut conn => {
break
}
item = rx_item.next() => {
if let Some(item) = item {
conn.push_back(item.to_string());
}
}
_ = &mut stopped => {
// shutdown
break
}
}
}
}
/// JSON-RPC IPC server settings.
#[derive(Debug, Clone)]
pub struct Settings {
/// Maximum size in bytes of a request.
max_request_body_size: u32,
/// Maximum size in bytes of a response.
max_response_body_size: u32,
/// Max length for logging for requests and responses
///
/// Logs bigger than this limit will be truncated.
max_log_length: u32,
/// Maximum number of incoming connections allowed.
max_connections: u32,
/// Maximum number of subscriptions per connection.
max_subscriptions_per_connection: u32,
/// Number of messages that server is allowed `buffer` until backpressure kicks in.
message_buffer_capacity: u32,
/// Custom tokio runtime to run the server on.
tokio_runtime: Option<tokio::runtime::Handle>,
/// The permissions to create the IPC socket with.
ipc_socket_permissions: Option<String>,
}
impl Default for Settings {
fn default() -> Self {
Self {
max_request_body_size: TEN_MB_SIZE_BYTES,
max_response_body_size: TEN_MB_SIZE_BYTES,
max_log_length: 4096,
max_connections: 100,
max_subscriptions_per_connection: 1024,
message_buffer_capacity: 1024,
tokio_runtime: None,
ipc_socket_permissions: None,
}
}
}
/// Builder to configure and create a JSON-RPC server
#[derive(Debug)]
pub struct Builder<HttpMiddleware, RpcMiddleware> {
settings: Settings,
/// Subscription ID provider.
id_provider: Arc<dyn IdProvider>,
rpc_middleware: RpcServiceBuilder<RpcMiddleware>,
http_middleware: tower::ServiceBuilder<HttpMiddleware>,
}
impl Default for Builder<Identity, Identity> {
fn default() -> Self {
Self {
settings: Settings::default(),
id_provider: Arc::new(RandomIntegerIdProvider),
rpc_middleware: RpcServiceBuilder::new(),
http_middleware: tower::ServiceBuilder::new(),
}
}
}
impl<HttpMiddleware, RpcMiddleware> Builder<HttpMiddleware, RpcMiddleware> {
/// Set the maximum size of a request body in bytes. Default is 10 MiB.
pub const fn max_request_body_size(mut self, size: u32) -> Self {
self.settings.max_request_body_size = size;
self
}
/// Set the maximum size of a response body in bytes. Default is 10 MiB.
pub const fn max_response_body_size(mut self, size: u32) -> Self {
self.settings.max_response_body_size = size;
self
}
/// Set the maximum size of a log
pub const fn max_log_length(mut self, size: u32) -> Self {
self.settings.max_log_length = size;
self
}
/// Set the maximum number of connections allowed. Default is 100.
pub const fn max_connections(mut self, max: u32) -> Self {
self.settings.max_connections = max;
self
}
/// Set the maximum number of subscriptions per connection. Default is 1024.
pub const fn max_subscriptions_per_connection(mut self, max: u32) -> Self {
self.settings.max_subscriptions_per_connection = max;
self
}
/// The server enforces backpressure which means that
/// `n` messages can be buffered and if the client
/// can't keep up with the server.
///
/// This `capacity` is applied per connection and
/// applies globally on the connection which implies
/// all JSON-RPC messages.
///
/// For example if a subscription produces plenty of new items
/// and the client can't keep up then no new messages are handled.
///
/// If this limit is exceeded then the server will "back-off"
/// and only accept new messages once the client reads pending messages.
///
/// # Panics
///
/// Panics if the buffer capacity is 0.
pub const fn set_message_buffer_capacity(mut self, c: u32) -> Self {
self.settings.message_buffer_capacity = c;
self
}
/// Configure a custom [`tokio::runtime::Handle`] to run the server on.
///
/// Default: [`tokio::spawn`]
pub fn custom_tokio_runtime(mut self, rt: tokio::runtime::Handle) -> Self {
self.settings.tokio_runtime = Some(rt);
self
}
/// Sets the permissions for the IPC socket file.
pub fn set_ipc_socket_permissions(mut self, permissions: Option<String>) -> Self {
self.settings.ipc_socket_permissions = permissions;
self
}
/// Configure custom `subscription ID` provider for the server to use
/// to when getting new subscription calls.
///
/// You may choose static dispatch or dynamic dispatch because
/// `IdProvider` is implemented for `Box<T>`.
///
/// Default: [`RandomIntegerIdProvider`].
///
/// # Examples
///
/// ```rust
/// use jsonrpsee::server::RandomStringIdProvider;
/// use reth_ipc::server::Builder;
///
/// // static dispatch
/// let builder1 = Builder::default().set_id_provider(RandomStringIdProvider::new(16));
///
/// // or dynamic dispatch
/// let builder2 = Builder::default().set_id_provider(Box::new(RandomStringIdProvider::new(16)));
/// ```
pub fn set_id_provider<I: IdProvider + 'static>(mut self, id_provider: I) -> Self {
self.id_provider = Arc::new(id_provider);
self
}
/// Configure a custom [`tower::ServiceBuilder`] middleware for composing layers to be applied
/// to the RPC service.
///
/// Default: No tower layers are applied to the RPC service.
///
/// # Examples
///
/// ```rust
/// #[tokio::main]
/// async fn main() {
/// let builder = tower::ServiceBuilder::new();
/// let server = reth_ipc::server::Builder::default()
/// .set_http_middleware(builder)
/// .build("/tmp/my-uds".into());
/// }
/// ```
pub fn set_http_middleware<T>(
self,
service_builder: tower::ServiceBuilder<T>,
) -> Builder<T, RpcMiddleware> {
Builder {
settings: self.settings,
id_provider: self.id_provider,
http_middleware: service_builder,
rpc_middleware: self.rpc_middleware,
}
}
/// Enable middleware that is invoked on every JSON-RPC call.
///
/// The middleware itself is very similar to the `tower middleware` but
/// it has a different service trait which takes &self instead &mut self
/// which means that you can't use built-in middleware from tower.
///
/// Another consequence of `&self` is that you must wrap any of the middleware state in
/// a type which is Send and provides interior mutability such `Arc<Mutex>`.
///
/// The builder itself exposes a similar API as the [`tower::ServiceBuilder`]
/// where it is possible to compose layers to the middleware.
pub fn set_rpc_middleware<T>(
self,
rpc_middleware: RpcServiceBuilder<T>,
) -> Builder<HttpMiddleware, T> {
Builder {
settings: self.settings,
id_provider: self.id_provider,
rpc_middleware,
http_middleware: self.http_middleware,
}
}
/// Finalize the configuration of the server. Consumes the [`Builder`].
pub fn build(self, endpoint: String) -> IpcServer<HttpMiddleware, RpcMiddleware> {
IpcServer {
endpoint,
cfg: self.settings,
id_provider: self.id_provider,
http_middleware: self.http_middleware,
rpc_middleware: self.rpc_middleware,
}
}
}
#[cfg(test)]
#[expect(missing_docs)]
pub fn dummy_name() -> String {
use rand::Rng;
let num: u64 = rand::rng().random();
if cfg!(windows) {
format!(r"\\.\pipe\my-pipe-{num}")
} else {
format!(r"/tmp/my-uds-{num}")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::client::IpcClientBuilder;
use futures::future::select;
use jsonrpsee::{
core::{
client::{self, ClientT, Error, Subscription, SubscriptionClientT},
middleware::{Batch, BatchEntry, Notification},
params::BatchRequestBuilder,
},
rpc_params,
types::Request,
PendingSubscriptionSink, RpcModule, SubscriptionMessage,
};
use reth_tracing::init_test_tracing;
use std::pin::pin;
use tokio::sync::broadcast;
use tokio_stream::wrappers::BroadcastStream;
#[tokio::test]
#[cfg(unix)]
async fn test_ipc_socket_permissions() {
use std::os::unix::fs::PermissionsExt;
let endpoint = &dummy_name();
let perms = "0777";
let server = Builder::default()
.set_ipc_socket_permissions(Some(perms.to_string()))
.build(endpoint.clone());
let module = RpcModule::new(());
let handle = server.start(module).await.unwrap();
tokio::spawn(handle.stopped());
let meta = std::fs::metadata(endpoint).unwrap();
let perms = meta.permissions();
assert_eq!(perms.mode() & 0o777, 0o777);
}
async fn pipe_from_stream_with_bounded_buffer(
pending: PendingSubscriptionSink,
stream: BroadcastStream<usize>,
) -> Result<(), Box<dyn core::error::Error + Send + Sync>> {
let sink = pending.accept().await.unwrap();
let closed = sink.closed();
let mut closed = pin!(closed);
let mut stream = pin!(stream);
loop {
match select(closed, stream.next()).await {
// subscription closed or stream is closed.
Either::Left((_, _)) | Either::Right((None, _)) => break Ok(()),
// received new item from the stream.
Either::Right((Some(Ok(item)), c)) => {
let raw_value = serde_json::value::to_raw_value(&item)?;
let notif = SubscriptionMessage::from(raw_value);
// NOTE: this will block until there a spot in the queue
// and you might want to do something smarter if it's
// critical that "the most recent item" must be sent when it is produced.
if sink.send(notif).await.is_err() {
break Ok(());
}
closed = c;
}
// Send back the error.
Either::Right((Some(Err(e)), _)) => break Err(e.into()),
}
}
}
// Naive example that broadcasts the produced values to all active subscribers.
fn produce_items(tx: broadcast::Sender<usize>) {
for c in 1..=100 {
std::thread::sleep(std::time::Duration::from_millis(1));
let _ = tx.send(c);
}
}
#[tokio::test]
async fn can_set_the_max_response_body_size() {
// init_test_tracing();
let endpoint = &dummy_name();
let server = Builder::default().max_response_body_size(100).build(endpoint.clone());
let mut module = RpcModule::new(());
module.register_method("anything", |_, _, _| "a".repeat(101)).unwrap();
let handle = server.start(module).await.unwrap();
tokio::spawn(handle.stopped());
let client = IpcClientBuilder::default().build(endpoint).await.unwrap();
let response: Result<String, Error> = client.request("anything", rpc_params![]).await;
assert!(response.unwrap_err().to_string().contains("Exceeded max limit of"));
}
#[tokio::test]
async fn can_set_the_max_request_body_size() {
init_test_tracing();
let endpoint = &dummy_name();
let server = Builder::default().max_request_body_size(100).build(endpoint.clone());
let mut module = RpcModule::new(());
module.register_method("anything", |_, _, _| "succeed").unwrap();
let handle = server.start(module).await.unwrap();
tokio::spawn(handle.stopped());
let client = IpcClientBuilder::default().build(endpoint).await.unwrap();
let response: Result<String, Error> =
client.request("anything", rpc_params!["a".repeat(101)]).await;
assert!(response.is_err());
let mut batch_request_builder = BatchRequestBuilder::new();
let _ = batch_request_builder.insert("anything", rpc_params![]);
let _ = batch_request_builder.insert("anything", rpc_params![]);
let _ = batch_request_builder.insert("anything", rpc_params![]);
// the raw request string is:
// [{"jsonrpc":"2.0","id":0,"method":"anything"},{"jsonrpc":"2.0","id":1, \
// "method":"anything"},{"jsonrpc":"2.0","id":2,"method":"anything"}]"
// which is 136 bytes, more than 100 bytes.
let response: Result<client::BatchResponse<'_, String>, Error> =
client.batch_request(batch_request_builder).await;
assert!(response.is_err());
}
#[tokio::test]
async fn can_set_max_connections() {
init_test_tracing();
let endpoint = &dummy_name();
let server = Builder::default().max_connections(2).build(endpoint.clone());
let mut module = RpcModule::new(());
module.register_method("anything", |_, _, _| "succeed").unwrap();
let handle = server.start(module).await.unwrap();
tokio::spawn(handle.stopped());
let client1 = IpcClientBuilder::default().build(endpoint).await.unwrap();
let client2 = IpcClientBuilder::default().build(endpoint).await.unwrap();
let client3 = IpcClientBuilder::default().build(endpoint).await.unwrap();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/server/rpc_service.rs | crates/rpc/ipc/src/server/rpc_service.rs | //! JSON-RPC service middleware.
use futures::{
future::Either,
stream::{FuturesOrdered, StreamExt},
};
use jsonrpsee::{
core::middleware::{Batch, BatchEntry},
server::{
middleware::rpc::{ResponseFuture, RpcServiceT},
IdProvider,
},
types::{error::reject_too_many_subscriptions, ErrorCode, ErrorObject, Id, Request},
BatchResponse, BatchResponseBuilder, BoundedSubscriptions, ConnectionId, MethodCallback,
MethodResponse, MethodSink, Methods, SubscriptionState,
};
use std::{future::Future, sync::Arc};
/// JSON-RPC service middleware.
#[derive(Clone, Debug)]
pub struct RpcService {
conn_id: ConnectionId,
methods: Methods,
max_response_body_size: usize,
cfg: RpcServiceCfg,
}
/// Configuration of the `RpcService`.
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub(crate) enum RpcServiceCfg {
/// The server supports only calls.
OnlyCalls,
/// The server supports both method calls and subscriptions.
CallsAndSubscriptions {
bounded_subscriptions: BoundedSubscriptions,
sink: MethodSink,
id_provider: Arc<dyn IdProvider>,
},
}
impl RpcService {
/// Create a new service.
pub(crate) const fn new(
methods: Methods,
max_response_body_size: usize,
conn_id: ConnectionId,
cfg: RpcServiceCfg,
) -> Self {
Self { methods, max_response_body_size, conn_id, cfg }
}
}
impl RpcServiceT for RpcService {
type MethodResponse = MethodResponse;
type NotificationResponse = Option<MethodResponse>;
type BatchResponse = BatchResponse;
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
let conn_id = self.conn_id;
let max_response_body_size = self.max_response_body_size;
let params = req.params();
let name = req.method_name();
let id = req.id().clone();
let extensions = req.extensions.clone();
match self.methods.method_with_name(name) {
None => {
let rp = MethodResponse::error(id, ErrorObject::from(ErrorCode::MethodNotFound));
ResponseFuture::ready(rp)
}
Some((_name, method)) => match method {
MethodCallback::Sync(callback) => {
let rp = (callback)(id, params, max_response_body_size, extensions);
ResponseFuture::ready(rp)
}
MethodCallback::Async(callback) => {
let params = params.into_owned();
let id = id.into_owned();
let fut = (callback)(id, params, conn_id, max_response_body_size, extensions);
ResponseFuture::future(fut)
}
MethodCallback::Subscription(callback) => {
let RpcServiceCfg::CallsAndSubscriptions {
bounded_subscriptions,
sink,
id_provider,
} = &self.cfg
else {
tracing::warn!(id = ?id, method = %name, "Attempted subscription on a service not configured for subscriptions.");
let rp =
MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError));
return ResponseFuture::ready(rp);
};
if let Some(p) = bounded_subscriptions.acquire() {
let conn_state = SubscriptionState {
conn_id,
id_provider: &**id_provider,
subscription_permit: p,
};
let fut =
callback(id.clone(), params, sink.clone(), conn_state, extensions);
ResponseFuture::future(fut)
} else {
let max = bounded_subscriptions.max();
let rp = MethodResponse::error(id, reject_too_many_subscriptions(max));
ResponseFuture::ready(rp)
}
}
MethodCallback::Unsubscription(callback) => {
// Don't adhere to any resource or subscription limits; always let unsubscribing
// happen!
let RpcServiceCfg::CallsAndSubscriptions { .. } = self.cfg else {
tracing::warn!(id = ?id, method = %name, "Attempted unsubscription on a service not configured for subscriptions.");
let rp =
MethodResponse::error(id, ErrorObject::from(ErrorCode::InternalError));
return ResponseFuture::ready(rp);
};
let rp = callback(id, params, conn_id, max_response_body_size, extensions);
ResponseFuture::ready(rp)
}
},
}
}
fn batch<'a>(&self, req: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
let entries: Vec<_> = req.into_iter().collect();
let mut got_notif = false;
let mut batch_response = BatchResponseBuilder::new_with_limit(self.max_response_body_size);
let mut pending_calls: FuturesOrdered<_> = entries
.into_iter()
.filter_map(|v| match v {
Ok(BatchEntry::Call(call)) => Some(Either::Right(self.call(call))),
Ok(BatchEntry::Notification(_n)) => {
got_notif = true;
None
}
Err(_err) => Some(Either::Left(async {
MethodResponse::error(Id::Null, ErrorObject::from(ErrorCode::InvalidRequest))
})),
})
.collect();
async move {
while let Some(response) = pending_calls.next().await {
if let Err(too_large) = batch_response.append(response) {
let mut error_batch = BatchResponseBuilder::new_with_limit(1);
let _ = error_batch.append(too_large);
return error_batch.finish();
}
}
batch_response.finish()
}
}
#[allow(clippy::manual_async_fn)]
fn notification<'a>(
&self,
_n: jsonrpsee::core::middleware::Notification<'a>,
) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
async move { None }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/server/ipc.rs | crates/rpc/ipc/src/server/ipc.rs | //! IPC request handling adapted from [`jsonrpsee`] http request handling
use futures::{stream::FuturesOrdered, StreamExt};
use jsonrpsee::{
batch_response_error,
core::{server::helpers::prepare_error, JsonRawValue},
server::middleware::rpc::RpcServiceT,
types::{
error::{reject_too_big_request, ErrorCode},
ErrorObject, Id, InvalidRequest, Notification, Request,
},
BatchResponseBuilder, MethodResponse,
};
use std::sync::Arc;
use tokio::sync::OwnedSemaphorePermit;
use tokio_util::either::Either;
use tracing::instrument;
type Notif<'a> = Notification<'a, Option<&'a JsonRawValue>>;
#[derive(Debug, Clone)]
pub(crate) struct Batch<S> {
data: Vec<u8>,
rpc_service: S,
}
// Batch responses must be sent back as a single message so we read the results from each
// request in the batch and read the results off of a new channel, `rx_batch`, and then send the
// complete batch response back to the client over `tx`.
#[instrument(name = "batch", skip(b), level = "TRACE")]
pub(crate) async fn process_batch_request<S>(
b: Batch<S>,
max_response_body_size: usize,
) -> Option<String>
where
S: RpcServiceT<MethodResponse = MethodResponse> + Send,
{
let Batch { data, rpc_service } = b;
if let Ok(batch) = serde_json::from_slice::<Vec<&JsonRawValue>>(&data) {
let mut got_notif = false;
let mut batch_response = BatchResponseBuilder::new_with_limit(max_response_body_size);
let mut pending_calls: FuturesOrdered<_> = batch
.into_iter()
.filter_map(|v| {
if let Ok(req) = serde_json::from_str::<Request<'_>>(v.get()) {
Some(Either::Right(rpc_service.call(req)))
} else if let Ok(_notif) = serde_json::from_str::<Notif<'_>>(v.get()) {
// notifications should not be answered.
got_notif = true;
None
} else {
// valid JSON but could be not parsable as `InvalidRequest`
let id = match serde_json::from_str::<InvalidRequest<'_>>(v.get()) {
Ok(err) => err.id,
Err(_) => Id::Null,
};
Some(Either::Left(async {
MethodResponse::error(id, ErrorObject::from(ErrorCode::InvalidRequest))
}))
}
})
.collect();
while let Some(response) = pending_calls.next().await {
if let Err(too_large) = batch_response.append(response) {
return Some(too_large.to_json().to_string())
}
}
if got_notif && batch_response.is_empty() {
None
} else {
let batch_resp = batch_response.finish();
Some(MethodResponse::from_batch(batch_resp).to_json().to_string())
}
} else {
Some(batch_response_error(Id::Null, ErrorObject::from(ErrorCode::ParseError)).to_string())
}
}
pub(crate) async fn process_single_request<S>(
data: Vec<u8>,
rpc_service: &S,
) -> Option<MethodResponse>
where
S: RpcServiceT<MethodResponse = MethodResponse> + Send,
{
if let Ok(req) = serde_json::from_slice::<Request<'_>>(&data) {
Some(execute_call_with_tracing(req, rpc_service).await)
} else if serde_json::from_slice::<Notif<'_>>(&data).is_ok() {
None
} else {
let (id, code) = prepare_error(&data);
Some(MethodResponse::error(id, ErrorObject::from(code)))
}
}
#[instrument(name = "method_call", fields(method = req.method.as_ref()), skip(req, rpc_service), level = "TRACE")]
pub(crate) async fn execute_call_with_tracing<'a, S>(
req: Request<'a>,
rpc_service: &S,
) -> MethodResponse
where
S: RpcServiceT<MethodResponse = MethodResponse> + Send,
{
rpc_service.call(req).await
}
pub(crate) async fn call_with_service<S>(
request: String,
rpc_service: S,
max_response_body_size: usize,
max_request_body_size: usize,
conn: Arc<OwnedSemaphorePermit>,
) -> Option<String>
where
S: RpcServiceT<MethodResponse = MethodResponse> + Send,
{
enum Kind {
Single,
Batch,
}
let request_kind = request
.chars()
.find_map(|c| match c {
'{' => Some(Kind::Single),
'[' => Some(Kind::Batch),
_ => None,
})
.unwrap_or(Kind::Single);
let data = request.into_bytes();
if data.len() > max_request_body_size {
return Some(
batch_response_error(Id::Null, reject_too_big_request(max_request_body_size as u32))
.to_string(),
)
}
// Single request or notification
let res = if matches!(request_kind, Kind::Single) {
let response = process_single_request(data, &rpc_service).await;
match response {
Some(response) if response.is_method_call() => Some(response.to_json().to_string()),
_ => {
// subscription responses are sent directly over the sink, return a response here
// would lead to duplicate responses for the subscription response
None
}
}
} else {
process_batch_request(Batch { data, rpc_service }, max_response_body_size).await
};
drop(conn);
res
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/ipc/src/client/mod.rs | crates/rpc/ipc/src/client/mod.rs | //! [`jsonrpsee`] transport adapter implementation for IPC.
use crate::stream_codec::StreamCodec;
use futures::{StreamExt, TryFutureExt};
use interprocess::local_socket::{
tokio::{prelude::*, RecvHalf, SendHalf},
GenericFilePath,
};
use jsonrpsee::{
async_client::{Client, ClientBuilder},
core::client::{ReceivedMessage, TransportReceiverT, TransportSenderT},
};
use std::{io, time::Duration};
use tokio::io::AsyncWriteExt;
use tokio_util::codec::FramedRead;
/// Sending end of IPC transport.
#[derive(Debug)]
pub(crate) struct Sender {
inner: SendHalf,
}
impl TransportSenderT for Sender {
type Error = IpcError;
/// Sends out a request. Returns a Future that finishes when the request has been successfully
/// sent.
async fn send(&mut self, msg: String) -> Result<(), Self::Error> {
Ok(self.inner.write_all(msg.as_bytes()).await?)
}
async fn send_ping(&mut self) -> Result<(), Self::Error> {
tracing::trace!("send ping - not implemented");
Err(IpcError::NotSupported)
}
/// Close the connection.
async fn close(&mut self) -> Result<(), Self::Error> {
Ok(())
}
}
/// Receiving end of IPC transport.
#[derive(Debug)]
pub(crate) struct Receiver {
pub(crate) inner: FramedRead<RecvHalf, StreamCodec>,
}
impl TransportReceiverT for Receiver {
type Error = IpcError;
/// Returns a Future resolving when the server sent us something back.
async fn receive(&mut self) -> Result<ReceivedMessage, Self::Error> {
self.inner.next().await.map_or(Err(IpcError::Closed), |val| Ok(ReceivedMessage::Text(val?)))
}
}
/// Builder for IPC transport [`Sender`] and [`Receiver`] pair.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub(crate) struct IpcTransportClientBuilder;
impl IpcTransportClientBuilder {
pub(crate) async fn build(self, path: &str) -> Result<(Sender, Receiver), IpcError> {
let conn = async { path.to_fs_name::<GenericFilePath>() }
.and_then(LocalSocketStream::connect)
.await
.map_err(|err| IpcError::FailedToConnect { path: path.to_string(), err })?;
let (recv, send) = conn.split();
Ok((
Sender { inner: send },
Receiver { inner: FramedRead::new(recv, StreamCodec::stream_incoming()) },
))
}
}
/// Builder type for [`Client`]
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct IpcClientBuilder {
request_timeout: Duration,
}
impl Default for IpcClientBuilder {
fn default() -> Self {
Self { request_timeout: Duration::from_secs(60) }
}
}
impl IpcClientBuilder {
/// Connects to an IPC socket
///
/// ```
/// use jsonrpsee::{core::client::ClientT, rpc_params};
/// use reth_ipc::client::IpcClientBuilder;
///
/// # async fn run_client() -> Result<(), Box<dyn core::error::Error + Send + Sync>> {
/// let client = IpcClientBuilder::default().build("/tmp/my-uds").await?;
/// let response: String = client.request("say_hello", rpc_params![]).await?;
/// # Ok(()) }
/// ```
pub async fn build(self, name: &str) -> Result<Client, IpcError> {
let (tx, rx) = IpcTransportClientBuilder::default().build(name).await?;
Ok(self.build_with_tokio(tx, rx))
}
/// Uses the sender and receiver channels to connect to the socket.
pub fn build_with_tokio<S, R>(self, sender: S, receiver: R) -> Client
where
S: TransportSenderT + Send,
R: TransportReceiverT + Send,
{
ClientBuilder::default()
.request_timeout(self.request_timeout)
.build_with_tokio(sender, receiver)
}
/// Set request timeout (default is 60 seconds).
pub const fn request_timeout(mut self, timeout: Duration) -> Self {
self.request_timeout = timeout;
self
}
}
/// Error variants that can happen in IPC transport.
#[derive(Debug, thiserror::Error)]
pub enum IpcError {
/// Operation not supported
#[error("operation not supported")]
NotSupported,
/// Stream was closed
#[error("stream closed")]
Closed,
/// Thrown when failed to establish a socket connection.
#[error("failed to connect to socket {path}: {err}")]
FailedToConnect {
/// The path of the socket.
#[doc(hidden)]
path: String,
/// The error occurred while connecting.
#[doc(hidden)]
err: io::Error,
},
/// Wrapped IO Error
#[error(transparent)]
Io(#[from] io::Error),
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::dummy_name;
use interprocess::local_socket::ListenerOptions;
#[tokio::test]
async fn test_connect() {
let name = &dummy_name();
let binding = ListenerOptions::new()
.name(name.as_str().to_fs_name::<GenericFilePath>().unwrap())
.create_tokio()
.unwrap();
tokio::spawn(async move {
let _x = binding.accept().await;
});
let (tx, rx) = IpcTransportClientBuilder::default().build(name).await.unwrap();
let _ = IpcClientBuilder::default().build_with_tokio(tx, rx);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/miner.rs | crates/rpc/rpc/src/miner.rs | use alloy_primitives::{Bytes, U128};
use async_trait::async_trait;
use jsonrpsee::core::RpcResult;
use reth_rpc_api::MinerApiServer;
/// `miner` API implementation.
///
/// This type provides the functionality for handling `miner` related requests.
#[derive(Clone, Debug, Default)]
pub struct MinerApi {}
#[async_trait]
impl MinerApiServer for MinerApi {
fn set_extra(&self, _record: Bytes) -> RpcResult<bool> {
Ok(false)
}
fn set_gas_price(&self, _gas_price: U128) -> RpcResult<bool> {
Ok(false)
}
fn set_gas_limit(&self, _gas_price: U128) -> RpcResult<bool> {
Ok(false)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/engine.rs | crates/rpc/rpc/src/engine.rs | use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{Address, Bytes, B256, U256, U64};
use alloy_rpc_types_eth::{
state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus,
};
use alloy_serde::JsonStorageKey;
use jsonrpsee::core::RpcResult as Result;
use reth_rpc_api::{EngineEthApiServer, EthApiServer};
use reth_rpc_convert::RpcTxReq;
use reth_rpc_eth_api::{
EngineEthFilter, FullEthApiTypes, QueryLimits, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction,
};
use tracing_futures::Instrument;
pub use reth_rpc_engine_api::EngineApi;
macro_rules! engine_span {
() => {
tracing::trace_span!(target: "rpc", "engine")
};
}
/// A wrapper type for the `EthApi` and `EthFilter` implementations that only expose the required
/// subset for the `eth_` namespace used in auth server alongside the `engine_` namespace.
#[derive(Debug, Clone)]
pub struct EngineEthApi<Eth, EthFilter> {
eth: Eth,
eth_filter: EthFilter,
}
impl<Eth, EthFilter> EngineEthApi<Eth, EthFilter> {
/// Create a new `EngineEthApi` instance.
pub const fn new(eth: Eth, eth_filter: EthFilter) -> Self {
Self { eth, eth_filter }
}
}
#[async_trait::async_trait]
impl<Eth, EthFilter>
EngineEthApiServer<
RpcTxReq<Eth::NetworkTypes>,
RpcBlock<Eth::NetworkTypes>,
RpcReceipt<Eth::NetworkTypes>,
> for EngineEthApi<Eth, EthFilter>
where
Eth: EthApiServer<
RpcTxReq<Eth::NetworkTypes>,
RpcTransaction<Eth::NetworkTypes>,
RpcBlock<Eth::NetworkTypes>,
RpcReceipt<Eth::NetworkTypes>,
RpcHeader<Eth::NetworkTypes>,
> + FullEthApiTypes,
EthFilter: EngineEthFilter,
{
/// Handler for: `eth_syncing`
fn syncing(&self) -> Result<SyncStatus> {
let span = engine_span!();
let _enter = span.enter();
self.eth.syncing()
}
/// Handler for: `eth_chainId`
async fn chain_id(&self) -> Result<Option<U64>> {
let span = engine_span!();
let _enter = span.enter();
self.eth.chain_id().await
}
/// Handler for: `eth_blockNumber`
fn block_number(&self) -> Result<U256> {
let span = engine_span!();
let _enter = span.enter();
self.eth.block_number()
}
/// Handler for: `eth_call`
async fn call(
&self,
request: RpcTxReq<Eth::NetworkTypes>,
block_id: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> Result<Bytes> {
self.eth
.call(request, block_id, state_overrides, block_overrides)
.instrument(engine_span!())
.await
}
/// Handler for: `eth_getCode`
async fn get_code(&self, address: Address, block_id: Option<BlockId>) -> Result<Bytes> {
self.eth.get_code(address, block_id).instrument(engine_span!()).await
}
/// Handler for: `eth_getBlockByHash`
async fn block_by_hash(
&self,
hash: B256,
full: bool,
) -> Result<Option<RpcBlock<Eth::NetworkTypes>>> {
self.eth.block_by_hash(hash, full).instrument(engine_span!()).await
}
/// Handler for: `eth_getBlockByNumber`
async fn block_by_number(
&self,
number: BlockNumberOrTag,
full: bool,
) -> Result<Option<RpcBlock<Eth::NetworkTypes>>> {
self.eth.block_by_number(number, full).instrument(engine_span!()).await
}
async fn block_receipts(
&self,
block_id: BlockId,
) -> Result<Option<Vec<RpcReceipt<Eth::NetworkTypes>>>> {
self.eth.block_receipts(block_id).instrument(engine_span!()).await
}
/// Handler for: `eth_sendRawTransaction`
async fn send_raw_transaction(&self, tx: Bytes) -> Result<B256> {
self.eth.send_raw_transaction(tx).instrument(engine_span!()).await
}
async fn transaction_receipt(
&self,
hash: B256,
) -> Result<Option<RpcReceipt<Eth::NetworkTypes>>> {
self.eth.transaction_receipt(hash).instrument(engine_span!()).await
}
/// Handler for `eth_getLogs`
async fn logs(&self, filter: Filter) -> Result<Vec<Log>> {
self.eth_filter.logs(filter, QueryLimits::no_limits()).instrument(engine_span!()).await
}
/// Handler for `eth_getProof`
async fn get_proof(
&self,
address: Address,
keys: Vec<JsonStorageKey>,
block_number: Option<BlockId>,
) -> Result<EIP1186AccountProofResponse> {
self.eth.get_proof(address, keys, block_number).instrument(engine_span!()).await
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/lib.rs | crates/rpc/rpc/src/lib.rs | //! Reth RPC implementation
//!
//! Provides the implementation of all RPC interfaces.
//!
//!
//! ## Note on blocking behaviour
//!
//! All async RPC handlers must non-blocking, see also [What is blocking](https://ryhl.io/blog/async-what-is-blocking/).
//!
//! A lot of the RPC are using a mix of async and direct calls to the database, which are blocking
//! and can reduce overall performance of all concurrent requests handled via the jsonrpsee server.
//!
//! To avoid this, all blocking or CPU intensive handlers must be spawned to a separate task. See
//! the [`EthApi`] handler implementations for examples. The rpc-api traits make no use of the
//! available jsonrpsee `blocking` attribute to give implementers more freedom because the
//! `blocking` attribute and async handlers are mutually exclusive. However, as mentioned above, a
//! lot of handlers make use of async functions, caching for example, but are also using blocking
//! disk-io, hence these calls are spawned as futures to a blocking task manually.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
use http as _;
use http_body as _;
use hyper as _;
use jsonwebtoken as _;
use pin_project as _;
use tower as _;
mod admin;
mod debug;
mod engine;
pub mod eth;
mod miner;
mod net;
mod otterscan;
mod reth;
mod rpc;
mod trace;
mod txpool;
mod validation;
mod web3;
pub use admin::AdminApi;
pub use debug::DebugApi;
pub use engine::{EngineApi, EngineEthApi};
pub use eth::{helpers::SyncListener, EthApi, EthApiBuilder, EthBundle, EthFilter, EthPubSub};
pub use miner::MinerApi;
pub use net::NetApi;
pub use otterscan::OtterscanApi;
pub use reth::RethApi;
pub use reth_rpc_convert::RpcTypes;
pub use rpc::RPCApi;
pub use trace::TraceApi;
pub use txpool::TxPoolApi;
pub use validation::{ValidationApi, ValidationApiConfig};
pub use web3::Web3Api;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/validation.rs | crates/rpc/rpc/src/validation.rs | use alloy_consensus::{
BlobTransactionValidationError, BlockHeader, EnvKzgSettings, Transaction, TxReceipt,
};
use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash};
use alloy_rpc_types_beacon::relay::{
BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2,
BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4,
BuilderBlockValidationRequestV5,
};
use alloy_rpc_types_engine::{
BlobsBundleV1, BlobsBundleV2, CancunPayloadFields, ExecutionData, ExecutionPayload,
ExecutionPayloadSidecar, PraguePayloadFields,
};
use async_trait::async_trait;
use core::fmt;
use jsonrpsee::core::RpcResult;
use jsonrpsee_types::error::ErrorObject;
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_consensus::{Consensus, FullConsensus};
use reth_consensus_common::validation::MAX_RLP_BLOCK_SIZE;
use reth_engine_primitives::PayloadValidator;
use reth_errors::{BlockExecutionError, ConsensusError, ProviderError};
use reth_evm::{execute::Executor, ConfigureEvm};
use reth_execution_types::BlockExecutionOutput;
use reth_metrics::{
metrics,
metrics::{gauge, Gauge},
Metrics,
};
use reth_node_api::{NewPayloadError, PayloadTypes};
use reth_primitives_traits::{
constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, GotExpected, NodePrimitives, RecoveredBlock,
SealedBlock, SealedHeaderFor,
};
use reth_revm::{cached::CachedReads, database::StateProviderDatabase};
use reth_rpc_api::BlockSubmissionValidationApiServer;
use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err};
use reth_storage_api::{BlockReaderIdExt, StateProviderFactory};
use reth_tasks::TaskSpawner;
use revm_primitives::{Address, B256, U256};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::{collections::HashSet, sync::Arc};
use tokio::sync::{oneshot, RwLock};
use tracing::warn;
/// The type that implements the `validation` rpc namespace trait
#[derive(Clone, Debug, derive_more::Deref)]
pub struct ValidationApi<Provider, E: ConfigureEvm, T: PayloadTypes> {
#[deref]
inner: Arc<ValidationApiInner<Provider, E, T>>,
}
impl<Provider, E, T> ValidationApi<Provider, E, T>
where
E: ConfigureEvm,
T: PayloadTypes,
{
/// Create a new instance of the [`ValidationApi`]
pub fn new(
provider: Provider,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
evm_config: E,
config: ValidationApiConfig,
task_spawner: Box<dyn TaskSpawner>,
payload_validator: Arc<
dyn PayloadValidator<T, Block = <E::Primitives as NodePrimitives>::Block>,
>,
) -> Self {
let ValidationApiConfig { disallow, validation_window } = config;
let inner = Arc::new(ValidationApiInner {
provider,
consensus,
payload_validator,
evm_config,
disallow,
validation_window,
cached_state: Default::default(),
task_spawner,
metrics: Default::default(),
});
inner.metrics.disallow_size.set(inner.disallow.len() as f64);
let disallow_hash = hash_disallow_list(&inner.disallow);
let hash_gauge = gauge!("builder_validation_disallow_hash", "hash" => disallow_hash);
hash_gauge.set(1.0);
Self { inner }
}
/// Returns the cached reads for the given head hash.
async fn cached_reads(&self, head: B256) -> CachedReads {
let cache = self.inner.cached_state.read().await;
if cache.0 == head {
cache.1.clone()
} else {
Default::default()
}
}
/// Updates the cached state for the given head hash.
async fn update_cached_reads(&self, head: B256, cached_state: CachedReads) {
let mut cache = self.inner.cached_state.write().await;
if cache.0 == head {
cache.1.extend(cached_state);
} else {
*cache = (head, cached_state)
}
}
}
impl<Provider, E, T> ValidationApi<Provider, E, T>
where
Provider: BlockReaderIdExt<Header = <E::Primitives as NodePrimitives>::BlockHeader>
+ ChainSpecProvider<ChainSpec: EthereumHardforks>
+ StateProviderFactory
+ 'static,
E: ConfigureEvm + 'static,
T: PayloadTypes<ExecutionData = ExecutionData>,
{
/// Validates the given block and a [`BidTrace`] against it.
pub async fn validate_message_against_block(
&self,
block: RecoveredBlock<<E::Primitives as NodePrimitives>::Block>,
message: BidTrace,
registered_gas_limit: u64,
) -> Result<(), ValidationApiError> {
self.validate_message_against_header(block.sealed_header(), &message)?;
self.consensus.validate_header(block.sealed_header())?;
self.consensus.validate_block_pre_execution(block.sealed_block())?;
if !self.disallow.is_empty() {
if self.disallow.contains(&block.beneficiary()) {
return Err(ValidationApiError::Blacklist(block.beneficiary()))
}
if self.disallow.contains(&message.proposer_fee_recipient) {
return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient))
}
for (sender, tx) in block.senders_iter().zip(block.body().transactions()) {
if self.disallow.contains(sender) {
return Err(ValidationApiError::Blacklist(*sender))
}
if let Some(to) = tx.to() {
if self.disallow.contains(&to) {
return Err(ValidationApiError::Blacklist(to))
}
}
}
}
let latest_header =
self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?;
let parent_header = if block.parent_hash() == latest_header.hash() {
latest_header
} else {
// parent is not the latest header so we need to fetch it and ensure it's not too old
let parent_header = self
.provider
.sealed_header_by_hash(block.parent_hash())?
.ok_or_else(|| ValidationApiError::MissingParentBlock)?;
if latest_header.number().saturating_sub(parent_header.number()) >
self.validation_window
{
return Err(ValidationApiError::BlockTooOld)
}
parent_header
};
self.consensus.validate_header_against_parent(block.sealed_header(), &parent_header)?;
self.validate_gas_limit(registered_gas_limit, &parent_header, block.sealed_header())?;
let parent_header_hash = parent_header.hash();
let state_provider = self.provider.state_by_block_hash(parent_header_hash)?;
let mut request_cache = self.cached_reads(parent_header_hash).await;
let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider));
let executor = self.evm_config.batch_executor(cached_db);
let mut accessed_blacklisted = None;
let output = executor.execute_with_state_closure(&block, |state| {
if !self.disallow.is_empty() {
// Check whether the submission interacted with any blacklisted account by scanning
// the `State`'s cache that records everything read from database during execution.
for account in state.cache.accounts.keys() {
if self.disallow.contains(account) {
accessed_blacklisted = Some(*account);
}
}
}
})?;
if let Some(account) = accessed_blacklisted {
return Err(ValidationApiError::Blacklist(account))
}
// update the cached reads
self.update_cached_reads(parent_header_hash, request_cache).await;
self.consensus.validate_block_post_execution(&block, &output)?;
self.ensure_payment(&block, &output, &message)?;
let state_root =
state_provider.state_root(state_provider.hashed_post_state(&output.state))?;
if state_root != block.header().state_root() {
return Err(ConsensusError::BodyStateRootDiff(
GotExpected { got: state_root, expected: block.header().state_root() }.into(),
)
.into())
}
Ok(())
}
/// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeaderFor`].
fn validate_message_against_header(
&self,
header: &SealedHeaderFor<E::Primitives>,
message: &BidTrace,
) -> Result<(), ValidationApiError> {
if header.hash() != message.block_hash {
Err(ValidationApiError::BlockHashMismatch(GotExpected {
got: message.block_hash,
expected: header.hash(),
}))
} else if header.parent_hash() != message.parent_hash {
Err(ValidationApiError::ParentHashMismatch(GotExpected {
got: message.parent_hash,
expected: header.parent_hash(),
}))
} else if header.gas_limit() != message.gas_limit {
Err(ValidationApiError::GasLimitMismatch(GotExpected {
got: message.gas_limit,
expected: header.gas_limit(),
}))
} else if header.gas_used() != message.gas_used {
Err(ValidationApiError::GasUsedMismatch(GotExpected {
got: message.gas_used,
expected: header.gas_used(),
}))
} else {
Ok(())
}
}
/// Ensures that the chosen gas limit is the closest possible value for the validator's
/// registered gas limit.
///
/// Ref: <https://github.com/flashbots/builder/blob/a742641e24df68bc2fc476199b012b0abce40ffe/core/blockchain.go#L2474-L2477>
fn validate_gas_limit(
&self,
registered_gas_limit: u64,
parent_header: &SealedHeaderFor<E::Primitives>,
header: &SealedHeaderFor<E::Primitives>,
) -> Result<(), ValidationApiError> {
let max_gas_limit =
parent_header.gas_limit() + parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR - 1;
let min_gas_limit =
parent_header.gas_limit() - parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR + 1;
let best_gas_limit =
std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit));
if best_gas_limit != header.gas_limit() {
return Err(ValidationApiError::GasLimitMismatch(GotExpected {
got: header.gas_limit(),
expected: best_gas_limit,
}))
}
Ok(())
}
    /// Ensures that the proposer has received [`BidTrace::value`] for this block.
    ///
    /// Firstly attempts to verify the payment by checking the state changes, otherwise falls back
    /// to checking the latest block transaction.
    fn ensure_payment(
        &self,
        block: &SealedBlock<<E::Primitives as NodePrimitives>::Block>,
        output: &BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>,
        message: &BidTrace,
    ) -> Result<(), ValidationApiError> {
        // Read the fee recipient's pre- and post-execution balance from the execution
        // output's state changes, defaulting to zero where no info is recorded.
        let (mut balance_before, balance_after) = if let Some(acc) =
            output.state.state.get(&message.proposer_fee_recipient)
        {
            let balance_before = acc.original_info.as_ref().map(|i| i.balance).unwrap_or_default();
            let balance_after = acc.info.as_ref().map(|i| i.balance).unwrap_or_default();
            (balance_before, balance_after)
        } else {
            // account might have balance but considering it zero is fine as long as we know
            // that balance have not changed
            (U256::ZERO, U256::ZERO)
        };
        // Withdrawals credited to the fee recipient are not bid payment; add them to the
        // pre-payment balance so they don't mask a missing payment.
        if let Some(withdrawals) = block.body().withdrawals() {
            for withdrawal in withdrawals {
                if withdrawal.address == message.proposer_fee_recipient {
                    balance_before += withdrawal.amount_wei();
                }
            }
        }
        // Fast path: the balance delta covers the promised bid value.
        if balance_after >= balance_before.saturating_add(message.value) {
            return Ok(())
        }
        // Fallback: the payment must be the block's last transaction.
        let (receipt, tx) = output
            .receipts
            .last()
            .zip(block.body().transactions().last())
            .ok_or(ValidationApiError::ProposerPayment)?;
        // The payment transaction must have executed successfully.
        if !receipt.status() {
            return Err(ValidationApiError::ProposerPayment)
        }
        // It must transfer exactly the bid value directly to the proposer fee recipient.
        if tx.to() != Some(message.proposer_fee_recipient) {
            return Err(ValidationApiError::ProposerPayment)
        }
        if tx.value() != message.value {
            return Err(ValidationApiError::ProposerPayment)
        }
        // It must be a bare value transfer, with no calldata.
        if !tx.input().is_empty() {
            return Err(ValidationApiError::ProposerPayment)
        }
        // And it must not pay any priority fee on top of the base fee.
        if let Some(block_base_fee) = block.header().base_fee_per_gas() {
            if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 {
                return Err(ValidationApiError::ProposerPayment)
            }
        }
        Ok(())
    }
/// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs.
pub fn validate_blobs_bundle(
&self,
mut blobs_bundle: BlobsBundleV1,
) -> Result<Vec<B256>, ValidationApiError> {
if blobs_bundle.commitments.len() != blobs_bundle.proofs.len() ||
blobs_bundle.commitments.len() != blobs_bundle.blobs.len()
{
return Err(ValidationApiError::InvalidBlobsBundle)
}
let versioned_hashes = blobs_bundle
.commitments
.iter()
.map(|c| kzg_to_versioned_hash(c.as_slice()))
.collect::<Vec<_>>();
let sidecar = blobs_bundle.pop_sidecar(blobs_bundle.blobs.len());
sidecar.validate(&versioned_hashes, EnvKzgSettings::default().get())?;
Ok(versioned_hashes)
}
/// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs.
pub fn validate_blobs_bundle_v2(
&self,
blobs_bundle: BlobsBundleV2,
) -> Result<Vec<B256>, ValidationApiError> {
let versioned_hashes = blobs_bundle
.commitments
.iter()
.map(|c| kzg_to_versioned_hash(c.as_slice()))
.collect::<Vec<_>>();
blobs_bundle
.try_into_sidecar()
.map_err(|_| ValidationApiError::InvalidBlobsBundle)?
.validate(&versioned_hashes, EnvKzgSettings::default().get())?;
Ok(versioned_hashes)
}
    /// Core logic for validating the builder submission v3
    ///
    /// Checks payload well-formedness (including the blobs bundle) and then validates
    /// the bid message against the resulting block.
    async fn validate_builder_submission_v3(
        &self,
        request: BuilderBlockValidationRequestV3,
    ) -> Result<(), ValidationApiError> {
        // Blob validation happens first: the derived versioned hashes feed into the
        // Cancun sidecar used for payload well-formedness checks.
        let block = self.payload_validator.ensure_well_formed_payload(ExecutionData {
            payload: ExecutionPayload::V3(request.request.execution_payload),
            sidecar: ExecutionPayloadSidecar::v3(CancunPayloadFields {
                parent_beacon_block_root: request.parent_beacon_block_root,
                versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?,
            }),
        })?;
        // Full message/block validation (execution, payment, blocklist, ...).
        self.validate_message_against_block(
            block,
            request.request.message,
            request.registered_gas_limit,
        )
        .await
    }
    /// Core logic for validating the builder submission v4
    ///
    /// Same as v3 but the sidecar additionally carries Prague execution requests.
    async fn validate_builder_submission_v4(
        &self,
        request: BuilderBlockValidationRequestV4,
    ) -> Result<(), ValidationApiError> {
        let block = self.payload_validator.ensure_well_formed_payload(ExecutionData {
            payload: ExecutionPayload::V3(request.request.execution_payload),
            sidecar: ExecutionPayloadSidecar::v4(
                CancunPayloadFields {
                    parent_beacon_block_root: request.parent_beacon_block_root,
                    versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?,
                },
                PraguePayloadFields {
                    // v4 submissions carry the requests themselves, not just a hash.
                    requests: RequestsOrHash::Requests(
                        request.request.execution_requests.to_requests(),
                    ),
                },
            ),
        })?;
        self.validate_message_against_block(
            block,
            request.request.message,
            request.registered_gas_limit,
        )
        .await
    }
    /// Core logic for validating the builder submission v5
    ///
    /// Same as v4 but uses the v2 blobs bundle (cell proofs) and additionally enforces
    /// the EIP-7934 RLP block-size cap once Osaka is active.
    async fn validate_builder_submission_v5(
        &self,
        request: BuilderBlockValidationRequestV5,
    ) -> Result<(), ValidationApiError> {
        let block = self.payload_validator.ensure_well_formed_payload(ExecutionData {
            payload: ExecutionPayload::V3(request.request.execution_payload),
            sidecar: ExecutionPayloadSidecar::v4(
                CancunPayloadFields {
                    parent_beacon_block_root: request.parent_beacon_block_root,
                    // v5 uses the v2 bundle format for blob validation.
                    versioned_hashes: self
                        .validate_blobs_bundle_v2(request.request.blobs_bundle)?,
                },
                PraguePayloadFields {
                    requests: RequestsOrHash::Requests(
                        request.request.execution_requests.to_requests(),
                    ),
                },
            ),
        })?;
        // Check block size as per EIP-7934 (only applies when Osaka hardfork is active)
        let chain_spec = self.provider.chain_spec();
        if chain_spec.is_osaka_active_at_timestamp(block.timestamp()) &&
            block.rlp_length() > MAX_RLP_BLOCK_SIZE
        {
            return Err(ValidationApiError::Consensus(ConsensusError::BlockTooLarge {
                rlp_length: block.rlp_length(),
                max_rlp_length: MAX_RLP_BLOCK_SIZE,
            }));
        }
        self.validate_message_against_block(
            block,
            request.request.message,
            request.registered_gas_limit,
        )
        .await
    }
}
#[async_trait]
impl<Provider, E, T> BlockSubmissionValidationApiServer for ValidationApi<Provider, E, T>
where
    Provider: BlockReaderIdExt<Header = <E::Primitives as NodePrimitives>::BlockHeader>
        + ChainSpecProvider<ChainSpec: EthereumHardforks>
        + StateProviderFactory
        + Clone
        + 'static,
    E: ConfigureEvm + 'static,
    T: PayloadTypes<ExecutionData = ExecutionData>,
{
    /// V1 submissions are not supported; always returns an internal "unimplemented" error.
    async fn validate_builder_submission_v1(
        &self,
        _request: BuilderBlockValidationRequest,
    ) -> RpcResult<()> {
        warn!(target: "rpc::flashbots", "Method `flashbots_validateBuilderSubmissionV1` is not supported");
        Err(internal_rpc_err("unimplemented"))
    }
    /// V2 submissions are not supported; always returns an internal "unimplemented" error.
    async fn validate_builder_submission_v2(
        &self,
        _request: BuilderBlockValidationRequestV2,
    ) -> RpcResult<()> {
        warn!(target: "rpc::flashbots", "Method `flashbots_validateBuilderSubmissionV2` is not supported");
        Err(internal_rpc_err("unimplemented"))
    }
    /// Validates a block submitted to the relay
    async fn validate_builder_submission_v3(
        &self,
        request: BuilderBlockValidationRequestV3,
    ) -> RpcResult<()> {
        let this = self.clone();
        let (tx, rx) = oneshot::channel();
        // Validation executes the full block, so run it on the blocking task pool and
        // report the outcome back over a oneshot channel.
        self.task_spawner.spawn_blocking(Box::pin(async move {
            // `Self::` disambiguates the inherent validation method from this trait method.
            let result = Self::validate_builder_submission_v3(&this, request)
                .await
                .map_err(ErrorObject::from);
            let _ = tx.send(result);
        }));
        rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))?
    }
    /// Validates a block submitted to the relay
    async fn validate_builder_submission_v4(
        &self,
        request: BuilderBlockValidationRequestV4,
    ) -> RpcResult<()> {
        let this = self.clone();
        let (tx, rx) = oneshot::channel();
        // Same spawn-blocking pattern as v3; see comment there.
        self.task_spawner.spawn_blocking(Box::pin(async move {
            let result = Self::validate_builder_submission_v4(&this, request)
                .await
                .map_err(ErrorObject::from);
            let _ = tx.send(result);
        }));
        rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))?
    }
    /// Validates a block submitted to the relay
    async fn validate_builder_submission_v5(
        &self,
        request: BuilderBlockValidationRequestV5,
    ) -> RpcResult<()> {
        let this = self.clone();
        let (tx, rx) = oneshot::channel();
        // Same spawn-blocking pattern as v3; see comment there.
        self.task_spawner.spawn_blocking(Box::pin(async move {
            let result = Self::validate_builder_submission_v5(&this, request)
                .await
                .map_err(ErrorObject::from);
            let _ = tx.send(result);
        }));
        rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))?
    }
}
/// Container for the validation API's dependencies, configuration and shared state.
pub struct ValidationApiInner<Provider, E: ConfigureEvm, T: PayloadTypes> {
    /// The provider that can interact with the chain.
    provider: Provider,
    /// Consensus implementation.
    consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
    /// Execution payload validator.
    payload_validator:
        Arc<dyn PayloadValidator<T, Block = <E::Primitives as NodePrimitives>::Block>>,
    /// Block executor factory.
    evm_config: E,
    /// Set of disallowed addresses; blocks touching any of them fail validation.
    disallow: HashSet<Address>,
    /// The maximum block distance - parent to latest - allowed for validation
    validation_window: u64,
    /// Cached state reads to avoid redundant disk I/O across multiple validation attempts
    /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the
    /// latest head block state. Uses async `RwLock` to safely handle concurrent validation
    /// requests.
    cached_state: RwLock<(B256, CachedReads)>,
    /// Task spawner for blocking operations
    task_spawner: Box<dyn TaskSpawner>,
    /// Validation metrics
    metrics: ValidationMetrics,
}
/// Calculates a deterministic hash of the blocklist for change detection.
///
/// Addresses are sorted first so the digest is independent of the set's iteration
/// order; the SHA-256 of the concatenated raw address bytes is rendered as a
/// lowercase hex string.
fn hash_disallow_list(disallow: &HashSet<Address>) -> String {
    let mut addresses: Vec<_> = disallow.iter().collect();
    // Sort for deterministic hashing regardless of insertion order.
    addresses.sort();
    let digest = addresses
        .into_iter()
        .fold(Sha256::new(), |mut hasher, address| {
            hasher.update(address.as_slice());
            hasher
        })
        .finalize();
    format!("{digest:x}")
}
impl<Provider, E: ConfigureEvm, T: PayloadTypes> fmt::Debug for ValidationApiInner<Provider, E, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fields are mostly trait objects and generics without `Debug`; print an
        // opaque `ValidationApiInner { .. }` placeholder instead.
        f.debug_struct("ValidationApiInner").finish_non_exhaustive()
    }
}
/// Configuration for validation API.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct ValidationApiConfig {
    /// Disallowed addresses; blocks touching any of them fail validation.
    pub disallow: HashSet<Address>,
    /// The maximum block distance - parent to latest - allowed for validation
    pub validation_window: u64,
}
impl ValidationApiConfig {
    /// Default validation blocks window of 3 blocks, i.e. the submitted block's parent
    /// may be at most 3 blocks behind the latest known block.
    pub const DEFAULT_VALIDATION_WINDOW: u64 = 3;
}
impl Default for ValidationApiConfig {
    /// Default configuration: empty blocklist and the default validation window.
    fn default() -> Self {
        Self {
            // No addresses are disallowed unless explicitly configured.
            disallow: Default::default(),
            validation_window: Self::DEFAULT_VALIDATION_WINDOW,
        }
    }
}
/// Errors thrown by the validation API.
#[derive(Debug, thiserror::Error)]
pub enum ValidationApiError {
    /// The header's gas limit differs from the bid trace's.
    #[error("block gas limit mismatch: {_0}")]
    GasLimitMismatch(GotExpected<u64>),
    /// The header's gas used differs from the bid trace's.
    #[error("block gas used mismatch: {_0}")]
    GasUsedMismatch(GotExpected<u64>),
    /// The header's parent hash differs from the bid trace's.
    #[error("block parent hash mismatch: {_0}")]
    ParentHashMismatch(GotExpected<B256>),
    /// The sealed block hash differs from the bid trace's.
    #[error("block hash mismatch: {_0}")]
    BlockHashMismatch(GotExpected<B256>),
    /// No latest block available in the database.
    #[error("missing latest block in database")]
    MissingLatestBlock,
    /// The submitted block's parent is unknown.
    #[error("parent block not found")]
    MissingParentBlock,
    /// The submitted block's parent is outside the configured validation window.
    #[error("block is too old, outside validation window")]
    BlockTooOld,
    /// The proposer payment could not be verified via state changes or payment tx.
    #[error("could not verify proposer payment")]
    ProposerPayment,
    /// The blobs bundle is malformed or its KZG proofs failed verification.
    #[error("invalid blobs bundle")]
    InvalidBlobsBundle,
    /// The block touches an address on the configured blocklist.
    #[error("block accesses blacklisted address: {_0}")]
    Blacklist(Address),
    /// Blob transaction validation failure.
    #[error(transparent)]
    Blob(#[from] BlobTransactionValidationError),
    /// Consensus-level validation failure.
    #[error(transparent)]
    Consensus(#[from] ConsensusError),
    /// Storage/provider failure.
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// Block execution failure.
    #[error(transparent)]
    Execution(#[from] BlockExecutionError),
    /// Payload well-formedness failure.
    #[error(transparent)]
    Payload(#[from] NewPayloadError),
}
impl From<ValidationApiError> for ErrorObject<'static> {
fn from(error: ValidationApiError) -> Self {
match error {
ValidationApiError::GasLimitMismatch(_) |
ValidationApiError::GasUsedMismatch(_) |
ValidationApiError::ParentHashMismatch(_) |
ValidationApiError::BlockHashMismatch(_) |
ValidationApiError::Blacklist(_) |
ValidationApiError::ProposerPayment |
ValidationApiError::InvalidBlobsBundle |
ValidationApiError::Blob(_) => invalid_params_rpc_err(error.to_string()),
ValidationApiError::MissingLatestBlock |
ValidationApiError::MissingParentBlock |
ValidationApiError::BlockTooOld |
ValidationApiError::Consensus(_) |
ValidationApiError::Provider(_) => internal_rpc_err(error.to_string()),
ValidationApiError::Execution(err) => match err {
error @ BlockExecutionError::Validation(_) => {
invalid_params_rpc_err(error.to_string())
}
error @ BlockExecutionError::Internal(_) => internal_rpc_err(error.to_string()),
},
ValidationApiError::Payload(err) => match err {
error @ NewPayloadError::Eth(_) => invalid_params_rpc_err(error.to_string()),
error @ NewPayloadError::Other(_) => internal_rpc_err(error.to_string()),
},
}
}
}
/// Metrics for the validation endpoint.
///
/// Registered under the `builder.validation` metrics scope.
#[derive(Metrics)]
#[metrics(scope = "builder.validation")]
pub(crate) struct ValidationMetrics {
    /// The number of entries configured in the builder validation disallow list.
    pub(crate) disallow_size: Gauge,
}
#[cfg(test)]
mod tests {
    use super::hash_disallow_list;
    use revm_primitives::Address;
    use std::collections::HashSet;

    /// Builds a set of addresses, one per byte, where each address is that byte
    /// repeated across all 20 bytes.
    fn address_set(bytes: &[u8]) -> HashSet<Address> {
        bytes.iter().map(|&b| Address::from([b; 20])).collect()
    }

    #[test]
    fn test_hash_disallow_list_deterministic() {
        // Hashing the same set twice must produce the same digest.
        let addresses = address_set(&[1, 2]);
        assert_eq!(hash_disallow_list(&addresses), hash_disallow_list(&addresses));
    }

    #[test]
    fn test_hash_disallow_list_different_content() {
        // Different sets must produce different digests.
        let hash1 = hash_disallow_list(&address_set(&[1]));
        let hash2 = hash_disallow_list(&address_set(&[2]));
        assert_ne!(hash1, hash2);
    }

    #[test]
    fn test_hash_disallow_list_order_independent() {
        // Insertion order must not influence the digest.
        let hash1 = hash_disallow_list(&address_set(&[1, 2]));
        let hash2 = hash_disallow_list(&address_set(&[2, 1]));
        assert_eq!(hash1, hash2);
    }

    #[test]
    //ensures parity with rbuilder hashing https://github.com/flashbots/rbuilder/blob/962c8444cdd490a216beda22c7eec164db9fc3ac/crates/rbuilder/src/live_builder/block_list_provider.rs#L248
    fn test_disallow_list_hash_rbuilder_parity() {
        let json = r#"["0x05E0b5B40B7b66098C2161A5EE11C5740A3A7C45","0x01e2919679362dFBC9ee1644Ba9C6da6D6245BB1","0x03893a7c7463AE47D46bc7f091665f1893656003","0x04DBA1194ee10112fE6C3207C0687DEf0e78baCf"]"#;
        let blocklist: Vec<Address> = serde_json::from_str(json).unwrap();
        let blocklist: HashSet<Address> = blocklist.into_iter().collect();
        let expected_hash = "ee14e9d115e182f61871a5a385ab2f32ecf434f3b17bdbacc71044810d89e608";
        assert_eq!(expected_hash, hash_disallow_list(&blocklist));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/trace.rs | crates/rpc/rpc/src/trace.rs | use alloy_consensus::BlockHeader as _;
use alloy_eips::BlockId;
use alloy_evm::block::calc::{base_block_reward_pre_merge, block_reward, ommer_reward};
use alloy_primitives::{
map::{HashMap, HashSet},
Address, BlockHash, Bytes, B256, U256,
};
use alloy_rpc_types_eth::{
state::{EvmOverrides, StateOverride},
BlockOverrides, Index,
};
use alloy_rpc_types_trace::{
filter::TraceFilter,
opcode::{BlockOpcodeGas, TransactionOpcodeGas},
parity::*,
tracerequest::TraceCallRequest,
};
use async_trait::async_trait;
use jsonrpsee::core::RpcResult;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardfork, MAINNET, SEPOLIA};
use reth_evm::ConfigureEvm;
use reth_primitives_traits::{BlockBody, BlockHeader};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_api::TraceApiServer;
use reth_rpc_convert::RpcTxReq;
use reth_rpc_eth_api::{
helpers::{Call, LoadPendingBlock, LoadTransaction, Trace, TraceExt},
FromEthApiError, RpcNodeCore,
};
use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction, EthConfig};
use reth_storage_api::{BlockNumReader, BlockReader};
use reth_tasks::pool::BlockingTaskGuard;
use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool};
use revm::DatabaseCommit;
use revm_inspectors::{
opcode::OpcodeGasInspector,
storage::StorageInspector,
tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig},
};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::sync::{AcquireError, OwnedSemaphorePermit};
/// `trace` API implementation.
///
/// This type provides the functionality for handling `trace` related requests.
pub struct TraceApi<Eth> {
    /// Shared inner state (eth API handle, blocking-task guard, eth config).
    inner: Arc<TraceApiInner<Eth>>,
}
// === impl TraceApi ===
impl<Eth> TraceApi<Eth> {
    /// Create a new instance of the [`TraceApi`]
    pub fn new(
        eth_api: Eth,
        blocking_task_guard: BlockingTaskGuard,
        eth_config: EthConfig,
    ) -> Self {
        let inner = Arc::new(TraceApiInner { eth_api, blocking_task_guard, eth_config });
        Self { inner }
    }
    /// Acquires a permit to execute a tracing call.
    ///
    /// The guard's semaphore bounds how many tracing requests run concurrently.
    async fn acquire_trace_permit(
        &self,
    ) -> std::result::Result<OwnedSemaphorePermit, AcquireError> {
        self.inner.blocking_task_guard.clone().acquire_owned().await
    }
    /// Access the underlying `Eth` API.
    pub fn eth_api(&self) -> &Eth {
        &self.inner.eth_api
    }
}
impl<Eth: RpcNodeCore> TraceApi<Eth> {
    /// Access the underlying provider.
    pub fn provider(&self) -> &Eth::Provider {
        self.inner.eth_api.provider()
    }
}
// === impl TraceApi === //
impl<Eth> TraceApi<Eth>
where
    // tracing methods do _not_ read from mempool, hence no `LoadBlock` trait
    // bound
    Eth: Trace + Call + LoadPendingBlock + LoadTransaction + 'static,
{
    /// Executes the given call and returns a number of possible traces for it.
    pub async fn trace_call(
        &self,
        trace_request: TraceCallRequest<RpcTxReq<Eth::NetworkTypes>>,
    ) -> Result<TraceResults, Eth::Error> {
        // Default block id resolves to "latest" semantics via `unwrap_or_default`.
        let at = trace_request.block_id.unwrap_or_default();
        let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types);
        let overrides =
            EvmOverrides::new(trace_request.state_overrides, trace_request.block_overrides);
        let mut inspector = TracingInspector::new(config);
        let this = self.clone();
        self.eth_api()
            .spawn_with_call_at(trace_request.call, at, overrides, move |db, evm_env, tx_env| {
                // wrapper is hack to get around 'higher-ranked lifetime error', see
                // <https://github.com/rust-lang/rust/issues/100013>
                let db = db.0;
                let res = this.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?;
                let trace_res = inspector
                    .into_parity_builder()
                    .into_trace_results_with_state(&res, &trace_request.trace_types, &db)
                    .map_err(Eth::Error::from_eth_err)?;
                Ok(trace_res)
            })
            .await
    }
    /// Traces a call to `eth_sendRawTransaction` without making the call, returning the traces.
    pub async fn trace_raw_transaction(
        &self,
        tx: Bytes,
        trace_types: HashSet<TraceType>,
        block_id: Option<BlockId>,
    ) -> Result<TraceResults, Eth::Error> {
        // Decode the raw bytes and recover the signer before building the tx environment.
        let tx = recover_raw_transaction::<PoolPooledTx<Eth::Pool>>(&tx)?
            .map(<Eth::Pool as TransactionPool>::Transaction::pooled_into_consensus);
        let (evm_env, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?;
        let tx_env = self.eth_api().evm_config().tx_env(tx);
        let config = TracingInspectorConfig::from_parity_config(&trace_types);
        self.eth_api()
            .spawn_trace_at_with_state(evm_env, tx_env, config, at, move |inspector, res, db| {
                inspector
                    .into_parity_builder()
                    .into_trace_results_with_state(&res, &trace_types, &db)
                    .map_err(Eth::Error::from_eth_err)
            })
            .await
    }
    /// Performs multiple call traces on top of the same block. i.e. transaction n will be executed
    /// on top of a pending block with all n-1 transactions applied (traced) first.
    ///
    /// Note: Allows tracing dependent transactions, hence all transactions are traced in sequence
    pub async fn trace_call_many(
        &self,
        calls: Vec<(RpcTxReq<Eth::NetworkTypes>, HashSet<TraceType>)>,
        block_id: Option<BlockId>,
    ) -> Result<Vec<TraceResults>, Eth::Error> {
        let at = block_id.unwrap_or(BlockId::pending());
        let (evm_env, at) = self.eth_api().evm_env_at(at).await?;
        let this = self.clone();
        // execute all transactions on top of each other and record the traces
        self.eth_api()
            .spawn_with_state_at_block(at, move |state| {
                let mut results = Vec::with_capacity(calls.len());
                let mut db = CacheDB::new(StateProviderDatabase::new(state));
                let mut calls = calls.into_iter().peekable();
                while let Some((call, trace_types)) = calls.next() {
                    let (evm_env, tx_env) = this.eth_api().prepare_call_env(
                        evm_env.clone(),
                        call,
                        &mut db,
                        Default::default(),
                    )?;
                    let config = TracingInspectorConfig::from_parity_config(&trace_types);
                    let mut inspector = TracingInspector::new(config);
                    let res = this.eth_api().inspect(&mut db, evm_env, tx_env, &mut inspector)?;
                    let trace_res = inspector
                        .into_parity_builder()
                        .into_trace_results_with_state(&res, &trace_types, &db)
                        .map_err(Eth::Error::from_eth_err)?;
                    results.push(trace_res);
                    // need to apply the state changes of this call before executing the
                    // next call
                    if calls.peek().is_some() {
                        // need to apply the state changes of this call before executing
                        // the next call
                        db.commit(res.state)
                    }
                }
                Ok(results)
            })
            .await
    }
    /// Replays a transaction, returning the traces.
    pub async fn replay_transaction(
        &self,
        hash: B256,
        trace_types: HashSet<TraceType>,
    ) -> Result<TraceResults, Eth::Error> {
        let config = TracingInspectorConfig::from_parity_config(&trace_types);
        self.eth_api()
            .spawn_trace_transaction_in_block(hash, config, move |_, inspector, res, db| {
                let trace_res = inspector
                    .into_parity_builder()
                    .into_trace_results_with_state(&res, &trace_types, &db)
                    .map_err(Eth::Error::from_eth_err)?;
                Ok(trace_res)
            })
            .await
            // `None` means the transaction is unknown; surface that as a proper error.
            .transpose()
            .ok_or(EthApiError::TransactionNotFound)?
    }
    /// Returns transaction trace objects at the given index
    ///
    /// Note: For compatibility reasons this only supports 1 single index, since this method is
    /// supposed to return a single trace. See also: <https://github.com/ledgerwatch/erigon/blob/862faf054b8a0fa15962a9c73839b619886101eb/turbo/jsonrpc/trace_filtering.go#L114-L133>
    ///
    /// This returns `None` if `indices` is empty
    pub async fn trace_get(
        &self,
        hash: B256,
        indices: Vec<usize>,
    ) -> Result<Option<LocalizedTransactionTrace>, Eth::Error> {
        if indices.len() != 1 {
            // The OG impl failed if it gets more than a single index
            return Ok(None)
        }
        self.trace_get_index(hash, indices[0]).await
    }
    /// Returns transaction trace object at the given index.
    ///
    /// Returns `None` if the trace object at that index does not exist
    pub async fn trace_get_index(
        &self,
        hash: B256,
        index: usize,
    ) -> Result<Option<LocalizedTransactionTrace>, Eth::Error> {
        Ok(self.trace_transaction(hash).await?.and_then(|traces| traces.into_iter().nth(index)))
    }
    /// Returns all traces for the given transaction hash
    pub async fn trace_transaction(
        &self,
        hash: B256,
    ) -> Result<Option<Vec<LocalizedTransactionTrace>>, Eth::Error> {
        self.eth_api()
            .spawn_trace_transaction_in_block(
                hash,
                TracingInspectorConfig::default_parity(),
                move |tx_info, inspector, _, _| {
                    let traces =
                        inspector.into_parity_builder().into_localized_transaction_traces(tx_info);
                    Ok(traces)
                },
            )
            .await
    }
    /// Returns all opcodes with their count and combined gas usage for the given transaction in no
    /// particular order.
    pub async fn trace_transaction_opcode_gas(
        &self,
        tx_hash: B256,
    ) -> Result<Option<TransactionOpcodeGas>, Eth::Error> {
        self.eth_api()
            .spawn_trace_transaction_in_block_with_inspector(
                tx_hash,
                OpcodeGasInspector::default(),
                move |_tx_info, inspector, _res, _| {
                    let trace = TransactionOpcodeGas {
                        transaction_hash: tx_hash,
                        opcode_gas: inspector.opcode_gas_iter().collect(),
                    };
                    Ok(trace)
                },
            )
            .await
    }
    /// Calculates the base block reward for the given block:
    ///
    /// - if Paris hardfork is activated, no block rewards are given
    /// - if Paris hardfork is not activated, calculate block rewards with block number only
    /// - if Paris hardfork is unknown, calculate block rewards with block number and ttd
    fn calculate_base_block_reward<H: BlockHeader>(
        &self,
        header: &H,
    ) -> Result<Option<u128>, Eth::Error> {
        let chain_spec = self.provider().chain_spec();
        // Paris activation blocks are only known for mainnet and sepolia; any other
        // chain is treated as post-merge (no block rewards).
        let is_paris_activated = if chain_spec.chain() == MAINNET.chain() {
            Some(header.number()) >= EthereumHardfork::Paris.mainnet_activation_block()
        } else if chain_spec.chain() == SEPOLIA.chain() {
            Some(header.number()) >= EthereumHardfork::Paris.sepolia_activation_block()
        } else {
            true
        };
        if is_paris_activated {
            return Ok(None)
        }
        Ok(Some(base_block_reward_pre_merge(&chain_spec, header.number())))
    }
    /// Extracts the reward traces for the given block:
    /// - block reward
    /// - uncle rewards
    fn extract_reward_traces<H: BlockHeader>(
        &self,
        header: &H,
        ommers: Option<&[H]>,
        base_block_reward: u128,
    ) -> Vec<LocalizedTransactionTrace> {
        let ommers_cnt = ommers.map(|o| o.len()).unwrap_or_default();
        let mut traces = Vec::with_capacity(ommers_cnt + 1);
        // Miner reward for the block itself (includes per-ommer bonus).
        let block_reward = block_reward(base_block_reward, ommers_cnt);
        traces.push(reward_trace(
            header,
            RewardAction {
                author: header.beneficiary(),
                reward_type: RewardType::Block,
                value: U256::from(block_reward),
            },
        ));
        let Some(ommers) = ommers else { return traces };
        // One reward trace per uncle, scaled by the uncle's distance to the block.
        for uncle in ommers {
            let uncle_reward = ommer_reward(base_block_reward, header.number(), uncle.number());
            traces.push(reward_trace(
                header,
                RewardAction {
                    author: uncle.beneficiary(),
                    reward_type: RewardType::Uncle,
                    value: U256::from(uncle_reward),
                },
            ));
        }
        traces
    }
}
impl<Eth> TraceApi<Eth>
where
    // tracing methods read from mempool, hence `LoadBlock` trait bound via
    // `TraceExt`
    Eth: TraceExt + 'static,
{
    /// Returns all transaction traces that match the given filter.
    ///
    /// This is similar to [`Self::trace_block`] but only returns traces for transactions that match
    /// the filter.
    pub async fn trace_filter(
        &self,
        filter: TraceFilter,
    ) -> Result<Vec<LocalizedTransactionTrace>, Eth::Error> {
        // We'll reuse the matcher across multiple blocks that are traced in parallel
        let matcher = Arc::new(filter.matcher());
        let TraceFilter { from_block, to_block, after, count, .. } = filter;
        let start = from_block.unwrap_or(0);
        let latest_block = self.provider().best_block_number().map_err(Eth::Error::from_eth_err)?;
        if start > latest_block {
            // can't trace that range
            return Err(EthApiError::HeaderNotFound(start.into()).into());
        }
        let end = to_block.unwrap_or(latest_block);
        if start > end {
            return Err(EthApiError::InvalidParams(
                "invalid parameters: fromBlock cannot be greater than toBlock".to_string(),
            )
            .into())
        }
        // ensure that the range is not too large, since we need to fetch all blocks in the range
        let distance = end.saturating_sub(start);
        if distance > self.inner.eth_config.max_trace_filter_blocks {
            // Report the configured limit rather than a hard-coded number: the limit
            // is configurable via `eth_config.max_trace_filter_blocks`.
            return Err(EthApiError::InvalidParams(format!(
                "Block range too large; currently limited to {} blocks",
                self.inner.eth_config.max_trace_filter_blocks
            ))
            .into())
        }
        // fetch all blocks in that range
        let blocks = self
            .provider()
            .recovered_block_range(start..=end)
            .map_err(Eth::Error::from_eth_err)?
            .into_iter()
            .map(Arc::new)
            .collect::<Vec<_>>();
        // trace all blocks
        let mut block_traces = Vec::with_capacity(blocks.len());
        for block in &blocks {
            let matcher = matcher.clone();
            let traces = self.eth_api().trace_block_until(
                block.hash().into(),
                Some(block.clone()),
                None,
                TracingInspectorConfig::default_parity(),
                move |tx_info, mut ctx| {
                    let mut traces = ctx
                        .take_inspector()
                        .into_parity_builder()
                        .into_localized_transaction_traces(tx_info);
                    // Drop traces the filter doesn't match while still inside the
                    // per-block tracing task.
                    traces.retain(|trace| matcher.matches(&trace.trace));
                    Ok(Some(traces))
                },
            );
            block_traces.push(traces);
        }
        // Await all per-block tracing futures, failing fast on the first error.
        let block_traces = futures::future::try_join_all(block_traces).await?;
        let mut all_traces = block_traces
            .into_iter()
            .flatten()
            .flat_map(|traces| traces.into_iter().flatten().flat_map(|traces| traces.into_iter()))
            .collect::<Vec<_>>();
        // add reward traces for all blocks
        for block in &blocks {
            if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? {
                all_traces.extend(
                    self.extract_reward_traces(
                        block.header(),
                        block.body().ommers(),
                        base_block_reward,
                    )
                    .into_iter()
                    .filter(|trace| matcher.matches(&trace.trace)),
                );
            } else {
                // no block reward, means we're past the Paris hardfork and don't expect any rewards
                // because the blocks in ascending order
                break
            }
        }
        // Skips the first `after` number of matching traces.
        // If `after` is greater than or equal to the number of matched traces, it returns an empty
        // array.
        if let Some(after) = after.map(|a| a as usize) {
            if after < all_traces.len() {
                all_traces.drain(..after);
            } else {
                return Ok(vec![])
            }
        }
        // Return at most `count` of traces
        if let Some(count) = count {
            let count = count as usize;
            if count < all_traces.len() {
                all_traces.truncate(count);
            }
        }
        Ok(all_traces)
    }
    /// Returns traces created at given block.
    pub async fn trace_block(
        &self,
        block_id: BlockId,
    ) -> Result<Option<Vec<LocalizedTransactionTrace>>, Eth::Error> {
        let traces = self.eth_api().trace_block_with(
            block_id,
            None,
            TracingInspectorConfig::default_parity(),
            |tx_info, mut ctx| {
                let traces = ctx
                    .take_inspector()
                    .into_parity_builder()
                    .into_localized_transaction_traces(tx_info);
                Ok(traces)
            },
        );
        // Fetch the block concurrently with tracing; it's needed for reward traces.
        let block = self.eth_api().recovered_block(block_id);
        let (maybe_traces, maybe_block) = futures::try_join!(traces, block)?;
        let mut maybe_traces =
            maybe_traces.map(|traces| traces.into_iter().flatten().collect::<Vec<_>>());
        // Append block/uncle reward traces for pre-merge blocks.
        if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) {
            if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? {
                traces.extend(self.extract_reward_traces(
                    block.header(),
                    block.body().ommers(),
                    base_block_reward,
                ));
            }
        }
        Ok(maybe_traces)
    }
    /// Replays all transactions in a block
    pub async fn replay_block_transactions(
        &self,
        block_id: BlockId,
        trace_types: HashSet<TraceType>,
    ) -> Result<Option<Vec<TraceResultsWithTransactionHash>>, Eth::Error> {
        self.eth_api()
            .trace_block_with(
                block_id,
                None,
                TracingInspectorConfig::from_parity_config(&trace_types),
                move |tx_info, mut ctx| {
                    let mut full_trace = ctx
                        .take_inspector()
                        .into_parity_builder()
                        .into_trace_results(&ctx.result, &trace_types);
                    // If statediffs were requested, populate them with the account balance and
                    // nonce from pre-state
                    if let Some(ref mut state_diff) = full_trace.state_diff {
                        populate_state_diff(state_diff, &ctx.db, ctx.state.iter())
                            .map_err(Eth::Error::from_eth_err)?;
                    }
                    let trace = TraceResultsWithTransactionHash {
                        transaction_hash: tx_info.hash.expect("tx hash is set"),
                        full_trace,
                    };
                    Ok(trace)
                },
            )
            .await
    }
    /// Returns the opcodes of all transactions in the given block.
    ///
    /// This is the same as [`Self::trace_transaction_opcode_gas`] but for all transactions in a
    /// block.
    pub async fn trace_block_opcode_gas(
        &self,
        block_id: BlockId,
    ) -> Result<Option<BlockOpcodeGas>, Eth::Error> {
        let res = self
            .eth_api()
            .trace_block_inspector(
                block_id,
                None,
                OpcodeGasInspector::default,
                move |tx_info, ctx| {
                    let trace = TransactionOpcodeGas {
                        transaction_hash: tx_info.hash.expect("tx hash is set"),
                        opcode_gas: ctx.inspector.opcode_gas_iter().collect(),
                    };
                    Ok(trace)
                },
            )
            .await?;
        let Some(transactions) = res else { return Ok(None) };
        let Some(block) = self.eth_api().recovered_block(block_id).await? else { return Ok(None) };
        Ok(Some(BlockOpcodeGas {
            block_hash: block.hash(),
            block_number: block.number(),
            transactions,
        }))
    }
    /// Returns all storage slots accessed during transaction execution along with their access
    /// counts.
    pub async fn trace_block_storage_access(
        &self,
        block_id: BlockId,
    ) -> Result<Option<BlockStorageAccess>, Eth::Error> {
        let res = self
            .eth_api()
            .trace_block_inspector(
                block_id,
                None,
                StorageInspector::default,
                move |tx_info, ctx| {
                    let trace = TransactionStorageAccess {
                        transaction_hash: tx_info.hash.expect("tx hash is set"),
                        storage_access: ctx.inspector.accessed_slots().clone(),
                        unique_loads: ctx.inspector.unique_loads(),
                        warm_loads: ctx.inspector.warm_loads(),
                    };
                    Ok(trace)
                },
            )
            .await?;
        let Some(transactions) = res else { return Ok(None) };
        let Some(block) = self.eth_api().recovered_block(block_id).await? else { return Ok(None) };
        Ok(Some(BlockStorageAccess {
            block_hash: block.hash(),
            block_number: block.number(),
            transactions,
        }))
    }
}
#[async_trait]
impl<Eth> TraceApiServer<RpcTxReq<Eth::NetworkTypes>> for TraceApi<Eth>
where
Eth: TraceExt + 'static,
{
/// Executes the given call and returns a number of possible traces for it.
///
/// Handler for `trace_call`
async fn trace_call(
&self,
call: RpcTxReq<Eth::NetworkTypes>,
trace_types: HashSet<TraceType>,
block_id: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> RpcResult<TraceResults> {
let _permit = self.acquire_trace_permit().await;
let request =
TraceCallRequest { call, trace_types, block_id, state_overrides, block_overrides };
Ok(Self::trace_call(self, request).await.map_err(Into::into)?)
}
/// Handler for `trace_callMany`
async fn trace_call_many(
&self,
calls: Vec<(RpcTxReq<Eth::NetworkTypes>, HashSet<TraceType>)>,
block_id: Option<BlockId>,
) -> RpcResult<Vec<TraceResults>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_call_many(self, calls, block_id).await.map_err(Into::into)?)
}
/// Handler for `trace_rawTransaction`
async fn trace_raw_transaction(
&self,
data: Bytes,
trace_types: HashSet<TraceType>,
block_id: Option<BlockId>,
) -> RpcResult<TraceResults> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_raw_transaction(self, data, trace_types, block_id)
.await
.map_err(Into::into)?)
}
/// Handler for `trace_replayBlockTransactions`
async fn replay_block_transactions(
&self,
block_id: BlockId,
trace_types: HashSet<TraceType>,
) -> RpcResult<Option<Vec<TraceResultsWithTransactionHash>>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::replay_block_transactions(self, block_id, trace_types)
.await
.map_err(Into::into)?)
}
/// Handler for `trace_replayTransaction`
async fn replay_transaction(
&self,
transaction: B256,
trace_types: HashSet<TraceType>,
) -> RpcResult<TraceResults> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::replay_transaction(self, transaction, trace_types).await.map_err(Into::into)?)
}
/// Handler for `trace_block`
async fn trace_block(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<LocalizedTransactionTrace>>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_block(self, block_id).await.map_err(Into::into)?)
}
/// Handler for `trace_filter`
///
/// This is similar to `eth_getLogs` but for traces.
///
/// # Limitations
/// This currently requires block filter fields, since reth does not have address indices yet.
async fn trace_filter(&self, filter: TraceFilter) -> RpcResult<Vec<LocalizedTransactionTrace>> {
Ok(Self::trace_filter(self, filter).await.map_err(Into::into)?)
}
/// Returns transaction trace at given index.
/// Handler for `trace_get`
async fn trace_get(
&self,
hash: B256,
indices: Vec<Index>,
) -> RpcResult<Option<LocalizedTransactionTrace>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_get(self, hash, indices.into_iter().map(Into::into).collect())
.await
.map_err(Into::into)?)
}
/// Handler for `trace_transaction`
async fn trace_transaction(
&self,
hash: B256,
) -> RpcResult<Option<Vec<LocalizedTransactionTrace>>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_transaction(self, hash).await.map_err(Into::into)?)
}
/// Handler for `trace_transactionOpcodeGas`
async fn trace_transaction_opcode_gas(
&self,
tx_hash: B256,
) -> RpcResult<Option<TransactionOpcodeGas>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_transaction_opcode_gas(self, tx_hash).await.map_err(Into::into)?)
}
/// Handler for `trace_blockOpcodeGas`
async fn trace_block_opcode_gas(&self, block_id: BlockId) -> RpcResult<Option<BlockOpcodeGas>> {
let _permit = self.acquire_trace_permit().await;
Ok(Self::trace_block_opcode_gas(self, block_id).await.map_err(Into::into)?)
}
}
impl<Eth> std::fmt::Debug for TraceApi<Eth> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TraceApi").finish_non_exhaustive()
}
}
impl<Eth> Clone for TraceApi<Eth> {
fn clone(&self) -> Self {
Self { inner: Arc::clone(&self.inner) }
}
}
struct TraceApiInner<Eth> {
/// Access to commonly used code of the `eth` namespace
eth_api: Eth,
// restrict the number of concurrent calls to `trace_*`
blocking_task_guard: BlockingTaskGuard,
// eth config settings
eth_config: EthConfig,
}
/// Response type for storage tracing that contains all accessed storage slots
/// for a transaction.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TransactionStorageAccess {
/// Hash of the transaction
pub transaction_hash: B256,
/// Tracks storage slots and access counter.
pub storage_access: HashMap<Address, HashMap<B256, u64>>,
/// Number of unique storage loads
pub unique_loads: u64,
/// Number of warm storage loads
pub warm_loads: u64,
}
/// Response type for storage tracing that contains all accessed storage slots
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BlockStorageAccess {
/// The block hash
pub block_hash: BlockHash,
/// The block's number
pub block_number: u64,
/// All executed transactions in the block in the order they were executed
pub transactions: Vec<TransactionStorageAccess>,
}
/// Helper to construct a [`LocalizedTransactionTrace`] that describes a reward to the block
/// beneficiary.
fn reward_trace<H: BlockHeader>(header: &H, reward: RewardAction) -> LocalizedTransactionTrace {
LocalizedTransactionTrace {
block_hash: Some(header.hash_slow()),
block_number: Some(header.number()),
transaction_hash: None,
transaction_position: None,
trace: TransactionTrace {
trace_address: vec![],
subtraces: 0,
action: Action::Reward(reward),
error: None,
result: None,
},
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/web3.rs | crates/rpc/rpc/src/web3.rs | use alloy_primitives::{keccak256, Bytes, B256};
use async_trait::async_trait;
use jsonrpsee::core::RpcResult;
use reth_network_api::NetworkInfo;
use reth_rpc_api::Web3ApiServer;
use reth_rpc_server_types::ToRpcResult;
/// `web3` API implementation.
///
/// This type provides the functionality for handling `web3` related requests.
pub struct Web3Api<N> {
/// An interface to interact with the network
network: N,
}
impl<N> Web3Api<N> {
/// Creates a new instance of `Web3Api`.
pub const fn new(network: N) -> Self {
Self { network }
}
}
#[async_trait]
impl<N> Web3ApiServer for Web3Api<N>
where
N: NetworkInfo + 'static,
{
/// Handler for `web3_clientVersion`
async fn client_version(&self) -> RpcResult<String> {
let status = self.network.network_status().await.to_rpc_result()?;
Ok(status.client_version)
}
/// Handler for `web3_sha3`
fn sha3(&self, input: Bytes) -> RpcResult<B256> {
Ok(keccak256(input))
}
}
impl<N> std::fmt::Debug for Web3Api<N> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Web3Api").finish_non_exhaustive()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/debug.rs | crates/rpc/rpc/src/debug.rs | use alloy_consensus::{transaction::SignerRecoverable, BlockHeader};
use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag};
use alloy_genesis::ChainConfig;
use alloy_primitives::{uint, Address, Bytes, B256};
use alloy_rlp::{Decodable, Encodable};
use alloy_rpc_types_debug::ExecutionWitness;
use alloy_rpc_types_eth::{
state::EvmOverrides, Block as RpcBlock, BlockError, Bundle, StateContext, TransactionInfo,
};
use alloy_rpc_types_trace::geth::{
call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType,
GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace,
NoopFrame, TraceResult,
};
use async_trait::async_trait;
use jsonrpsee::core::RpcResult;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor, TxEnvFor};
use reth_primitives_traits::{
Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock, SignedTransaction,
};
use reth_revm::{
database::StateProviderDatabase,
db::{CacheDB, State},
witness::ExecutionWitnessRecord,
};
use reth_rpc_api::DebugApiServer;
use reth_rpc_convert::RpcTxReq;
use reth_rpc_eth_api::{
helpers::{EthTransactions, TraceExt},
EthApiTypes, FromEthApiError, RpcNodeCore,
};
use reth_rpc_eth_types::{EthApiError, StateCacheDb};
use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
use reth_storage_api::{
BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderBlock, ReceiptProviderIdExt,
StateProofProvider, StateProviderFactory, StateRootProvider, TransactionVariant,
};
use reth_tasks::pool::BlockingTaskGuard;
use reth_trie_common::{updates::TrieUpdates, HashedPostState};
use revm::{context_interface::Transaction, state::EvmState, DatabaseCommit};
use revm_inspectors::tracing::{
FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext,
};
use std::sync::Arc;
use tokio::sync::{AcquireError, OwnedSemaphorePermit};
/// `debug` API implementation.
///
/// This type provides the functionality for handling `debug` related requests.
pub struct DebugApi<Eth> {
inner: Arc<DebugApiInner<Eth>>,
}
// === impl DebugApi ===
impl<Eth> DebugApi<Eth> {
/// Create a new instance of the [`DebugApi`]
pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self {
let inner = Arc::new(DebugApiInner { eth_api, blocking_task_guard });
Self { inner }
}
/// Access the underlying `Eth` API.
pub fn eth_api(&self) -> &Eth {
&self.inner.eth_api
}
}
impl<Eth: RpcNodeCore> DebugApi<Eth> {
/// Access the underlying provider.
pub fn provider(&self) -> &Eth::Provider {
self.inner.eth_api.provider()
}
}
// === impl DebugApi ===
impl<Eth> DebugApi<Eth>
where
Eth: EthApiTypes + TraceExt + 'static,
{
/// Acquires a permit to execute a tracing call.
async fn acquire_trace_permit(&self) -> Result<OwnedSemaphorePermit, AcquireError> {
self.inner.blocking_task_guard.clone().acquire_owned().await
}
/// Trace the entire block asynchronously
async fn trace_block(
&self,
block: Arc<RecoveredBlock<ProviderBlock<Eth::Provider>>>,
evm_env: EvmEnvFor<Eth::Evm>,
opts: GethDebugTracingOptions,
) -> Result<Vec<TraceResult>, Eth::Error> {
// replay all transactions of the block
let this = self.clone();
self.eth_api()
.spawn_with_state_at_block(block.parent_hash().into(), move |state| {
let mut results = Vec::with_capacity(block.body().transactions().len());
let mut db = CacheDB::new(StateProviderDatabase::new(state));
this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?;
let mut transactions = block.transactions_recovered().enumerate().peekable();
let mut inspector = None;
while let Some((index, tx)) = transactions.next() {
let tx_hash = *tx.tx_hash();
let tx_env = this.eth_api().evm_config().tx_env(tx);
let (result, state_changes) = this.trace_transaction(
&opts,
evm_env.clone(),
tx_env,
&mut db,
Some(TransactionContext {
block_hash: Some(block.hash()),
tx_hash: Some(tx_hash),
tx_index: Some(index),
}),
&mut inspector,
)?;
inspector = inspector.map(|insp| insp.fused());
results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) });
if transactions.peek().is_some() {
// need to apply the state changes of this transaction before executing the
// next transaction
db.commit(state_changes)
}
}
Ok(results)
})
.await
}
/// Replays the given block and returns the trace of each transaction.
///
/// This expects a rlp encoded block
///
/// Note, the parent of this block must be present, or it will fail.
pub async fn debug_trace_raw_block(
&self,
rlp_block: Bytes,
opts: GethDebugTracingOptions,
) -> Result<Vec<TraceResult>, Eth::Error> {
let block: ProviderBlock<Eth::Provider> = Decodable::decode(&mut rlp_block.as_ref())
.map_err(BlockError::RlpDecodeRawBlock)
.map_err(Eth::Error::from_eth_err)?;
let evm_env = self.eth_api().evm_config().evm_env(block.header());
// Depending on EIP-2 we need to recover the transactions differently
let senders =
if self.provider().chain_spec().is_homestead_active_at_block(block.header().number()) {
block
.body()
.transactions()
.iter()
.map(|tx| tx.recover_signer().map_err(Eth::Error::from_eth_err))
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.collect()
} else {
block
.body()
.transactions()
.iter()
.map(|tx| tx.recover_signer_unchecked().map_err(Eth::Error::from_eth_err))
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.collect()
};
self.trace_block(Arc::new(block.into_recovered_with_signers(senders)), evm_env, opts).await
}
/// Replays a block and returns the trace of each transaction.
pub async fn debug_trace_block(
&self,
block_id: BlockId,
opts: GethDebugTracingOptions,
) -> Result<Vec<TraceResult>, Eth::Error> {
let block_hash = self
.provider()
.block_hash_for_id(block_id)
.map_err(Eth::Error::from_eth_err)?
.ok_or(EthApiError::HeaderNotFound(block_id))?;
let ((evm_env, _), block) = futures::try_join!(
self.eth_api().evm_env_at(block_hash.into()),
self.eth_api().recovered_block(block_hash.into()),
)?;
let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?;
self.trace_block(block, evm_env, opts).await
}
/// Trace the transaction according to the provided options.
///
/// Ref: <https://geth.ethereum.org/docs/developers/evm-tracing/built-in-tracers>
pub async fn debug_trace_transaction(
&self,
tx_hash: B256,
opts: GethDebugTracingOptions,
) -> Result<GethTrace, Eth::Error> {
let (transaction, block) = match self.eth_api().transaction_and_block(tx_hash).await? {
None => return Err(EthApiError::TransactionNotFound.into()),
Some(res) => res,
};
let (evm_env, _) = self.eth_api().evm_env_at(block.hash().into()).await?;
// we need to get the state of the parent block because we're essentially replaying the
// block the transaction is included in
let state_at: BlockId = block.parent_hash().into();
let block_hash = block.hash();
let this = self.clone();
self.eth_api()
.spawn_with_state_at_block(state_at, move |state| {
let block_txs = block.transactions_recovered();
// configure env for the target transaction
let tx = transaction.into_recovered();
let mut db = CacheDB::new(StateProviderDatabase::new(state));
this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?;
// replay all transactions prior to the targeted transaction
let index = this.eth_api().replay_transactions_until(
&mut db,
evm_env.clone(),
block_txs,
*tx.tx_hash(),
)?;
let tx_env = this.eth_api().evm_config().tx_env(&tx);
this.trace_transaction(
&opts,
evm_env,
tx_env,
&mut db,
Some(TransactionContext {
block_hash: Some(block_hash),
tx_index: Some(index),
tx_hash: Some(*tx.tx_hash()),
}),
&mut None,
)
.map(|(trace, _)| trace)
})
.await
}
/// The `debug_traceCall` method lets you run an `eth_call` within the context of the given
/// block execution using the final state of parent block as the base.
///
/// Differences compare to `eth_call`:
/// - `debug_traceCall` executes with __enabled__ basefee check, `eth_call` does not: <https://github.com/paradigmxyz/reth/issues/6240>
pub async fn debug_trace_call(
&self,
call: RpcTxReq<Eth::NetworkTypes>,
block_id: Option<BlockId>,
opts: GethDebugTracingCallOptions,
) -> Result<GethTrace, Eth::Error> {
let at = block_id.unwrap_or_default();
let GethDebugTracingCallOptions {
tracing_options, state_overrides, block_overrides, ..
} = opts;
let overrides = EvmOverrides::new(state_overrides, block_overrides.map(Box::new));
let GethDebugTracingOptions { config, tracer, tracer_config, .. } = tracing_options;
let this = self.clone();
if let Some(tracer) = tracer {
#[allow(unreachable_patterns)]
return match tracer {
GethDebugTracerType::BuiltInTracer(tracer) => match tracer {
GethDebugBuiltInTracerType::FourByteTracer => {
let mut inspector = FourByteInspector::default();
let inspector = self
.eth_api()
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?;
Ok(inspector)
})
.await?;
Ok(FourByteFrame::from(&inspector).into())
}
GethDebugBuiltInTracerType::CallTracer => {
let call_config = tracer_config
.into_call_config()
.map_err(|_| EthApiError::InvalidTracerConfig)?;
let mut inspector = TracingInspector::new(
TracingInspectorConfig::from_geth_call_config(&call_config),
);
let frame = self
.eth_api()
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
let gas_limit = tx_env.gas_limit();
let res =
this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?;
let frame = inspector
.with_transaction_gas_limit(gas_limit)
.into_geth_builder()
.geth_call_traces(call_config, res.result.gas_used());
Ok(frame.into())
})
.await?;
Ok(frame)
}
GethDebugBuiltInTracerType::PreStateTracer => {
let prestate_config = tracer_config
.into_pre_state_config()
.map_err(|_| EthApiError::InvalidTracerConfig)?;
let mut inspector = TracingInspector::new(
TracingInspectorConfig::from_geth_prestate_config(&prestate_config),
);
let frame = self
.eth_api()
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
// wrapper is hack to get around 'higher-ranked lifetime error',
// see <https://github.com/rust-lang/rust/issues/100013>
let db = db.0;
let gas_limit = tx_env.gas_limit();
let res = this.eth_api().inspect(
&mut *db,
evm_env,
tx_env,
&mut inspector,
)?;
let frame = inspector
.with_transaction_gas_limit(gas_limit)
.into_geth_builder()
.geth_prestate_traces(&res, &prestate_config, db)
.map_err(Eth::Error::from_eth_err)?;
Ok(frame)
})
.await?;
Ok(frame.into())
}
GethDebugBuiltInTracerType::NoopTracer => Ok(NoopFrame::default().into()),
GethDebugBuiltInTracerType::MuxTracer => {
let mux_config = tracer_config
.into_mux_config()
.map_err(|_| EthApiError::InvalidTracerConfig)?;
let mut inspector = MuxInspector::try_from_config(mux_config)
.map_err(Eth::Error::from_eth_err)?;
let frame = self
.inner
.eth_api
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
// wrapper is hack to get around 'higher-ranked lifetime error', see
// <https://github.com/rust-lang/rust/issues/100013>
let db = db.0;
let tx_info = TransactionInfo {
block_number: Some(evm_env.block_env.number.saturating_to()),
base_fee: Some(evm_env.block_env.basefee),
hash: None,
block_hash: None,
index: None,
};
let res = this.eth_api().inspect(
&mut *db,
evm_env,
tx_env,
&mut inspector,
)?;
let frame = inspector
.try_into_mux_frame(&res, db, tx_info)
.map_err(Eth::Error::from_eth_err)?;
Ok(frame.into())
})
.await?;
Ok(frame)
}
GethDebugBuiltInTracerType::FlatCallTracer => {
let flat_call_config = tracer_config
.into_flat_call_config()
.map_err(|_| EthApiError::InvalidTracerConfig)?;
let mut inspector = TracingInspector::new(
TracingInspectorConfig::from_flat_call_config(&flat_call_config),
);
let frame: FlatCallFrame = self
.inner
.eth_api
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
let gas_limit = tx_env.gas_limit();
this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?;
let tx_info = TransactionInfo::default();
let frame: FlatCallFrame = inspector
.with_transaction_gas_limit(gas_limit)
.into_parity_builder()
.into_localized_transaction_traces(tx_info);
Ok(frame)
})
.await?;
Ok(frame.into())
}
},
#[cfg(not(feature = "js-tracer"))]
GethDebugTracerType::JsTracer(_) => {
Err(EthApiError::Unsupported("JS Tracer is not enabled").into())
}
#[cfg(feature = "js-tracer")]
GethDebugTracerType::JsTracer(code) => {
let config = tracer_config.into_json();
let (_, at) = self.eth_api().evm_env_at(at).await?;
let res = self
.eth_api()
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
// wrapper is hack to get around 'higher-ranked lifetime error', see
// <https://github.com/rust-lang/rust/issues/100013>
let db = db.0;
let mut inspector =
revm_inspectors::tracing::js::JsInspector::new(code, config)
.map_err(Eth::Error::from_eth_err)?;
let res = this.eth_api().inspect(
&mut *db,
evm_env.clone(),
tx_env.clone(),
&mut inspector,
)?;
inspector
.json_result(res, &tx_env, &evm_env.block_env, db)
.map_err(Eth::Error::from_eth_err)
})
.await?;
Ok(GethTrace::JS(res))
}
_ => {
// Note: this match is non-exhaustive in case we need to add support for
// additional tracers
Err(EthApiError::Unsupported("unsupported tracer").into())
}
};
}
// default structlog tracer
let inspector_config = TracingInspectorConfig::from_geth_config(&config);
let mut inspector = TracingInspector::new(inspector_config);
let (res, tx_gas_limit, inspector) = self
.eth_api()
.spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| {
let gas_limit = tx_env.gas_limit();
let res = this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?;
Ok((res, gas_limit, inspector))
})
.await?;
let gas_used = res.result.gas_used();
let return_value = res.result.into_output().unwrap_or_default();
let frame = inspector
.with_transaction_gas_limit(tx_gas_limit)
.into_geth_builder()
.geth_traces(gas_used, return_value, config);
Ok(frame.into())
}
/// The `debug_traceCallMany` method lets you run an `eth_callMany` within the context of the
/// given block execution using the first n transactions in the given block as base.
/// Each following bundle increments block number by 1 and block timestamp by 12 seconds
pub async fn debug_trace_call_many(
&self,
bundles: Vec<Bundle<RpcTxReq<Eth::NetworkTypes>>>,
state_context: Option<StateContext>,
opts: Option<GethDebugTracingCallOptions>,
) -> Result<Vec<Vec<GethTrace>>, Eth::Error> {
if bundles.is_empty() {
return Err(EthApiError::InvalidParams(String::from("bundles are empty.")).into());
}
let StateContext { transaction_index, block_number } = state_context.unwrap_or_default();
let transaction_index = transaction_index.unwrap_or_default();
let target_block = block_number.unwrap_or_default();
let ((mut evm_env, _), block) = futures::try_join!(
self.eth_api().evm_env_at(target_block),
self.eth_api().recovered_block(target_block),
)?;
let opts = opts.unwrap_or_default();
let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?;
let GethDebugTracingCallOptions { tracing_options, mut state_overrides, .. } = opts;
// we're essentially replaying the transactions in the block here, hence we need the state
// that points to the beginning of the block, which is the state at the parent block
let mut at = block.parent_hash();
let mut replay_block_txs = true;
// if a transaction index is provided, we need to replay the transactions until the index
let num_txs =
transaction_index.index().unwrap_or_else(|| block.body().transactions().len());
// but if all transactions are to be replayed, we can use the state at the block itself
// this works with the exception of the PENDING block, because its state might not exist if
// built locally
if !target_block.is_pending() && num_txs == block.body().transactions().len() {
at = block.hash();
replay_block_txs = false;
}
let this = self.clone();
self.eth_api()
.spawn_with_state_at_block(at.into(), move |state| {
// the outer vec for the bundles
let mut all_bundles = Vec::with_capacity(bundles.len());
let mut db = CacheDB::new(StateProviderDatabase::new(state));
if replay_block_txs {
// only need to replay the transactions in the block if not all transactions are
// to be replayed
let transactions = block.transactions_recovered().take(num_txs);
// Execute all transactions until index
for tx in transactions {
let tx_env = this.eth_api().evm_config().tx_env(tx);
let res = this.eth_api().transact(&mut db, evm_env.clone(), tx_env)?;
db.commit(res.state);
}
}
// Trace all bundles
let mut bundles = bundles.into_iter().peekable();
while let Some(bundle) = bundles.next() {
let mut results = Vec::with_capacity(bundle.transactions.len());
let Bundle { transactions, block_override } = bundle;
let block_overrides = block_override.map(Box::new);
let mut inspector = None;
let mut transactions = transactions.into_iter().peekable();
while let Some(tx) = transactions.next() {
// apply state overrides only once, before the first transaction
let state_overrides = state_overrides.take();
let overrides = EvmOverrides::new(state_overrides, block_overrides.clone());
let (evm_env, tx_env) = this.eth_api().prepare_call_env(
evm_env.clone(),
tx,
&mut db,
overrides,
)?;
let (trace, state) = this.trace_transaction(
&tracing_options,
evm_env,
tx_env,
&mut db,
None,
&mut inspector,
)?;
inspector = inspector.map(|insp| insp.fused());
// If there is more transactions, commit the database
// If there is no transactions, but more bundles, commit to the database too
if transactions.peek().is_some() || bundles.peek().is_some() {
db.commit(state);
}
results.push(trace);
}
// Increment block_env number and timestamp for the next bundle
evm_env.block_env.number += uint!(1_U256);
evm_env.block_env.timestamp += uint!(12_U256);
all_bundles.push(results);
}
Ok(all_bundles)
})
.await
}
/// Generates an execution witness for the given block hash. see
/// [`Self::debug_execution_witness`] for more info.
pub async fn debug_execution_witness_by_block_hash(
&self,
hash: B256,
) -> Result<ExecutionWitness, Eth::Error> {
let this = self.clone();
let block = this
.eth_api()
.recovered_block(hash.into())
.await?
.ok_or(EthApiError::HeaderNotFound(hash.into()))?;
self.debug_execution_witness_for_block(block).await
}
/// The `debug_executionWitness` method allows for re-execution of a block with the purpose of
/// generating an execution witness. The witness comprises of a map of all hashed trie nodes to
/// their preimages that were required during the execution of the block, including during state
/// root recomputation.
pub async fn debug_execution_witness(
&self,
block_id: BlockNumberOrTag,
) -> Result<ExecutionWitness, Eth::Error> {
let this = self.clone();
let block = this
.eth_api()
.recovered_block(block_id.into())
.await?
.ok_or(EthApiError::HeaderNotFound(block_id.into()))?;
self.debug_execution_witness_for_block(block).await
}
/// Generates an execution witness, using the given recovered block.
pub async fn debug_execution_witness_for_block(
&self,
block: Arc<RecoveredBlock<ProviderBlock<Eth::Provider>>>,
) -> Result<ExecutionWitness, Eth::Error> {
let this = self.clone();
let block_number = block.header().number();
let (mut exec_witness, lowest_block_number) = self
.eth_api()
.spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| {
let db = StateProviderDatabase::new(&state_provider);
let block_executor = this.eth_api().evm_config().executor(db);
let mut witness_record = ExecutionWitnessRecord::default();
let _ = block_executor
.execute_with_state_closure(&block, |statedb: &State<_>| {
witness_record.record_executed_state(statedb);
})
.map_err(|err| EthApiError::Internal(err.into()))?;
let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } =
witness_record;
let state = state_provider
.witness(Default::default(), hashed_state)
.map_err(EthApiError::from)?;
Ok((
ExecutionWitness { state, codes, keys, ..Default::default() },
lowest_block_number,
))
})
.await?;
let smallest = match lowest_block_number {
Some(smallest) => smallest,
None => {
// Return only the parent header, if there were no calls to the
// BLOCKHASH opcode.
block_number.saturating_sub(1)
}
};
let range = smallest..block_number;
// TODO: Check if headers_range errors when one of the headers in the range is missing
exec_witness.headers = self
.provider()
.headers_range(range)
.map_err(EthApiError::from)?
.into_iter()
.map(|header| {
let mut serialized_header = Vec::new();
header.encode(&mut serialized_header);
serialized_header.into()
})
.collect();
Ok(exec_witness)
}
/// Returns the code associated with a given hash at the specified block ID. If no code is
/// found, it returns None. If no block ID is provided, it defaults to the latest block.
pub async fn debug_code_by_hash(
&self,
hash: B256,
block_id: Option<BlockId>,
) -> Result<Option<Bytes>, Eth::Error> {
Ok(self
.provider()
.state_by_block_id(block_id.unwrap_or_default())
.map_err(Eth::Error::from_eth_err)?
.bytecode_by_hash(&hash)
.map_err(Eth::Error::from_eth_err)?
.map(|b| b.original_bytes()))
}
/// Executes the configured transaction with the environment on the given database.
///
/// It optionally takes fused inspector ([`TracingInspector::fused`]) to avoid re-creating the
/// inspector for each transaction. This is useful when tracing multiple transactions in a
/// block. This is only useful for block tracing which uses the same tracer for all transactions
/// in the block.
///
/// Caution: If the inspector is provided then `opts.tracer_config` is ignored.
///
/// Returns the trace frame and the state that got updated after executing the transaction.
///
/// Note: this does not apply any state overrides if they're configured in the `opts`.
///
/// Caution: this is blocking and should be performed on a blocking task.
fn trace_transaction(
&self,
opts: &GethDebugTracingOptions,
evm_env: EvmEnvFor<Eth::Evm>,
tx_env: TxEnvFor<Eth::Evm>,
db: &mut StateCacheDb<'_>,
transaction_context: Option<TransactionContext>,
fused_inspector: &mut Option<TracingInspector>,
) -> Result<(GethTrace, EvmState), Eth::Error> {
let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts;
let tx_info = TransactionInfo {
hash: transaction_context.as_ref().map(|c| c.tx_hash).unwrap_or_default(),
index: transaction_context
.as_ref()
.map(|c| c.tx_index.map(|i| i as u64))
.unwrap_or_default(),
block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(),
block_number: Some(evm_env.block_env.number.saturating_to()),
base_fee: Some(evm_env.block_env.basefee),
};
if let Some(tracer) = tracer {
#[allow(unreachable_patterns)]
return match tracer {
GethDebugTracerType::BuiltInTracer(tracer) => match tracer {
GethDebugBuiltInTracerType::FourByteTracer => {
let mut inspector = FourByteInspector::default();
let res = self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?;
return Ok((FourByteFrame::from(&inspector).into(), res.state));
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/admin.rs | crates/rpc/rpc/src/admin.rs | use std::sync::Arc;
use alloy_genesis::ChainConfig;
use alloy_rpc_types_admin::{
EthInfo, EthPeerInfo, EthProtocolInfo, NodeInfo, PeerInfo, PeerNetworkInfo, PeerProtocolInfo,
Ports, ProtocolInfo,
};
use async_trait::async_trait;
use jsonrpsee::core::RpcResult;
use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks, ForkCondition};
use reth_network_api::{NetworkInfo, Peers};
use reth_network_peers::{id2pk, AnyNode, NodeRecord};
use reth_network_types::PeerKind;
use reth_rpc_api::AdminApiServer;
use reth_rpc_server_types::ToRpcResult;
/// `admin` API implementation.
///
/// This type provides the functionality for handling `admin` related requests.
pub struct AdminApi<N, ChainSpec> {
/// An interface to interact with the network
network: N,
/// The specification of the blockchain's configuration.
chain_spec: Arc<ChainSpec>,
}
impl<N, ChainSpec> AdminApi<N, ChainSpec> {
/// Creates a new instance of `AdminApi`.
pub const fn new(network: N, chain_spec: Arc<ChainSpec>) -> Self {
Self { network, chain_spec }
}
}
#[async_trait]
impl<N, ChainSpec> AdminApiServer for AdminApi<N, ChainSpec>
where
    N: NetworkInfo + Peers + 'static,
    ChainSpec: EthChainSpec + EthereumHardforks + Send + Sync + 'static,
{
    /// Handler for `admin_addPeer`
    fn add_peer(&self, record: NodeRecord) -> RpcResult<bool> {
        // Register the peer for both its TCP and UDP (discovery) endpoints.
        self.network.add_peer_with_udp(record.id, record.tcp_addr(), record.udp_addr());
        Ok(true)
    }
    /// Handler for `admin_removePeer`
    fn remove_peer(&self, record: AnyNode) -> RpcResult<bool> {
        self.network.remove_peer(record.peer_id(), PeerKind::Basic);
        Ok(true)
    }
    /// Handler for `admin_addTrustedPeer`
    fn add_trusted_peer(&self, record: AnyNode) -> RpcResult<bool> {
        // If a full enode record (with socket addresses) is available, register
        // it; the peer id is always marked trusted regardless.
        if let Some(record) = record.node_record() {
            self.network.add_trusted_peer_with_udp(record.id, record.tcp_addr(), record.udp_addr())
        }
        self.network.add_trusted_peer_id(record.peer_id());
        Ok(true)
    }
    /// Handler for `admin_removeTrustedPeer`
    fn remove_trusted_peer(&self, record: AnyNode) -> RpcResult<bool> {
        self.network.remove_peer(record.peer_id(), PeerKind::Trusted);
        Ok(true)
    }
    /// Handler for `admin_peers`
    async fn peers(&self) -> RpcResult<Vec<PeerInfo>> {
        let peers = self.network.get_all_peers().await.to_rpc_result()?;
        let mut infos = Vec::with_capacity(peers.len());
        for peer in peers {
            // Peers whose id cannot be converted to a valid public key are
            // silently skipped rather than failing the whole request.
            if let Ok(pk) = id2pk(peer.remote_id) {
                infos.push(PeerInfo {
                    id: pk.to_string(),
                    name: peer.client_version.to_string(),
                    enode: peer.enode,
                    enr: peer.enr,
                    caps: peer
                        .capabilities
                        .capabilities()
                        .iter()
                        .map(|cap| cap.to_string())
                        .collect(),
                    network: PeerNetworkInfo {
                        remote_address: peer.remote_addr,
                        local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()),
                        inbound: peer.direction.is_incoming(),
                        trusted: peer.kind.is_trusted(),
                        static_node: peer.kind.is_static(),
                    },
                    protocols: PeerProtocolInfo {
                        eth: Some(EthPeerInfo::Info(EthInfo {
                            version: peer.status.version as u64,
                        })),
                        snap: None,
                        other: Default::default(),
                    },
                })
            }
        }
        Ok(infos)
    }
    /// Handler for `admin_nodeInfo`
    async fn node_info(&self) -> RpcResult<NodeInfo> {
        let enode = self.network.local_node_record();
        let status = self.network.network_status().await.to_rpc_result()?;
        // Start from the genesis chain config and overlay values derived from
        // the live chain spec (chain id, Paris TTD, deposit contract).
        let mut config = ChainConfig {
            chain_id: self.chain_spec.chain().id(),
            terminal_total_difficulty_passed: self
                .chain_spec
                .final_paris_total_difficulty()
                .is_some(),
            terminal_total_difficulty: self
                .chain_spec
                .ethereum_fork_activation(EthereumHardfork::Paris)
                .ttd(),
            deposit_contract_address: self.chain_spec.deposit_contract().map(|dc| dc.address),
            ..self.chain_spec.genesis().config.clone()
        };
        // helper macro to set the block or time for a hardfork if known
        macro_rules! set_block_or_time {
            ($config:expr, [$( $field:ident => $fork:ident,)*]) => {
                $(
                    // don't overwrite if already set
                    if $config.$field.is_none() {
                        $config.$field = match self.chain_spec.ethereum_fork_activation(EthereumHardfork::$fork) {
                            ForkCondition::Block(block) => Some(block),
                            ForkCondition::TTD { fork_block, .. } => fork_block,
                            ForkCondition::Timestamp(ts) => Some(ts),
                            ForkCondition::Never => None,
                        };
                    }
                )*
            };
        }
        // Fill in the activation block/timestamp for each known hardfork.
        set_block_or_time!(config, [
            homestead_block => Homestead,
            dao_fork_block => Dao,
            eip150_block => Tangerine,
            eip155_block => SpuriousDragon,
            eip158_block => SpuriousDragon,
            byzantium_block => Byzantium,
            constantinople_block => Constantinople,
            petersburg_block => Petersburg,
            istanbul_block => Istanbul,
            muir_glacier_block => MuirGlacier,
            berlin_block => Berlin,
            london_block => London,
            arrow_glacier_block => ArrowGlacier,
            gray_glacier_block => GrayGlacier,
            shanghai_time => Shanghai,
            cancun_time => Cancun,
            prague_time => Prague,
        ]);
        Ok(NodeInfo {
            // Fall back to hex-encoding the raw peer id if it is not a valid
            // public key.
            id: id2pk(enode.id)
                .map(|pk| pk.to_string())
                .unwrap_or_else(|_| alloy_primitives::hex::encode(enode.id.as_slice())),
            name: status.client_version,
            enode: enode.to_string(),
            enr: self.network.local_enr().to_string(),
            ip: enode.address,
            ports: Ports { discovery: enode.udp_port, listener: enode.tcp_port },
            listen_addr: enode.tcp_addr(),
            #[expect(deprecated)]
            protocols: ProtocolInfo {
                eth: Some(EthProtocolInfo {
                    network: status.eth_protocol_info.network,
                    genesis: status.eth_protocol_info.genesis,
                    config,
                    head: status.eth_protocol_info.head,
                    difficulty: None,
                }),
                snap: None,
            },
        })
    }
    /// Handler for `admin_peerEvents`
    async fn subscribe_peer_events(
        &self,
        _pending: jsonrpsee::PendingSubscriptionSink,
    ) -> jsonrpsee::core::SubscriptionResult {
        // Not yet supported; rejects the subscription with an error message.
        Err("admin_peerEvents is not implemented yet".into())
    }
}
impl<N, ChainSpec> std::fmt::Debug for AdminApi<N, ChainSpec> {
    /// Renders an opaque `AdminApi { .. }`; internals are elided so no `Debug`
    /// bounds are required on `N` or `ChainSpec`.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut output = formatter.debug_struct("AdminApi");
        output.finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/rpc.rs | crates/rpc/rpc/src/rpc.rs | use alloy_primitives::map::HashMap;
use alloy_rpc_types::RpcModules;
use jsonrpsee::core::RpcResult;
use reth_rpc_api::RpcApiServer;
use std::sync::Arc;
/// `rpc` API implementation.
///
/// This type provides the functionality for handling `rpc` requests
#[derive(Debug, Clone, Default)]
pub struct RPCApi {
    /// The available RPC modules and their versions, shared cheaply via `Arc`.
    rpc_modules: Arc<RpcModules>,
}
impl RPCApi {
    /// Creates a new `RPCApi` from a map of module names to module versions.
    pub fn new(module_map: HashMap<String, String>) -> Self {
        let rpc_modules = Arc::new(RpcModules::new(module_map));
        Self { rpc_modules }
    }
}
impl RpcApiServer for RPCApi {
    /// Handler for `rpc_modules`; returns a clone of the configured module map.
    fn rpc_modules(&self) -> RpcResult<RpcModules> {
        Ok(self.rpc_modules.as_ref().clone())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/txpool.rs | crates/rpc/rpc/src/txpool.rs | use core::fmt;
use std::collections::BTreeMap;
use alloy_consensus::Transaction;
use alloy_primitives::Address;
use alloy_rpc_types_txpool::{
TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus,
};
use async_trait::async_trait;
use jsonrpsee::core::RpcResult;
use reth_primitives_traits::NodePrimitives;
use reth_rpc_api::TxPoolApiServer;
use reth_rpc_convert::{RpcConvert, RpcTypes};
use reth_rpc_eth_api::RpcTransaction;
use reth_transaction_pool::{
AllPoolTransactions, PoolConsensusTx, PoolTransaction, TransactionPool,
};
use tracing::trace;
/// `txpool` API implementation.
///
/// This type provides the functionality for handling `txpool` related requests.
#[derive(Clone)]
pub struct TxPoolApi<Pool, Eth> {
    /// An interface to interact with the pool
    pool: Pool,
    /// Converter used to build RPC transaction responses from pool transactions.
    tx_resp_builder: Eth,
}
impl<Pool, Eth> TxPoolApi<Pool, Eth> {
    /// Creates a new instance of `TxpoolApi` with the given pool handle and
    /// RPC response converter.
    pub const fn new(pool: Pool, tx_resp_builder: Eth) -> Self {
        Self { pool, tx_resp_builder }
    }
}
impl<Pool, Eth> TxPoolApi<Pool, Eth>
where
    Pool: TransactionPool<Transaction: PoolTransaction<Consensus: Transaction>> + 'static,
    Eth: RpcConvert<Primitives: NodePrimitives<SignedTx = PoolConsensusTx<Pool>>>,
{
    /// Builds the full `txpool_content` response: every pending and queued
    /// transaction in the pool, grouped by sender and keyed by nonce (as a
    /// decimal string), converted into the RPC transaction response type.
    fn content(&self) -> Result<TxpoolContent<RpcTransaction<Eth::Network>>, Eth::Error> {
        // Inserts a single pool transaction into the per-sender, per-nonce map.
        #[inline]
        fn insert<Tx, RpcTxB>(
            tx: &Tx,
            content: &mut BTreeMap<
                Address,
                BTreeMap<String, <RpcTxB::Network as RpcTypes>::TransactionResponse>,
            >,
            resp_builder: &RpcTxB,
        ) -> Result<(), RpcTxB::Error>
        where
            Tx: PoolTransaction,
            RpcTxB: RpcConvert<Primitives: NodePrimitives<SignedTx = Tx::Consensus>>,
        {
            // `fill_pending` converts the consensus transaction into the RPC
            // response shape used for not-yet-mined transactions.
            content.entry(tx.sender()).or_default().insert(
                tx.nonce().to_string(),
                resp_builder.fill_pending(tx.clone_into_consensus())?,
            );
            Ok(())
        }
        let AllPoolTransactions { pending, queued } = self.pool.all_transactions();
        let mut content = TxpoolContent::default();
        for pending in pending {
            insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder)?;
        }
        for queued in queued {
            insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder)?;
        }
        Ok(content)
    }
}
#[async_trait]
impl<Pool, Eth> TxPoolApiServer<RpcTransaction<Eth::Network>> for TxPoolApi<Pool, Eth>
where
    Pool: TransactionPool<Transaction: PoolTransaction<Consensus: Transaction>> + 'static,
    Eth: RpcConvert<Primitives: NodePrimitives<SignedTx = PoolConsensusTx<Pool>>> + 'static,
{
    /// Returns the number of transactions currently pending for inclusion in the next block(s), as
    /// well as the ones that are being scheduled for future execution only.
    /// Ref: [Here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status)
    ///
    /// Handler for `txpool_status`
    async fn txpool_status(&self) -> RpcResult<TxpoolStatus> {
        trace!(target: "rpc::eth", "Serving txpool_status");
        let (pending, queued) = self.pool.pending_and_queued_txn_count();
        Ok(TxpoolStatus { pending: pending as u64, queued: queued as u64 })
    }
    /// Returns a summary of all the transactions currently pending for inclusion in the next
    /// block(s), as well as the ones that are being scheduled for future execution only.
    ///
    /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect) for more details
    ///
    /// Handler for `txpool_inspect`
    async fn txpool_inspect(&self) -> RpcResult<TxpoolInspect> {
        trace!(target: "rpc::eth", "Serving txpool_inspect");
        // Summarizes one pool transaction into the per-sender, per-nonce
        // inspect map (human-readable summary, not the full transaction).
        #[inline]
        fn insert<T: PoolTransaction<Consensus: Transaction>>(
            tx: &T,
            inspect: &mut BTreeMap<Address, BTreeMap<String, TxpoolInspectSummary>>,
        ) {
            let entry = inspect.entry(tx.sender()).or_default();
            let tx = tx.clone_into_consensus();
            entry.insert(tx.nonce().to_string(), tx.into_inner().into());
        }
        let AllPoolTransactions { pending, queued } = self.pool.all_transactions();
        Ok(TxpoolInspect {
            pending: pending.iter().fold(Default::default(), |mut acc, tx| {
                insert(&tx.transaction, &mut acc);
                acc
            }),
            queued: queued.iter().fold(Default::default(), |mut acc, tx| {
                insert(&tx.transaction, &mut acc);
                acc
            }),
        })
    }
    /// Retrieves the transactions contained within the txpool, returning pending as well as queued
    /// transactions of this address, grouped by nonce.
    ///
    /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_contentFrom) for more details
    /// Handler for `txpool_contentFrom`
    async fn txpool_content_from(
        &self,
        from: Address,
    ) -> RpcResult<TxpoolContentFrom<RpcTransaction<Eth::Network>>> {
        trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom");
        // Builds the full content map, then extracts the entries of `from`.
        Ok(self.content().map_err(Into::into)?.remove_from(&from))
    }
    /// Returns the details of all transactions currently pending for inclusion in the next
    /// block(s), as well as the ones that are being scheduled for future execution only.
    ///
    /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details
    /// Handler for `txpool_content`
    async fn txpool_content(&self) -> RpcResult<TxpoolContent<RpcTransaction<Eth::Network>>> {
        trace!(target: "rpc::eth", "Serving txpool_content");
        Ok(self.content().map_err(Into::into)?)
    }
}
impl<Pool, Eth> fmt::Debug for TxPoolApi<Pool, Eth> {
    /// Opaque debug representation; fields are elided so no `Debug` bounds are
    /// required on `Pool` or `Eth`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Was "TxpoolApi", which did not match the type's actual name.
        f.debug_struct("TxPoolApi").finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/otterscan.rs | crates/rpc/rpc/src/otterscan.rs | use alloy_consensus::{BlockHeader, Typed2718};
use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockId};
use alloy_network::{ReceiptResponse, TransactionResponse};
use alloy_primitives::{Address, Bytes, TxHash, B256, U256};
use alloy_rpc_types_eth::{BlockTransactions, TransactionReceipt};
use alloy_rpc_types_trace::{
otterscan::{
BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions,
OtsReceipt, OtsTransactionReceipt, TraceEntry, TransactionsWithReceipts,
},
parity::{Action, CreateAction, CreateOutput, TraceOutput},
};
use async_trait::async_trait;
use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned};
use reth_rpc_api::{EthApiServer, OtterscanServer};
use reth_rpc_convert::RpcTxReq;
use reth_rpc_eth_api::{
helpers::{EthTransactions, TraceExt},
FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction,
};
use reth_rpc_eth_types::{utils::binary_search, EthApiError};
use reth_rpc_server_types::result::internal_rpc_err;
use revm::context_interface::result::ExecutionResult;
use revm_inspectors::{
tracing::{types::CallTraceNode, TracingInspectorConfig},
transfer::{TransferInspector, TransferKind},
};
const API_LEVEL: u64 = 8;
/// Otterscan API.
#[derive(Debug)]
pub struct OtterscanApi<Eth> {
    /// The underlying `eth` API implementation used to serve the requests.
    eth: Eth,
}
impl<Eth> OtterscanApi<Eth> {
    /// Creates a new instance of `Otterscan` wrapping the given `eth` API.
    pub const fn new(eth: Eth) -> Self {
        Self { eth }
    }
}
impl<Eth> OtterscanApi<Eth>
where
    Eth: FullEthApiTypes,
{
    /// Constructs a `BlockDetails` from a block and its receipts.
    ///
    /// Total fees are the sum over all receipts of
    /// `gas_used * effective_gas_price`, saturating per-receipt on overflow.
    fn block_details(
        &self,
        block: RpcBlock<Eth::NetworkTypes>,
        receipts: Vec<RpcReceipt<Eth::NetworkTypes>>,
    ) -> RpcResult<BlockDetails<RpcHeader<Eth::NetworkTypes>>> {
        // blob fee is burnt, so we don't need to calculate it
        let total_fees = receipts
            .iter()
            .map(|receipt| {
                (receipt.gas_used() as u128).saturating_mul(receipt.effective_gas_price())
            })
            .sum::<u128>();
        Ok(BlockDetails::new(block, Default::default(), U256::from(total_fees)))
    }
}
#[async_trait]
impl<Eth> OtterscanServer<RpcTransaction<Eth::NetworkTypes>, RpcHeader<Eth::NetworkTypes>>
    for OtterscanApi<Eth>
where
    Eth: EthApiServer<
        RpcTxReq<Eth::NetworkTypes>,
        RpcTransaction<Eth::NetworkTypes>,
        RpcBlock<Eth::NetworkTypes>,
        RpcReceipt<Eth::NetworkTypes>,
        RpcHeader<Eth::NetworkTypes>,
    > + EthTransactions
        + TraceExt
        + 'static,
{
    /// Handler for `ots_getHeaderByNumber` and `erigon_getHeaderByNumber`
    async fn get_header_by_number(
        &self,
        block_number: LenientBlockNumberOrTag,
    ) -> RpcResult<Option<RpcHeader<Eth::NetworkTypes>>> {
        self.eth.header_by_number(block_number.into()).await
    }
    /// Handler for `ots_hasCode`
    async fn has_code(&self, address: Address, block_id: Option<BlockId>) -> RpcResult<bool> {
        // An account "has code" iff its bytecode at the given block is non-empty.
        EthApiServer::get_code(&self.eth, address, block_id).await.map(|code| !code.is_empty())
    }
    /// Handler for `ots_getApiLevel`
    async fn get_api_level(&self) -> RpcResult<u64> {
        Ok(API_LEVEL)
    }
    /// Handler for `ots_getInternalOperations`
    async fn get_internal_operations(&self, tx_hash: TxHash) -> RpcResult<Vec<InternalOperation>> {
        // Re-executes the transaction with a transfer inspector and maps each
        // recorded transfer to the Otterscan operation type.
        let internal_operations = self
            .eth
            .spawn_trace_transaction_in_block_with_inspector(
                tx_hash,
                TransferInspector::new(false),
                |_tx_info, inspector, _, _| Ok(inspector.into_transfers()),
            )
            .await
            .map_err(Into::into)?
            .map(|transfer_operations| {
                transfer_operations
                    .iter()
                    .map(|op| InternalOperation {
                        from: op.from,
                        to: op.to,
                        value: op.value,
                        r#type: match op.kind {
                            TransferKind::Call => OperationType::OpTransfer,
                            TransferKind::Create => OperationType::OpCreate,
                            TransferKind::Create2 => OperationType::OpCreate2,
                            TransferKind::EofCreate => OperationType::OpEofCreate,
                            TransferKind::SelfDestruct => OperationType::OpSelfDestruct,
                        },
                    })
                    .collect::<Vec<_>>()
            })
            .unwrap_or_default();
        Ok(internal_operations)
    }
    /// Handler for `ots_getTransactionError`
    async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult<Option<Bytes>> {
        // Replays the transaction; only a revert produces output bytes.
        let maybe_revert = self
            .eth
            .spawn_replay_transaction(tx_hash, |_tx_info, res, _| match res.result {
                ExecutionResult::Revert { output, .. } => Ok(Some(output)),
                _ => Ok(None),
            })
            .await
            .map(Option::flatten)
            .map_err(Into::into)?;
        Ok(maybe_revert)
    }
    /// Handler for `ots_traceTransaction`
    async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult<Option<Vec<TraceEntry>>> {
        let traces = self
            .eth
            .spawn_trace_transaction_in_block(
                tx_hash,
                TracingInspectorConfig::default_parity(),
                move |_tx_info, inspector, _, _| Ok(inspector.into_traces().into_nodes()),
            )
            .await
            .map_err(Into::into)?
            .map(|traces| {
                traces
                    .into_iter()
                    .map(|CallTraceNode { trace, .. }| TraceEntry {
                        r#type: if trace.is_selfdestruct() {
                            "SELFDESTRUCT".to_string()
                        } else {
                            trace.kind.to_string()
                        },
                        depth: trace.depth as u32,
                        from: trace.caller,
                        to: trace.address,
                        value: Some(trace.value),
                        input: trace.data,
                        output: trace.output,
                    })
                    .collect::<Vec<_>>()
            });
        Ok(traces)
    }
    /// Handler for `ots_getBlockDetails`
    async fn get_block_details(
        &self,
        block_number: LenientBlockNumberOrTag,
    ) -> RpcResult<BlockDetails<RpcHeader<Eth::NetworkTypes>>> {
        let block_number = block_number.into_inner();
        // Fetch the block and its receipts concurrently.
        let block = self.eth.block_by_number(block_number, true);
        let block_id = block_number.into();
        let receipts = self.eth.block_receipts(block_id);
        let (block, receipts) = futures::try_join!(block, receipts)?;
        self.block_details(
            block.ok_or(EthApiError::HeaderNotFound(block_id))?,
            receipts.ok_or(EthApiError::ReceiptsNotFound(block_id))?,
        )
    }
    /// Handler for `ots_getBlockDetailsByHash`
    async fn get_block_details_by_hash(
        &self,
        block_hash: B256,
    ) -> RpcResult<BlockDetails<RpcHeader<Eth::NetworkTypes>>> {
        let block = self.eth.block_by_hash(block_hash, true);
        let block_id = block_hash.into();
        let receipts = self.eth.block_receipts(block_id);
        let (block, receipts) = futures::try_join!(block, receipts)?;
        self.block_details(
            block.ok_or(EthApiError::HeaderNotFound(block_id))?,
            receipts.ok_or(EthApiError::ReceiptsNotFound(block_id))?,
        )
    }
    /// Handler for `ots_getBlockTransactions`
    ///
    /// Pages are counted from the end of the block (page 0 holds the last
    /// `page_size` transactions).
    async fn get_block_transactions(
        &self,
        block_number: LenientBlockNumberOrTag,
        page_number: usize,
        page_size: usize,
    ) -> RpcResult<
        OtsBlockTransactions<RpcTransaction<Eth::NetworkTypes>, RpcHeader<Eth::NetworkTypes>>,
    > {
        let block_number = block_number.into_inner();
        // retrieve full block and its receipts
        let block = self.eth.block_by_number(block_number, true);
        let block_id = block_number.into();
        let receipts = self.eth.block_receipts(block_id);
        let (block, receipts) = futures::try_join!(block, receipts)?;
        let mut block = block.ok_or(EthApiError::HeaderNotFound(block_id))?;
        let mut receipts = receipts.ok_or(EthApiError::ReceiptsNotFound(block_id))?;
        // check if the number of transactions matches the number of receipts
        let tx_len = block.transactions.len();
        if tx_len != receipts.len() {
            return Err(internal_rpc_err(
                "the number of transactions does not match the number of receipts",
            ))
        }
        // make sure the block is full
        let BlockTransactions::Full(transactions) = &mut block.transactions else {
            return Err(internal_rpc_err("block is not full"));
        };
        // Crop page
        // `page_number` and `page_size` are untrusted RPC input: use saturating
        // multiplication so an oversized product cannot overflow (panic in
        // debug builds / wrap to a wrong page in release builds). A saturated
        // value simply yields an empty page, like any out-of-range page.
        let page_end = tx_len.saturating_sub(page_number.saturating_mul(page_size));
        let page_start = page_end.saturating_sub(page_size);
        // Crop transactions
        *transactions = transactions.drain(page_start..page_end).collect::<Vec<_>>();
        // Crop receipts and transform them into OtsTransactionReceipt
        let timestamp = Some(block.header.timestamp());
        let receipts = receipts
            .drain(page_start..page_end)
            .zip(transactions.iter().map(Typed2718::ty))
            .map(|(receipt, tx_ty)| {
                let inner = OtsReceipt {
                    status: receipt.status(),
                    cumulative_gas_used: receipt.cumulative_gas_used(),
                    logs: None,
                    logs_bloom: None,
                    r#type: tx_ty,
                };
                let receipt = TransactionReceipt {
                    inner,
                    transaction_hash: receipt.transaction_hash(),
                    transaction_index: receipt.transaction_index(),
                    block_hash: receipt.block_hash(),
                    block_number: receipt.block_number(),
                    gas_used: receipt.gas_used(),
                    effective_gas_price: receipt.effective_gas_price(),
                    blob_gas_used: receipt.blob_gas_used(),
                    blob_gas_price: receipt.blob_gas_price(),
                    from: receipt.from(),
                    to: receipt.to(),
                    contract_address: receipt.contract_address(),
                };
                OtsTransactionReceipt { receipt, timestamp }
            })
            .collect();
        // use `transaction_count` to indicate the paginate information
        let mut block = OtsBlockTransactions { fullblock: block.into(), receipts };
        block.fullblock.transaction_count = tx_len;
        Ok(block)
    }
    /// Handler for `ots_searchTransactionsBefore`
    async fn search_transactions_before(
        &self,
        _address: Address,
        _block_number: LenientBlockNumberOrTag,
        _page_size: usize,
    ) -> RpcResult<TransactionsWithReceipts> {
        Err(internal_rpc_err("unimplemented"))
    }
    /// Handler for `ots_searchTransactionsAfter`
    async fn search_transactions_after(
        &self,
        _address: Address,
        _block_number: LenientBlockNumberOrTag,
        _page_size: usize,
    ) -> RpcResult<TransactionsWithReceipts> {
        Err(internal_rpc_err("unimplemented"))
    }
    /// Handler for `ots_getTransactionBySenderAndNonce`
    async fn get_transaction_by_sender_and_nonce(
        &self,
        sender: Address,
        nonce: u64,
    ) -> RpcResult<Option<TxHash>> {
        Ok(self
            .eth
            .get_transaction_by_sender_and_nonce(sender, nonce, false)
            .await
            .map_err(Into::into)?
            .map(|tx| tx.tx_hash()))
    }
    /// Handler for `ots_getContractCreator`
    async fn get_contract_creator(&self, address: Address) -> RpcResult<Option<ContractCreator>> {
        if !self.has_code(address, None).await? {
            return Ok(None);
        }
        // Binary-search for the first block in which the account has code,
        // then trace that block for the CREATE that deployed it.
        let num = binary_search::<_, _, ErrorObjectOwned>(
            1,
            self.eth.block_number()?.saturating_to(),
            |mid| {
                Box::pin(async move {
                    Ok(!EthApiServer::get_code(&self.eth, address, Some(mid.into()))
                        .await?
                        .is_empty())
                })
            },
        )
        .await?;
        let traces = self
            .eth
            .trace_block_with(
                num.into(),
                None,
                TracingInspectorConfig::default_parity(),
                |tx_info, mut ctx| {
                    Ok(ctx
                        .take_inspector()
                        .into_parity_builder()
                        .into_localized_transaction_traces(tx_info))
                },
            )
            .await
            .map_err(Into::into)?
            .map(|traces| {
                traces
                    .into_iter()
                    .flatten()
                    .map(|tx_trace| {
                        let trace = tx_trace.trace;
                        Ok(match (trace.action, trace.result, trace.error) {
                            (
                                Action::Create(CreateAction { from: creator, .. }),
                                Some(TraceOutput::Create(CreateOutput {
                                    address: contract, ..
                                })),
                                None,
                            ) if contract == address => Some(ContractCreator {
                                hash: tx_trace
                                    .transaction_hash
                                    .ok_or(EthApiError::TransactionNotFound)?,
                                creator,
                            }),
                            _ => None,
                        })
                    })
                    .filter_map(Result::transpose)
                    .collect::<Result<Vec<_>, EthApiError>>()
            })
            .transpose()?;
        // A contract maybe created and then destroyed in multiple transactions, here we
        // return the first found transaction, this behavior is consistent with etherscan's
        let found = traces.and_then(|traces| traces.first().copied());
        Ok(found)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/reth.rs | crates/rpc/rpc/src/reth.rs | use std::{collections::HashMap, future::Future, sync::Arc};
use alloy_eips::BlockId;
use alloy_primitives::{Address, U256};
use async_trait::async_trait;
use futures::StreamExt;
use jsonrpsee::{core::RpcResult, PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink};
use jsonrpsee_types::ErrorObject;
use reth_chain_state::{CanonStateNotificationStream, CanonStateSubscriptions};
use reth_errors::RethResult;
use reth_primitives_traits::NodePrimitives;
use reth_rpc_api::RethApiServer;
use reth_rpc_eth_types::{EthApiError, EthResult};
use reth_rpc_server_types::result::internal_rpc_err;
use reth_storage_api::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory};
use reth_tasks::TaskSpawner;
use tokio::sync::oneshot;
/// `reth` API implementation.
///
/// This type provides the functionality for handling `reth` prototype RPC requests.
///
/// Cloning is cheap: all state lives in the shared [`RethApiInner`].
pub struct RethApi<Provider> {
    /// Shared inner state (provider + task spawner).
    inner: Arc<RethApiInner<Provider>>,
}
// === impl RethApi ===
impl<Provider> RethApi<Provider> {
    /// Returns the provider used to interact with the chain.
    pub fn provider(&self) -> &Provider {
        &self.inner.provider
    }
    /// Create a new instance of the [`RethApi`]
    pub fn new(provider: Provider, task_spawner: Box<dyn TaskSpawner>) -> Self {
        Self { inner: Arc::new(RethApiInner { provider, task_spawner }) }
    }
}
impl<Provider> RethApi<Provider>
where
    Provider: BlockReaderIdExt + ChangeSetReader + StateProviderFactory + 'static,
{
    /// Executes the future on a new blocking task.
    ///
    /// The result is sent back over a oneshot channel; a dropped sender (task
    /// panicked or was cancelled) surfaces as `InternalEthError`.
    async fn on_blocking_task<C, F, R>(&self, c: C) -> EthResult<R>
    where
        C: FnOnce(Self) -> F,
        F: Future<Output = EthResult<R>> + Send + 'static,
        R: Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        let this = self.clone();
        let f = c(this);
        self.inner.task_spawner.spawn_blocking(Box::pin(async move {
            let res = f.await;
            let _ = tx.send(res);
        }));
        rx.await.map_err(|_| EthApiError::InternalEthError)?
    }
    /// Returns a map of addresses to changed account balances for a particular block.
    pub async fn balance_changes_in_block(
        &self,
        block_id: BlockId,
    ) -> EthResult<HashMap<Address, U256>> {
        // Database access below is blocking, so run on the blocking task pool.
        self.on_blocking_task(|this| async move { this.try_balance_changes_in_block(block_id) })
            .await
    }
    /// Blocking worker for [`Self::balance_changes_in_block`]: compares each
    /// account's pre-block balance (from the changeset) with its post-block
    /// balance and collects the ones that differ.
    fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult<HashMap<Address, U256>> {
        let Some(block_number) = self.provider().block_number_for_id(block_id)? else {
            return Err(EthApiError::HeaderNotFound(block_id))
        };
        let state = self.provider().state_by_block_id(block_id)?;
        let accounts_before = self.provider().account_block_changeset(block_number)?;
        let hash_map = accounts_before.iter().try_fold(
            HashMap::default(),
            |mut hash_map, account_before| -> RethResult<_> {
                let current_balance = state.account_balance(&account_before.address)?;
                let prev_balance = account_before.info.map(|info| info.balance);
                if current_balance != prev_balance {
                    // A missing current account is reported as zero balance.
                    hash_map.insert(account_before.address, current_balance.unwrap_or_default());
                }
                Ok(hash_map)
            },
        )?;
        Ok(hash_map)
    }
}
#[async_trait]
impl<Provider> RethApiServer for RethApi<Provider>
where
    Provider: BlockReaderIdExt
        + ChangeSetReader
        + StateProviderFactory
        + CanonStateSubscriptions
        + 'static,
{
    /// Handler for `reth_getBalanceChangesInBlock`
    async fn reth_get_balance_changes_in_block(
        &self,
        block_id: BlockId,
    ) -> RpcResult<HashMap<Address, U256>> {
        Ok(Self::balance_changes_in_block(self, block_id).await?)
    }
    /// Handler for `reth_subscribeChainNotifications`
    async fn reth_subscribe_chain_notifications(
        &self,
        pending: PendingSubscriptionSink,
    ) -> jsonrpsee::core::SubscriptionResult {
        let sink = pending.accept().await?;
        let stream = self.provider().canonical_state_stream();
        // Forward canonical-state notifications in a background task so this
        // handler returns immediately after accepting the subscription.
        self.inner.task_spawner.spawn(Box::pin(async move {
            let _ = pipe_from_stream(sink, stream).await;
        }));
        Ok(())
    }
}
/// Pipes all stream items to the subscription sink.
///
/// Terminates cleanly when the client disconnects, the notification stream
/// ends, or the sink rejects a message; serialization failures are reported
/// as internal RPC errors.
async fn pipe_from_stream<N: NodePrimitives>(
    sink: SubscriptionSink,
    mut stream: CanonStateNotificationStream<N>,
) -> Result<(), ErrorObject<'static>> {
    loop {
        tokio::select! {
            _ = sink.closed() => {
                // The subscriber went away; nothing left to do.
                return Ok(())
            }
            incoming = stream.next() => {
                // A `None` means the canonical-state stream terminated.
                let Some(notification) = incoming else { return Ok(()) };
                let msg = SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), &notification)
                    .map_err(|e| internal_rpc_err(e.to_string()))?;
                if sink.send(msg).await.is_err() {
                    return Ok(());
                }
            }
        }
    }
}
impl<Provider> std::fmt::Debug for RethApi<Provider> {
    // Manual impl: keeps the representation opaque and avoids requiring a
    // `Debug` bound on `Provider`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RethApi").finish_non_exhaustive()
    }
}
impl<Provider> Clone for RethApi<Provider> {
    // Cheap clone: only the inner `Arc` pointer is duplicated.
    fn clone(&self) -> Self {
        Self { inner: Arc::clone(&self.inner) }
    }
}
/// Shared state of [`RethApi`], kept behind an `Arc` so handles clone cheaply.
struct RethApiInner<Provider> {
    /// The provider that can interact with the chain.
    provider: Provider,
    /// The type that can spawn tasks which would otherwise block.
    task_spawner: Box<dyn TaskSpawner>,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/net.rs | crates/rpc/rpc/src/net.rs | use alloy_primitives::U64;
use jsonrpsee::core::RpcResult as Result;
use reth_network_api::PeersInfo;
use reth_rpc_api::NetApiServer;
use reth_rpc_eth_api::helpers::EthApiSpec;
/// `Net` API implementation.
///
/// This type provides the functionality for handling `net` related requests.
pub struct NetApi<Net, Eth> {
    /// An interface to interact with the network; used for the peer count.
    network: Net,
    /// The implementation of `eth` API; used to resolve the chain id.
    eth: Eth,
}
// === impl NetApi ===
impl<Net, Eth> NetApi<Net, Eth> {
    /// Returns a new instance with the given network and eth interface implementations
    pub const fn new(network: Net, eth: Eth) -> Self {
        Self { network, eth }
    }
}
/// Net rpc implementation
impl<Net, Eth> NetApiServer for NetApi<Net, Eth>
where
    Net: PeersInfo + 'static,
    Eth: EthApiSpec + 'static,
{
    /// Handler for `net_version`
    fn version(&self) -> Result<String> {
        // Note: net_version is numeric: <https://github.com/paradigmxyz/reth/issues/5569>
        let chain_id: u64 = self.eth.chain_id().to();
        Ok(chain_id.to_string())
    }
    /// Handler for `net_peerCount`
    fn peer_count(&self) -> Result<U64> {
        let connected = self.network.num_connected_peers();
        Ok(U64::from(connected))
    }
    /// Handler for `net_listening`
    fn is_listening(&self) -> Result<bool> {
        // The node always accepts inbound connections.
        Ok(true)
    }
}
impl<Net, Eth> std::fmt::Debug for NetApi<Net, Eth> {
    // Manual impl: avoids requiring `Debug` bounds on `Net` and `Eth`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NetApi").finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/builder.rs | crates/rpc/rpc/src/eth/builder.rs | //! `EthApiBuilder` implementation
use crate::{eth::core::EthApiInner, EthApi};
use alloy_network::Ethereum;
use reth_chain_state::CanonStateSubscriptions;
use reth_chainspec::ChainSpecProvider;
use reth_primitives_traits::HeaderTy;
use reth_rpc_convert::{RpcConvert, RpcConverter};
use reth_rpc_eth_api::{
helpers::pending_block::PendingEnvBuilder, node::RpcNodeCoreAdapter, RpcNodeCore,
};
use reth_rpc_eth_types::{
builder::config::PendingBlockKind, fee_history::fee_history_cache_new_blocks_task,
receipt::EthReceiptConverter, EthStateCache, EthStateCacheConfig, FeeHistoryCache,
FeeHistoryCacheConfig, ForwardConfig, GasCap, GasPriceOracle, GasPriceOracleConfig,
};
use reth_rpc_server_types::constants::{
DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS,
};
use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor};
use std::sync::Arc;
/// A helper to build the `EthApi` handler instance.
///
/// This builder type contains all settings to create an [`EthApiInner`] or an [`EthApi`] instance
/// directly.
#[derive(Debug)]
pub struct EthApiBuilder<N: RpcNodeCore, Rpc, NextEnv = ()> {
    /// The node components (provider, pool, network, EVM config) the API is built on.
    components: N,
    /// Converter used to translate primitive types into RPC response types.
    rpc_converter: Rpc,
    /// Gas cap applied to RPC executions.
    gas_cap: GasCap,
    /// Maximum number of blocks accepted in an `eth_simulate` request.
    max_simulate_blocks: u64,
    /// Number of recent blocks for which proofs are served.
    eth_proof_window: u64,
    /// Configuration for the fee history cache.
    fee_history_cache_config: FeeHistoryCacheConfig,
    /// Maximum number of concurrently permitted proof requests.
    proof_permits: usize,
    /// Configuration for the eth state cache.
    eth_state_cache_config: EthStateCacheConfig,
    /// Optional pre-built state cache; when `None`, presumably constructed
    /// from `eth_state_cache_config` at build time — confirm in `build`.
    eth_cache: Option<EthStateCache<N::Primitives>>,
    /// Configuration for the gas price oracle.
    gas_oracle_config: GasPriceOracleConfig,
    /// Optional pre-built gas price oracle; when `None`, presumably
    /// constructed from `gas_oracle_config` at build time — confirm in `build`.
    gas_oracle: Option<GasPriceOracle<N::Provider>>,
    /// Optional pool used for blocking tasks.
    blocking_task_pool: Option<BlockingTaskPool>,
    /// Spawner for auxiliary background tasks.
    task_spawner: Box<dyn TaskSpawner + 'static>,
    /// Builder for the pending block environment.
    next_env: NextEnv,
    /// Maximum batch size (defaults to 1 in [`Self::new_with_components`]).
    max_batch_size: usize,
    /// How the pending block should be built.
    pending_block_kind: PendingBlockKind,
    /// Configuration for forwarding raw transactions.
    raw_tx_forwarder: ForwardConfig,
}
impl<Provider, Pool, Network, EvmConfig, ChainSpec>
    EthApiBuilder<
        RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>,
        RpcConverter<Ethereum, EvmConfig, EthReceiptConverter<ChainSpec>>,
    >
where
    RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>:
        RpcNodeCore<Provider: ChainSpecProvider<ChainSpec = ChainSpec>, Evm = EvmConfig>,
{
    /// Creates a new `EthApiBuilder` instance.
    ///
    /// Convenience constructor that wraps the individual parts in an
    /// [`RpcNodeCoreAdapter`] and delegates to [`Self::new_with_components`].
    pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self {
        Self::new_with_components(RpcNodeCoreAdapter::new(provider, pool, network, evm_config))
    }
}
impl<N: RpcNodeCore, Rpc, NextEnv> EthApiBuilder<N, Rpc, NextEnv> {
    /// Converts the RPC converter type of this builder
    ///
    /// Only `rpc_converter` is mapped through `f`; every other setting is
    /// carried over unchanged. The exhaustive destructure ensures a compile
    /// error here if a new field is added without being forwarded.
    pub fn map_converter<F, R>(self, f: F) -> EthApiBuilder<N, R, NextEnv>
    where
        F: FnOnce(Rpc) -> R,
    {
        let Self {
            components,
            rpc_converter,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            fee_history_cache_config,
            proof_permits,
            eth_state_cache_config,
            eth_cache,
            gas_oracle_config,
            gas_oracle,
            blocking_task_pool,
            task_spawner,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        } = self;
        EthApiBuilder {
            components,
            rpc_converter: f(rpc_converter),
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            fee_history_cache_config,
            proof_permits,
            eth_state_cache_config,
            eth_cache,
            gas_oracle_config,
            gas_oracle,
            blocking_task_pool,
            task_spawner,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        }
    }
}
impl<N, ChainSpec> EthApiBuilder<N, RpcConverter<Ethereum, N::Evm, EthReceiptConverter<ChainSpec>>>
where
    N: RpcNodeCore<Provider: ChainSpecProvider<ChainSpec = ChainSpec>>,
{
    /// Creates a new `EthApiBuilder` instance with the provided components.
    ///
    /// All other settings start from their documented defaults and can be
    /// customized through the builder methods.
    pub fn new_with_components(components: N) -> Self {
        // The default converter builds receipts using the provider's chain spec.
        let rpc_converter =
            RpcConverter::new(EthReceiptConverter::new(components.provider().chain_spec()));
        Self {
            components,
            rpc_converter,
            // `None` here: presumably constructed from the corresponding
            // `*_config` fields at build time — confirm in `build`.
            eth_cache: None,
            gas_oracle: None,
            gas_cap: GasCap::default(),
            max_simulate_blocks: DEFAULT_MAX_SIMULATE_BLOCKS,
            eth_proof_window: DEFAULT_ETH_PROOF_WINDOW,
            blocking_task_pool: None,
            fee_history_cache_config: FeeHistoryCacheConfig::default(),
            proof_permits: DEFAULT_PROOF_PERMITS,
            task_spawner: TokioTaskExecutor::default().boxed(),
            gas_oracle_config: Default::default(),
            eth_state_cache_config: Default::default(),
            next_env: Default::default(),
            max_batch_size: 1,
            pending_block_kind: PendingBlockKind::Full,
            raw_tx_forwarder: ForwardConfig::default(),
        }
    }
}
impl<N, Rpc, NextEnv> EthApiBuilder<N, Rpc, NextEnv>
where
    N: RpcNodeCore,
{
    /// Configures the task spawner used to spawn additional tasks.
    pub fn task_spawner(mut self, spawner: impl TaskSpawner + 'static) -> Self {
        self.task_spawner = Box::new(spawner);
        self
    }

    /// Changes the configured converter.
    ///
    /// Replaces the converter wholesale; like [`Self::map_converter`] this must destructure and
    /// rebuild because the builder's `Rpc` type parameter changes.
    pub fn with_rpc_converter<RpcNew>(
        self,
        rpc_converter: RpcNew,
    ) -> EthApiBuilder<N, RpcNew, NextEnv> {
        let Self {
            components,
            rpc_converter: _,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            fee_history_cache_config,
            proof_permits,
            eth_state_cache_config,
            eth_cache,
            gas_oracle,
            blocking_task_pool,
            task_spawner,
            gas_oracle_config,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        } = self;
        EthApiBuilder {
            components,
            rpc_converter,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            fee_history_cache_config,
            proof_permits,
            eth_state_cache_config,
            eth_cache,
            gas_oracle,
            blocking_task_pool,
            task_spawner,
            gas_oracle_config,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        }
    }

    /// Changes the configured pending environment builder.
    ///
    /// Same destructure/rebuild pattern as above, here because `NextEnv` changes.
    pub fn with_pending_env_builder<NextEnvNew>(
        self,
        next_env: NextEnvNew,
    ) -> EthApiBuilder<N, Rpc, NextEnvNew> {
        let Self {
            components,
            rpc_converter,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            fee_history_cache_config,
            proof_permits,
            eth_state_cache_config,
            eth_cache,
            gas_oracle,
            blocking_task_pool,
            task_spawner,
            gas_oracle_config,
            next_env: _,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        } = self;
        EthApiBuilder {
            components,
            rpc_converter,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            fee_history_cache_config,
            proof_permits,
            eth_state_cache_config,
            eth_cache,
            gas_oracle,
            blocking_task_pool,
            task_spawner,
            gas_oracle_config,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        }
    }

    /// Sets `eth_cache` config for the cache that will be used if no [`EthStateCache`] is
    /// configured.
    pub const fn eth_state_cache_config(
        mut self,
        eth_state_cache_config: EthStateCacheConfig,
    ) -> Self {
        self.eth_state_cache_config = eth_state_cache_config;
        self
    }

    /// Sets `eth_cache` instance
    pub fn eth_cache(mut self, eth_cache: EthStateCache<N::Primitives>) -> Self {
        self.eth_cache = Some(eth_cache);
        self
    }

    /// Sets `gas_oracle` config for the gas oracle that will be used if no [`GasPriceOracle`] is
    /// configured.
    pub const fn gas_oracle_config(mut self, gas_oracle_config: GasPriceOracleConfig) -> Self {
        self.gas_oracle_config = gas_oracle_config;
        self
    }

    /// Sets `gas_oracle` instance
    pub fn gas_oracle(mut self, gas_oracle: GasPriceOracle<N::Provider>) -> Self {
        self.gas_oracle = Some(gas_oracle);
        self
    }

    /// Sets the gas cap.
    pub const fn gas_cap(mut self, gas_cap: GasCap) -> Self {
        self.gas_cap = gas_cap;
        self
    }

    /// Sets the maximum number of blocks for `eth_simulateV1`.
    pub const fn max_simulate_blocks(mut self, max_simulate_blocks: u64) -> Self {
        self.max_simulate_blocks = max_simulate_blocks;
        self
    }

    /// Sets the maximum number of blocks into the past for generating state proofs.
    pub const fn eth_proof_window(mut self, eth_proof_window: u64) -> Self {
        self.eth_proof_window = eth_proof_window;
        self
    }

    /// Sets the blocking task pool.
    pub fn blocking_task_pool(mut self, blocking_task_pool: BlockingTaskPool) -> Self {
        self.blocking_task_pool = Some(blocking_task_pool);
        self
    }

    /// Sets the fee history cache.
    pub const fn fee_history_cache_config(
        mut self,
        fee_history_cache_config: FeeHistoryCacheConfig,
    ) -> Self {
        self.fee_history_cache_config = fee_history_cache_config;
        self
    }

    /// Sets the proof permits.
    pub const fn proof_permits(mut self, proof_permits: usize) -> Self {
        self.proof_permits = proof_permits;
        self
    }

    /// Sets the max batch size for batching transaction insertions.
    pub const fn max_batch_size(mut self, max_batch_size: usize) -> Self {
        self.max_batch_size = max_batch_size;
        self
    }

    /// Sets the pending block kind
    pub const fn pending_block_kind(mut self, pending_block_kind: PendingBlockKind) -> Self {
        self.pending_block_kind = pending_block_kind;
        self
    }

    /// Sets the raw transaction forwarder.
    pub fn raw_tx_forwarder(mut self, tx_forwarder: ForwardConfig) -> Self {
        self.raw_tx_forwarder = tx_forwarder;
        self
    }

    /// Builds the [`EthApiInner`] instance.
    ///
    /// If not configured, this will spawn the cache backend: [`EthStateCache::spawn`].
    ///
    /// Also spawns a critical background task that feeds new canonical blocks into the fee
    /// history cache.
    ///
    /// # Panics
    ///
    /// This function panics if the blocking task pool cannot be built.
    /// This will panic if called outside the context of a Tokio runtime.
    pub fn build_inner(self) -> EthApiInner<N, Rpc>
    where
        Rpc: RpcConvert,
        NextEnv: PendingEnvBuilder<N::Evm>,
    {
        let Self {
            components,
            rpc_converter,
            eth_state_cache_config,
            gas_oracle_config,
            eth_cache,
            gas_oracle,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            blocking_task_pool,
            fee_history_cache_config,
            proof_permits,
            task_spawner,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder,
        } = self;
        let provider = components.provider().clone();
        // Lazily create the cache and oracle from their configs if no instance was supplied.
        let eth_cache = eth_cache
            .unwrap_or_else(|| EthStateCache::spawn(provider.clone(), eth_state_cache_config));
        let gas_oracle = gas_oracle.unwrap_or_else(|| {
            GasPriceOracle::new(provider.clone(), gas_oracle_config, eth_cache.clone())
        });
        let fee_history_cache =
            FeeHistoryCache::<HeaderTy<N::Primitives>>::new(fee_history_cache_config);
        // Keep the fee history cache up to date with the canonical chain.
        let new_canonical_blocks = provider.canonical_state_stream();
        let fhc = fee_history_cache.clone();
        let cache = eth_cache.clone();
        task_spawner.spawn_critical(
            "cache canonical blocks for fee history task",
            Box::pin(async move {
                fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider, cache).await;
            }),
        );
        EthApiInner::new(
            components,
            eth_cache,
            gas_oracle,
            gas_cap,
            max_simulate_blocks,
            eth_proof_window,
            // building the pool may fail; this is the documented panic path
            blocking_task_pool.unwrap_or_else(|| {
                BlockingTaskPool::build().expect("failed to build blocking task pool")
            }),
            fee_history_cache,
            task_spawner,
            proof_permits,
            rpc_converter,
            next_env,
            max_batch_size,
            pending_block_kind,
            raw_tx_forwarder.forwarder_client(),
        )
    }

    /// Builds the [`EthApi`] instance.
    ///
    /// If not configured, this will spawn the cache backend: [`EthStateCache::spawn`].
    ///
    /// # Panics
    ///
    /// This function panics if the blocking task pool cannot be built.
    /// This will panic if called outside the context of a Tokio runtime.
    pub fn build(self) -> EthApi<N, Rpc>
    where
        Rpc: RpcConvert,
        NextEnv: PendingEnvBuilder<N::Evm>,
    {
        EthApi { inner: Arc::new(self.build_inner()) }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/bundle.rs | crates/rpc/rpc/src/eth/bundle.rs | //! `Eth` bundle implementation and helpers.
use alloy_consensus::{EnvKzgSettings, Transaction as _};
use alloy_eips::eip7840::BlobParams;
use alloy_primitives::{Keccak256, U256};
use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult};
use jsonrpsee::core::RpcResult;
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_evm::{ConfigureEvm, Evm};
use reth_primitives_traits::SignedTransaction;
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_eth_api::{
helpers::{Call, EthTransactions, LoadPendingBlock},
EthCallBundleApiServer, FromEthApiError, FromEvmError,
};
use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError};
use reth_tasks::pool::BlockingTaskGuard;
use reth_transaction_pool::{
EthBlobTransactionSidecar, EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool,
};
use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef};
use std::sync::Arc;
/// `Eth` bundle implementation.
///
/// All state lives behind an `Arc`, so this type is cheap to clone and share across tasks.
pub struct EthBundle<Eth> {
    /// All nested fields bundled together.
    inner: Arc<EthBundleInner<Eth>>,
}
impl<Eth> EthBundle<Eth> {
    /// Create a new `EthBundle` instance.
    pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self {
        // Bundle the parts into the shared inner state up front for readability.
        let inner = EthBundleInner { eth_api, blocking_task_guard };
        Self { inner: Arc::new(inner) }
    }

    /// Access the underlying `Eth` API.
    pub fn eth_api(&self) -> &Eth {
        &self.inner.eth_api
    }
}
impl<Eth> EthBundle<Eth>
where
    Eth: EthTransactions + LoadPendingBlock + Call + 'static,
{
    /// Simulates a bundle of transactions at the top of a given block number with the state of
    /// another (or the same) block. This can be used to simulate future blocks with the current
    /// state, or it can be used to simulate a past block. The sender is responsible for signing
    /// the transactions and using the correct nonce and ensuring validity.
    pub async fn call_bundle(
        &self,
        bundle: EthCallBundle,
    ) -> Result<EthCallBundleResponse, Eth::Error> {
        let EthCallBundle {
            txs,
            block_number,
            coinbase,
            state_block_number,
            timeout: _,
            timestamp,
            gas_limit,
            difficulty,
            base_fee,
            ..
        } = bundle;
        // An empty bundle or a missing (zero) target block number is a caller error.
        if txs.is_empty() {
            return Err(EthApiError::InvalidParams(
                EthBundleError::EmptyBundleTransactions.to_string(),
            )
            .into())
        }
        if block_number == 0 {
            return Err(EthApiError::InvalidParams(
                EthBundleError::BundleMissingBlockNumber.to_string(),
            )
            .into())
        }

        // Decode and recover the signer of every raw transaction in the bundle.
        // Fix: the previous version chained `.into_iter().collect::<Vec<_>>()` after this
        // fallible collect, rebuilding the already-collected `Vec` for no effect.
        let transactions = txs
            .into_iter()
            .map(|tx| recover_raw_transaction::<PoolPooledTx<Eth::Pool>>(&tx))
            .collect::<Result<Vec<_>, _>>()?;

        let block_id: alloy_rpc_types_eth::BlockId = state_block_number.into();
        // Note: the block number is considered the `parent` block: <https://github.com/flashbots/mev-geth/blob/fddf97beec5877483f879a77b7dea2e58a58d653/internal/ethapi/api.go#L2104>
        let (mut evm_env, at) = self.eth_api().evm_env_at(block_id).await?;

        // Apply caller-provided overrides to the block environment.
        if let Some(coinbase) = coinbase {
            evm_env.block_env.beneficiary = coinbase;
        }

        // need to adjust the timestamp for the next block
        if let Some(timestamp) = timestamp {
            evm_env.block_env.timestamp = U256::from(timestamp);
        } else {
            // advance by one slot; the unit (seconds vs milliseconds) depends on the feature flag
            let increment: u64 = if cfg!(feature = "timestamp-in-seconds") { 12 } else { 12000 };
            evm_env.block_env.timestamp += U256::from(increment);
        }

        if let Some(difficulty) = difficulty {
            evm_env.block_env.difficulty = U256::from(difficulty);
        }

        // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob
        // transactions.
        let blob_gas_used = transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::<u64>();
        if blob_gas_used > 0 {
            let blob_params = self
                .eth_api()
                .provider()
                .chain_spec()
                .blob_params_at_timestamp(if cfg!(feature = "timestamp-in-seconds") {
                    evm_env.block_env.timestamp.saturating_to()
                } else {
                    (evm_env.block_env.timestamp / U256::from(1000)).saturating_to()
                })
                .unwrap_or_else(BlobParams::cancun);
            // Fix: reuse the sum computed above; the previous version re-iterated the
            // transactions to recompute the identical value.
            if blob_gas_used > blob_params.max_blob_gas_per_block() {
                return Err(EthApiError::InvalidParams(
                    EthBundleError::Eip4844BlobGasExceeded(blob_params.max_blob_gas_per_block())
                        .to_string(),
                )
                .into())
            }
        }

        // default to call gas limit unless user requests a smaller limit
        evm_env.block_env.gas_limit = self.inner.eth_api.call_gas_limit();
        if let Some(gas_limit) = gas_limit {
            if gas_limit > evm_env.block_env.gas_limit {
                return Err(
                    EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into()
                )
            }
            evm_env.block_env.gas_limit = gas_limit;
        }

        if let Some(base_fee) = base_fee {
            evm_env.block_env.basefee = base_fee.try_into().unwrap_or(u64::MAX);
        }

        let state_block_number = evm_env.block_env.number;
        // use the block number of the request
        evm_env.block_env.number = U256::from(block_number);

        let eth_api = self.eth_api().clone();

        self.eth_api()
            .spawn_with_state_at_block(at, move |state| {
                let coinbase = evm_env.block_env.beneficiary;
                let basefee = evm_env.block_env.basefee;
                let db = CacheDB::new(StateProviderDatabase::new(state));

                // Snapshot the coinbase balance so per-tx and total payment diffs can be derived.
                let initial_coinbase = db
                    .basic_ref(coinbase)
                    .map_err(Eth::Error::from_eth_err)?
                    .map(|acc| acc.balance)
                    .unwrap_or_default();
                let mut coinbase_balance_before_tx = initial_coinbase;
                let mut coinbase_balance_after_tx = initial_coinbase;
                let mut total_gas_used = 0u64;
                let mut total_gas_fees = U256::ZERO;
                // The bundle hash is the keccak of the concatenated tx hashes.
                let mut hasher = Keccak256::new();

                let mut evm = eth_api.evm_config().evm_with_env(db, evm_env);

                let mut results = Vec::with_capacity(transactions.len());
                let mut transactions = transactions.into_iter().peekable();

                while let Some(tx) = transactions.next() {
                    let signer = tx.signer();
                    let tx = {
                        let mut tx = <Eth::Pool as TransactionPool>::Transaction::from_pooled(tx);

                        // EIP-4844 txs must carry a valid blob sidecar to be simulated.
                        if let EthBlobTransactionSidecar::Present(sidecar) = tx.take_blob() {
                            tx.validate_blob(&sidecar, EnvKzgSettings::Default.get()).map_err(
                                |e| {
                                    Eth::Error::from_eth_err(EthApiError::InvalidParams(
                                        e.to_string(),
                                    ))
                                },
                            )?;
                        }

                        tx.into_consensus()
                    };

                    hasher.update(*tx.tx_hash());

                    let ResultAndState { result, state } = evm
                        .transact(eth_api.evm_config().tx_env(&tx))
                        .map_err(Eth::Error::from_evm_err)?;

                    let gas_price = tx
                        .effective_tip_per_gas(basefee)
                        .expect("fee is always valid; execution succeeded");

                    let gas_used = result.gas_used();
                    total_gas_used += gas_used;

                    let gas_fees = U256::from(gas_used) * U256::from(gas_price);
                    total_gas_fees += gas_fees;

                    // coinbase is always present in the result state
                    coinbase_balance_after_tx =
                        state.get(&coinbase).map(|acc| acc.info.balance).unwrap_or_default();
                    let coinbase_diff =
                        coinbase_balance_after_tx.saturating_sub(coinbase_balance_before_tx);
                    // Anything the coinbase gained beyond gas fees was paid directly.
                    let eth_sent_to_coinbase = coinbase_diff.saturating_sub(gas_fees);

                    // update the coinbase balance
                    coinbase_balance_before_tx = coinbase_balance_after_tx;

                    // set the return data for the response
                    let (value, revert) = if result.is_success() {
                        let value = result.into_output().unwrap_or_default();
                        (Some(value), None)
                    } else {
                        let revert = result.into_output().unwrap_or_default();
                        (None, Some(revert))
                    };

                    let tx_res = EthCallBundleTransactionResult {
                        coinbase_diff,
                        eth_sent_to_coinbase,
                        from_address: signer,
                        gas_fees,
                        gas_price: U256::from(gas_price),
                        gas_used,
                        to_address: tx.to(),
                        tx_hash: *tx.tx_hash(),
                        value,
                        revert,
                    };
                    results.push(tx_res);

                    // need to apply the state changes of this call before executing the next call
                    if transactions.peek().is_some() {
                        evm.db_mut().commit(state)
                    }
                }

                // populate the response
                let coinbase_diff = coinbase_balance_after_tx.saturating_sub(initial_coinbase);
                let eth_sent_to_coinbase = coinbase_diff.saturating_sub(total_gas_fees);
                let bundle_gas_price =
                    coinbase_diff.checked_div(U256::from(total_gas_used)).unwrap_or_default();
                let res = EthCallBundleResponse {
                    bundle_gas_price,
                    bundle_hash: hasher.finalize(),
                    coinbase_diff,
                    eth_sent_to_coinbase,
                    gas_fees: total_gas_fees,
                    results,
                    state_block_number: state_block_number.to(),
                    total_gas_used,
                };

                Ok(res)
            })
            .await
    }
}
#[async_trait::async_trait]
impl<Eth> EthCallBundleApiServer for EthBundle<Eth>
where
    Eth: EthTransactions + LoadPendingBlock + Call + 'static,
{
    /// RPC entry point for `eth_callBundle`: delegates to [`EthBundle::call_bundle`] and maps
    /// the API-level error into the jsonrpsee error type.
    async fn call_bundle(&self, request: EthCallBundle) -> RpcResult<EthCallBundleResponse> {
        Self::call_bundle(self, request).await.map_err(Into::into)
    }
}
/// Container type for `EthBundle` internals
#[derive(Debug)]
struct EthBundleInner<Eth> {
    /// Access to commonly used code of the `eth` namespace
    eth_api: Eth,
    // restrict the number of concurrent tracing calls.
    // NOTE(review): currently unused (see `#[expect(dead_code)]`); kept for future rate limiting.
    #[expect(dead_code)]
    blocking_task_guard: BlockingTaskGuard,
}
impl<Eth> std::fmt::Debug for EthBundle<Eth> {
    /// Formats as `EthBundle { .. }` without requiring `Eth: Debug` or exposing inner state.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("EthBundle");
        dbg.finish_non_exhaustive()
    }
}
impl<Eth> Clone for EthBundle<Eth> {
    /// Cheap clone: only the reference count of the shared inner state is bumped.
    fn clone(&self) -> Self {
        Self { inner: Arc::clone(&self.inner) }
    }
}
/// [`EthBundle`] specific errors.
///
/// These are surfaced to RPC callers as `EthApiError::InvalidParams` (see `call_bundle`).
#[derive(Debug, thiserror::Error)]
pub enum EthBundleError {
    /// Thrown if the bundle does not contain any transactions.
    #[error("bundle missing txs")]
    EmptyBundleTransactions,
    /// Thrown if the bundle does not contain a block number, or block number is 0.
    #[error("bundle missing blockNumber")]
    BundleMissingBlockNumber,
    /// Thrown when the blob gas usage of the blob transactions in a bundle exceed the maximum.
    #[error("blob gas usage exceeds the limit of {0} gas per block.")]
    Eip4844BlobGasExceeded(u64),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/filter.rs | crates/rpc/rpc/src/eth/filter.rs | //! `eth_` `Filter` RPC handler implementation
use alloy_consensus::BlockHeader;
use alloy_primitives::{Sealable, TxHash};
use alloy_rpc_types_eth::{
BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, Log,
PendingTransactionFilterKind,
};
use async_trait::async_trait;
use futures::{
future::TryFutureExt,
stream::{FuturesOrdered, StreamExt},
Future,
};
use itertools::Itertools;
use jsonrpsee::{core::RpcResult, server::IdProvider};
use reth_errors::ProviderError;
use reth_primitives_traits::{NodePrimitives, SealedHeader};
use reth_rpc_eth_api::{
EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert,
RpcNodeCoreExt, RpcTransaction,
};
use reth_rpc_eth_types::{
logs_utils::{self, append_matching_block_logs, ProviderOrBlock},
EthApiError, EthFilterConfig, EthStateCache, EthSubscriptionIdProvider,
};
use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult};
use reth_storage_api::{
BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock,
ProviderReceipt, ReceiptProvider,
};
use reth_tasks::TaskSpawner;
use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool};
use std::{
collections::{HashMap, VecDeque},
fmt,
iter::{Peekable, StepBy},
ops::RangeInclusive,
pin::Pin,
sync::Arc,
time::{Duration, Instant},
};
use tokio::{
sync::{mpsc::Receiver, oneshot, Mutex},
time::MissedTickBehavior,
};
use tracing::{debug, error, trace};
impl<Eth> EngineEthFilter for EthFilter<Eth>
where
    Eth: FullEthApiTypes + RpcNodeCoreExt<Provider: BlockIdReader> + 'static,
{
    /// Returns logs matching given filter object, no query limits
    fn logs(
        &self,
        filter: Filter,
        limits: QueryLimits,
    ) -> impl Future<Output = RpcResult<Vec<Log>>> + Send {
        trace!(target: "rpc::eth", "Serving eth_getLogs");
        // Delegate to the shared filter logic, converting the filter error into an RPC error.
        self.logs_for_filter(filter, limits).map_err(|e| e.into())
    }
}
/// Threshold for deciding between cached and range mode processing
const CACHED_MODE_BLOCK_THRESHOLD: u64 = 250;
/// Threshold for bloom filter matches that triggers reduced caching
const HIGH_BLOOM_MATCH_THRESHOLD: usize = 20;
/// Threshold for bloom filter matches that triggers moderately reduced caching
const MODERATE_BLOOM_MATCH_THRESHOLD: usize = 10;
/// Minimum block count to apply bloom filter match adjustments
const BLOOM_ADJUSTMENT_MIN_BLOCKS: u64 = 100;
/// The maximum number of headers we read at once when handling a range filter.
const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb
/// Threshold for enabling parallel processing in range mode (number of bloom matches)
const PARALLEL_PROCESSING_THRESHOLD: usize = 1000;
/// Default concurrency for parallel processing
const DEFAULT_PARALLEL_CONCURRENCY: usize = 4;
/// `Eth` filter RPC implementation.
///
/// This type handles `eth_` rpc requests related to filters (`eth_getLogs`).
///
/// All state is shared behind an `Arc`, so the handle is cheap to clone.
pub struct EthFilter<Eth: EthApiTypes> {
    /// All nested fields bundled together
    inner: Arc<EthFilterInner<Eth>>,
}
impl<Eth> Clone for EthFilter<Eth>
where
Eth: EthApiTypes,
{
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<Eth> EthFilter<Eth>
where
    Eth: EthApiTypes + 'static,
{
    /// Creates a new, shareable instance.
    ///
    /// This uses the given pool to get notified about new transactions, the provider to interact
    /// with the blockchain, the cache to fetch cacheable data, like the logs.
    ///
    /// See also [`EthFilterConfig`].
    ///
    /// This also spawns a task that periodically clears stale filters.
    ///
    /// # Create a new instance with [`EthApi`](crate::EthApi)
    ///
    /// ```no_run
    /// use reth_evm_ethereum::EthEvmConfig;
    /// use reth_network_api::noop::NoopNetwork;
    /// use reth_provider::noop::NoopProvider;
    /// use reth_rpc::{EthApi, EthFilter};
    /// use reth_tasks::TokioTaskExecutor;
    /// use reth_transaction_pool::noop::NoopTransactionPool;
    /// let eth_api = EthApi::builder(
    ///     NoopProvider::default(),
    ///     NoopTransactionPool::default(),
    ///     NoopNetwork::default(),
    ///     EthEvmConfig::mainnet(),
    /// )
    /// .build();
    /// let filter = EthFilter::new(eth_api, Default::default(), TokioTaskExecutor::default().boxed());
    /// ```
    pub fn new(eth_api: Eth, config: EthFilterConfig, task_spawner: Box<dyn TaskSpawner>) -> Self {
        let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } =
            config;
        let inner = EthFilterInner {
            eth_api,
            active_filters: ActiveFilters::new(),
            id_provider: Arc::new(EthSubscriptionIdProvider::default()),
            max_headers_range: MAX_HEADERS_RANGE,
            task_spawner,
            stale_filter_ttl,
            query_limits: QueryLimits { max_blocks_per_filter, max_logs_per_response },
        };
        let eth_filter = Self { inner: Arc::new(inner) };
        // Spawn the background task that periodically evicts filters that were not polled
        // within `stale_filter_ttl`.
        let this = eth_filter.clone();
        eth_filter.inner.task_spawner.spawn_critical(
            "eth-filters_stale-filters-clean",
            Box::pin(async move {
                this.watch_and_clear_stale_filters().await;
            }),
        );
        eth_filter
    }

    /// Returns all currently active filters
    pub fn active_filters(&self) -> &ActiveFilters<RpcTransaction<Eth::NetworkTypes>> {
        &self.inner.active_filters
    }

    /// Endless future that [`Self::clear_stale_filters`] every `stale_filter_ttl` interval.
    /// Nonetheless, this endless future frees the thread at every await point.
    async fn watch_and_clear_stale_filters(&self) {
        // First tick fires one full TTL from now, not immediately.
        let mut interval = tokio::time::interval_at(
            tokio::time::Instant::now() + self.inner.stale_filter_ttl,
            self.inner.stale_filter_ttl,
        );
        // If a tick is missed (e.g. under load), delay instead of bursting to catch up.
        interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
        loop {
            interval.tick().await;
            self.clear_stale_filters(Instant::now()).await;
        }
    }

    /// Clears all filters that have not been polled for longer than the configured
    /// `stale_filter_ttl` at the given instant.
    pub async fn clear_stale_filters(&self, now: Instant) {
        trace!(target: "rpc::eth", "clear stale filters");
        self.active_filters().inner.lock().await.retain(|id, filter| {
            // Keep only filters polled within the TTL window.
            let is_valid = (now - filter.last_poll_timestamp) < self.inner.stale_filter_ttl;
            if !is_valid {
                trace!(target: "rpc::eth", "evict filter with id: {:?}", id);
            }
            is_valid
        })
    }
}
impl<Eth> EthFilter<Eth>
where
    Eth: FullEthApiTypes<Provider: BlockReader + BlockIdReader> + RpcNodeCoreExt + 'static,
{
    /// Access the underlying provider.
    fn provider(&self) -> &Eth::Provider {
        self.inner.eth_api.provider()
    }

    /// Access the underlying pool.
    fn pool(&self) -> &Eth::Pool {
        self.inner.eth_api.pool()
    }

    /// Returns all the filter changes for the given id, if any
    pub async fn filter_changes(
        &self,
        id: FilterId,
    ) -> Result<FilterChanges<RpcTransaction<Eth::NetworkTypes>>, EthFilterError> {
        let info = self.provider().chain_info()?;
        let best_number = info.best_number;

        // start_block is the block from which we should start fetching changes, the next block from
        // the last time changes were polled, in other words the best block at last poll + 1
        let (start_block, kind) = {
            // Only hold the filter-map lock while advancing the cursor.
            let mut filters = self.inner.active_filters.inner.lock().await;
            let filter = filters.get_mut(&id).ok_or(EthFilterError::FilterNotFound(id))?;

            if filter.block > best_number {
                // no new blocks since the last poll
                return Ok(FilterChanges::Empty)
            }

            // update filter
            // we fetch all changes from [filter.block..best_block], so we advance the filter's
            // block to `best_block +1`, the next from which we should start fetching changes again
            let mut block = best_number + 1;
            std::mem::swap(&mut filter.block, &mut block);
            filter.last_poll_timestamp = Instant::now();

            // `block` now holds the previous cursor, i.e. the start of the poll range
            (block, filter.kind.clone())
        };

        match kind {
            FilterKind::PendingTransaction(filter) => Ok(filter.drain().await),
            FilterKind::Block => {
                // Note: we need to fetch the block hashes from inclusive range
                // [start_block..best_block]
                let end_block = best_number + 1;
                let block_hashes =
                    self.provider().canonical_hashes_range(start_block, end_block).map_err(
                        |_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()),
                    )?;
                Ok(FilterChanges::Hashes(block_hashes))
            }
            FilterKind::Log(filter) => {
                // Resolve the effective block range, clamped to the filter's own bounds.
                let (from_block_number, to_block_number) = match filter.block_option {
                    FilterBlockOption::Range { from_block, to_block } => {
                        let from = from_block
                            .map(|num| self.provider().convert_block_number(num))
                            .transpose()?
                            .flatten();
                        let to = to_block
                            .map(|num| self.provider().convert_block_number(num))
                            .transpose()?
                            .flatten();
                        logs_utils::get_filter_block_range(from, to, start_block, info)
                    }
                    FilterBlockOption::AtBlockHash(_) => {
                        // blockHash is equivalent to fromBlock = toBlock = the block number with
                        // hash blockHash
                        // get_logs_in_block_range is inclusive
                        (start_block, best_number)
                    }
                };
                let logs = self
                    .inner
                    .clone()
                    .get_logs_in_block_range(
                        *filter,
                        from_block_number,
                        to_block_number,
                        self.inner.query_limits,
                    )
                    .await?;
                Ok(FilterChanges::Logs(logs))
            }
        }
    }

    /// Returns an array of all logs matching filter with given id.
    ///
    /// Returns an error if no matching log filter exists.
    ///
    /// Handler for `eth_getFilterLogs`
    pub async fn filter_logs(&self, id: FilterId) -> Result<Vec<Log>, EthFilterError> {
        let filter = {
            let filters = self.inner.active_filters.inner.lock().await;
            if let FilterKind::Log(ref filter) =
                filters.get(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?.kind
            {
                *filter.clone()
            } else {
                // Not a log filter
                return Err(EthFilterError::FilterNotFound(id))
            }
        };

        self.logs_for_filter(filter, self.inner.query_limits).await
    }

    /// Returns logs matching given filter object.
    async fn logs_for_filter(
        &self,
        filter: Filter,
        limits: QueryLimits,
    ) -> Result<Vec<Log>, EthFilterError> {
        self.inner.clone().logs_for_filter(filter, limits).await
    }
}
#[async_trait]
impl<Eth> EthFilterApiServer<RpcTransaction<Eth::NetworkTypes>> for EthFilter<Eth>
where
    Eth: FullEthApiTypes + RpcNodeCoreExt + 'static,
{
    /// Handler for `eth_newFilter`
    async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId> {
        trace!(target: "rpc::eth", "Serving eth_newFilter");
        self.inner
            .install_filter(FilterKind::<RpcTransaction<Eth::NetworkTypes>>::Log(Box::new(filter)))
            .await
    }

    /// Handler for `eth_newBlockFilter`
    async fn new_block_filter(&self) -> RpcResult<FilterId> {
        trace!(target: "rpc::eth", "Serving eth_newBlockFilter");
        self.inner.install_filter(FilterKind::<RpcTransaction<Eth::NetworkTypes>>::Block).await
    }

    /// Handler for `eth_newPendingTransactionFilter`
    async fn new_pending_transaction_filter(
        &self,
        kind: Option<PendingTransactionFilterKind>,
    ) -> RpcResult<FilterId> {
        trace!(target: "rpc::eth", "Serving eth_newPendingTransactionFilter");

        // The filter can yield either just tx hashes or full RPC transactions.
        let transaction_kind = match kind.unwrap_or_default() {
            PendingTransactionFilterKind::Hashes => {
                let receiver = self.pool().pending_transactions_listener();
                let pending_txs_receiver = PendingTransactionsReceiver::new(receiver);
                FilterKind::PendingTransaction(PendingTransactionKind::Hashes(pending_txs_receiver))
            }
            PendingTransactionFilterKind::Full => {
                let stream = self.pool().new_pending_pool_transactions_listener();
                let full_txs_receiver = FullTransactionsReceiver::new(
                    stream,
                    self.inner.eth_api.tx_resp_builder().clone(),
                );
                FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new(
                    full_txs_receiver,
                )))
            }
        };

        // Install the filter and propagate any errors
        self.inner.install_filter(transaction_kind).await
    }

    /// Handler for `eth_getFilterChanges`
    async fn filter_changes(
        &self,
        id: FilterId,
    ) -> RpcResult<FilterChanges<RpcTransaction<Eth::NetworkTypes>>> {
        trace!(target: "rpc::eth", "Serving eth_getFilterChanges");
        Ok(Self::filter_changes(self, id).await?)
    }

    /// Returns an array of all logs matching filter with given id.
    ///
    /// Returns an error if no matching log filter exists.
    ///
    /// Handler for `eth_getFilterLogs`
    async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>> {
        trace!(target: "rpc::eth", "Serving eth_getFilterLogs");
        Ok(Self::filter_logs(self, id).await?)
    }

    /// Handler for `eth_uninstallFilter`
    async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool> {
        trace!(target: "rpc::eth", "Serving eth_uninstallFilter");
        let mut filters = self.inner.active_filters.inner.lock().await;
        // Returns whether a filter with that id actually existed.
        if filters.remove(&id).is_some() {
            trace!(target: "rpc::eth::filter", ?id, "uninstalled filter");
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Returns logs matching given filter object.
    ///
    /// Handler for `eth_getLogs`
    async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>> {
        trace!(target: "rpc::eth", "Serving eth_getLogs");
        Ok(self.logs_for_filter(filter, self.inner.query_limits).await?)
    }
}
impl<Eth> std::fmt::Debug for EthFilter<Eth>
where
    Eth: EthApiTypes,
{
    /// Formats as `EthFilter { .. }` without requiring the inner state to be `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("EthFilter");
        dbg.finish_non_exhaustive()
    }
}
/// Container type `EthFilter`
///
/// Holds the state shared between all clones of [`EthFilter`].
#[derive(Debug)]
struct EthFilterInner<Eth: EthApiTypes> {
    /// Inner `eth` API implementation.
    eth_api: Eth,
    /// All currently installed filters.
    active_filters: ActiveFilters<RpcTransaction<Eth::NetworkTypes>>,
    /// Provides ids to identify filters
    id_provider: Arc<dyn IdProvider>,
    /// limits for logs queries
    query_limits: QueryLimits,
    /// maximum number of headers to read at once for range filter
    max_headers_range: u64,
    /// The type that can spawn tasks.
    task_spawner: Box<dyn TaskSpawner>,
    /// Duration since the last filter poll, after which the filter is considered stale
    stale_filter_ttl: Duration,
}
impl<Eth> EthFilterInner<Eth>
where
Eth: RpcNodeCoreExt<Provider: BlockIdReader, Pool: TransactionPool>
+ EthApiTypes<NetworkTypes: reth_rpc_eth_api::types::RpcTypes>
+ 'static,
{
    /// Access the underlying provider.
    ///
    /// Convenience accessor delegating to the inner `eth` API.
    fn provider(&self) -> &Eth::Provider {
        self.eth_api.provider()
    }
    /// Access the underlying [`EthStateCache`].
    ///
    /// Convenience accessor delegating to the inner `eth` API.
    fn eth_cache(&self) -> &EthStateCache<Eth::Primitives> {
        self.eth_api.cache()
    }
    /// Returns logs matching given filter object.
    ///
    /// Dispatches on the filter's block option: a single block identified by hash is served
    /// directly from the receipt cache; a block range is resolved to concrete block numbers and
    /// handed to [`Self::get_logs_in_block_range`].
    async fn logs_for_filter(
        self: Arc<Self>,
        filter: Filter,
        limits: QueryLimits,
    ) -> Result<Vec<Log>, EthFilterError> {
        match filter.block_option {
            FilterBlockOption::AtBlockHash(block_hash) => {
                // for all matching logs in the block
                // get the block header with the hash
                let header = self
                    .provider()
                    .header_by_hash_or_number(block_hash.into())?
                    .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?;

                let block_num_hash = BlockNumHash::new(header.number(), block_hash);

                // we also need to ensure that the receipts are available and return an error if
                // not, in case the block hash been reorged
                let (receipts, maybe_block) = self
                    .eth_cache()
                    .get_receipts_and_maybe_block(block_num_hash.hash)
                    .await?
                    .ok_or(EthApiError::HeaderNotFound(block_hash.into()))?;

                let mut all_logs = Vec::new();
                // Prefer the cached block; fall back to the provider when it's not cached.
                append_matching_block_logs(
                    &mut all_logs,
                    maybe_block
                        .map(ProviderOrBlock::Block)
                        .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())),
                    &filter,
                    block_num_hash,
                    &receipts,
                    false,
                    header.timestamp(),
                )?;

                Ok(all_logs)
            }
            FilterBlockOption::Range { from_block, to_block } => {
                // compute the range
                let info = self.provider().chain_info()?;

                // we start at the most recent block if unset in filter
                let start_block = info.best_number;
                let from = from_block
                    .map(|num| self.provider().convert_block_number(num))
                    .transpose()?
                    .flatten();
                let to = to_block
                    .map(|num| self.provider().convert_block_number(num))
                    .transpose()?
                    .flatten();
                let (from_block_number, to_block_number) =
                    logs_utils::get_filter_block_range(from, to, start_block, info);
                self.get_logs_in_block_range(filter, from_block_number, to_block_number, limits)
                    .await
            }
        }
    }
/// Installs a new filter and returns the new identifier.
async fn install_filter(
&self,
kind: FilterKind<RpcTransaction<Eth::NetworkTypes>>,
) -> RpcResult<FilterId> {
let last_poll_block_number = self.provider().best_block_number().to_rpc_result()?;
let subscription_id = self.id_provider.next_id();
let id = match subscription_id {
jsonrpsee_types::SubscriptionId::Num(n) => FilterId::Num(n),
jsonrpsee_types::SubscriptionId::Str(s) => FilterId::Str(s.into_owned()),
};
let mut filters = self.active_filters.inner.lock().await;
filters.insert(
id.clone(),
ActiveFilter {
block: last_poll_block_number,
last_poll_timestamp: Instant::now(),
kind,
},
);
Ok(id)
}
/// Returns all logs in the given _inclusive_ range that match the filter
///
/// Returns an error if:
/// - underlying database error
/// - amount of matches exceeds configured limit
async fn get_logs_in_block_range(
self: Arc<Self>,
filter: Filter,
from_block: u64,
to_block: u64,
limits: QueryLimits,
) -> Result<Vec<Log>, EthFilterError> {
trace!(target: "rpc::eth::filter", from=from_block, to=to_block, ?filter, "finding logs in range");
// perform boundary checks first
if to_block < from_block {
return Err(EthFilterError::InvalidBlockRangeParams)
}
if let Some(max_blocks_per_filter) =
limits.max_blocks_per_filter.filter(|limit| to_block - from_block > *limit)
{
return Err(EthFilterError::QueryExceedsMaxBlocks(max_blocks_per_filter))
}
let (tx, rx) = oneshot::channel();
let this = self.clone();
self.task_spawner.spawn_blocking(Box::pin(async move {
let res =
this.get_logs_in_block_range_inner(&filter, from_block, to_block, limits).await;
let _ = tx.send(res);
}));
rx.await.map_err(|_| EthFilterError::InternalError)?
}
/// Returns all logs in the given _inclusive_ range that match the filter
///
/// Note: This function uses a mix of blocking db operations for fetching indices and header
/// ranges and utilizes the rpc cache for optimistically fetching receipts and blocks.
/// This function is considered blocking and should thus be spawned on a blocking task.
///
/// Returns an error if:
/// - underlying database error
async fn get_logs_in_block_range_inner(
self: Arc<Self>,
filter: &Filter,
from_block: u64,
to_block: u64,
limits: QueryLimits,
) -> Result<Vec<Log>, EthFilterError> {
let mut all_logs = Vec::new();
let mut matching_headers = Vec::new();
// get current chain tip to determine processing mode
let chain_tip = self.provider().best_block_number()?;
// first collect all headers that match the bloom filter for cached mode decision
for (from, to) in
BlockRangeInclusiveIter::new(from_block..=to_block, self.max_headers_range)
{
let headers = self.provider().headers_range(from..=to)?;
let mut headers_iter = headers.into_iter().peekable();
while let Some(header) = headers_iter.next() {
if !filter.matches_bloom(header.logs_bloom()) {
continue
}
let current_number = header.number();
let block_hash = match headers_iter.peek() {
Some(next_header) if next_header.number() == current_number + 1 => {
// Headers are consecutive, use the more efficient parent_hash
next_header.parent_hash()
}
_ => {
// Headers not consecutive or last header, calculate hash
header.hash_slow()
}
};
matching_headers.push(SealedHeader::new(header, block_hash));
}
}
// initialize the appropriate range mode based on collected headers
let mut range_mode = RangeMode::new(
self.clone(),
matching_headers,
from_block,
to_block,
self.max_headers_range,
chain_tip,
);
// iterate through the range mode to get receipts and blocks
while let Some(ReceiptBlockResult { receipts, recovered_block, header }) =
range_mode.next().await?
{
let num_hash = header.num_hash();
append_matching_block_logs(
&mut all_logs,
recovered_block
.map(ProviderOrBlock::Block)
.unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())),
filter,
num_hash,
&receipts,
false,
header.timestamp(),
)?;
// size check but only if range is multiple blocks, so we always return all
// logs of a single block
let is_multi_block_range = from_block != to_block;
if let Some(max_logs_per_response) = limits.max_logs_per_response {
if is_multi_block_range && all_logs.len() > max_logs_per_response {
debug!(
target: "rpc::eth::filter",
logs_found = all_logs.len(),
max_logs_per_response,
from_block,
to_block = num_hash.number.saturating_sub(1),
"Query exceeded max logs per response limit"
);
return Err(EthFilterError::QueryExceedsMaxResults {
max_logs: max_logs_per_response,
from_block,
to_block: num_hash.number.saturating_sub(1),
});
}
}
}
Ok(all_logs)
}
}
/// All active filters
#[derive(Debug, Clone, Default)]
pub struct ActiveFilters<T> {
inner: Arc<Mutex<HashMap<FilterId, ActiveFilter<T>>>>,
}
impl<T> ActiveFilters<T> {
/// Returns an empty instance.
pub fn new() -> Self {
Self { inner: Arc::new(Mutex::new(HashMap::default())) }
}
}
/// An installed filter
#[derive(Debug)]
struct ActiveFilter<T> {
/// At which block the filter was polled last.
block: u64,
/// Last time this filter was polled.
last_poll_timestamp: Instant,
/// What kind of filter it is.
kind: FilterKind<T>,
}
/// A receiver for pending transactions that returns all new transactions since the last poll.
#[derive(Debug, Clone)]
struct PendingTransactionsReceiver {
txs_receiver: Arc<Mutex<Receiver<TxHash>>>,
}
impl PendingTransactionsReceiver {
fn new(receiver: Receiver<TxHash>) -> Self {
Self { txs_receiver: Arc::new(Mutex::new(receiver)) }
}
/// Returns all new pending transactions received since the last poll.
async fn drain<T>(&self) -> FilterChanges<T> {
let mut pending_txs = Vec::new();
let mut prepared_stream = self.txs_receiver.lock().await;
while let Ok(tx_hash) = prepared_stream.try_recv() {
pending_txs.push(tx_hash);
}
// Convert the vector of hashes into FilterChanges::Hashes
FilterChanges::Hashes(pending_txs)
}
}
/// A structure to manage and provide access to a stream of full transaction details.
#[derive(Debug, Clone)]
struct FullTransactionsReceiver<T: PoolTransaction, TxCompat> {
txs_stream: Arc<Mutex<NewSubpoolTransactionStream<T>>>,
tx_resp_builder: TxCompat,
}
impl<T, TxCompat> FullTransactionsReceiver<T, TxCompat>
where
T: PoolTransaction + 'static,
TxCompat: RpcConvert<Primitives: NodePrimitives<SignedTx = T::Consensus>>,
{
/// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream.
fn new(stream: NewSubpoolTransactionStream<T>, tx_resp_builder: TxCompat) -> Self {
Self { txs_stream: Arc::new(Mutex::new(stream)), tx_resp_builder }
}
/// Returns all new pending transactions received since the last poll.
async fn drain(&self) -> FilterChanges<RpcTransaction<TxCompat::Network>> {
let mut pending_txs = Vec::new();
let mut prepared_stream = self.txs_stream.lock().await;
while let Ok(tx) = prepared_stream.try_recv() {
match self.tx_resp_builder.fill_pending(tx.transaction.to_consensus()) {
Ok(tx) => pending_txs.push(tx),
Err(err) => {
error!(target: "rpc",
%err,
"Failed to fill txn with block context"
);
}
}
}
FilterChanges::Transactions(pending_txs)
}
}
/// Helper trait for [`FullTransactionsReceiver`] to erase the `Transaction` type.
#[async_trait]
trait FullTransactionsFilter<T>: fmt::Debug + Send + Sync + Unpin + 'static {
async fn drain(&self) -> FilterChanges<T>;
}
#[async_trait]
impl<T, TxCompat> FullTransactionsFilter<RpcTransaction<TxCompat::Network>>
for FullTransactionsReceiver<T, TxCompat>
where
T: PoolTransaction + 'static,
TxCompat: RpcConvert<Primitives: NodePrimitives<SignedTx = T::Consensus>> + 'static,
{
async fn drain(&self) -> FilterChanges<RpcTransaction<TxCompat::Network>> {
Self::drain(self).await
}
}
/// Represents the kind of pending transaction data that can be retrieved.
///
/// This enum differentiates between two kinds of pending transaction data:
/// - Just the transaction hashes.
/// - Full transaction details.
#[derive(Debug, Clone)]
enum PendingTransactionKind<T> {
Hashes(PendingTransactionsReceiver),
FullTransaction(Arc<dyn FullTransactionsFilter<T>>),
}
impl<T: 'static> PendingTransactionKind<T> {
async fn drain(&self) -> FilterChanges<T> {
match self {
Self::Hashes(receiver) => receiver.drain().await,
Self::FullTransaction(receiver) => receiver.drain().await,
}
}
}
#[derive(Clone, Debug)]
enum FilterKind<T> {
Log(Box<Filter>),
Block,
PendingTransaction(PendingTransactionKind<T>),
}
/// An iterator that yields _inclusive_ block ranges of a given step size
#[derive(Debug)]
struct BlockRangeInclusiveIter {
iter: StepBy<RangeInclusive<u64>>,
step: u64,
end: u64,
}
impl BlockRangeInclusiveIter {
fn new(range: RangeInclusive<u64>, step: u64) -> Self {
Self { end: *range.end(), iter: range.step_by(step as usize + 1), step }
}
}
impl Iterator for BlockRangeInclusiveIter {
type Item = (u64, u64);
fn next(&mut self) -> Option<Self::Item> {
let start = self.iter.next()?;
let end = (start + self.step).min(self.end);
if start > end {
return None
}
Some((start, end))
}
}
/// Errors that can occur in the handler implementation
#[derive(Debug, thiserror::Error)]
pub enum EthFilterError {
/// Filter not found.
#[error("filter not found")]
FilterNotFound(FilterId),
/// Invalid block range.
#[error("invalid block range params")]
InvalidBlockRangeParams,
/// Query scope is too broad.
#[error("query exceeds max block range {0}")]
QueryExceedsMaxBlocks(u64),
/// Query result is too large.
#[error("query exceeds max results {max_logs}, retry with the range {from_block}-{to_block}")]
QueryExceedsMaxResults {
/// Maximum number of logs allowed per response
max_logs: usize,
/// Start block of the suggested retry range
from_block: u64,
/// End block of the suggested retry range (last successfully processed block)
to_block: u64,
},
/// Error serving request in `eth_` namespace.
#[error(transparent)]
EthAPIError(#[from] EthApiError),
/// Error thrown when a spawned task failed to deliver a response.
#[error("internal filter error")]
InternalError,
}
impl From<EthFilterError> for jsonrpsee::types::error::ErrorObject<'static> {
fn from(err: EthFilterError) -> Self {
match err {
EthFilterError::FilterNotFound(_) => rpc_error_with_code(
jsonrpsee::types::error::INVALID_PARAMS_CODE,
"filter not found",
),
err @ EthFilterError::InternalError => {
rpc_error_with_code(jsonrpsee::types::error::INTERNAL_ERROR_CODE, err.to_string())
}
EthFilterError::EthAPIError(err) => err.into(),
err @ (EthFilterError::InvalidBlockRangeParams |
EthFilterError::QueryExceedsMaxBlocks(_) |
EthFilterError::QueryExceedsMaxResults { .. }) => {
rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string())
}
}
}
}
impl From<ProviderError> for EthFilterError {
fn from(err: ProviderError) -> Self {
Self::EthAPIError(err.into())
}
}
/// Helper type for the common pattern of returning receipts, block and the original header that is
/// a match for the filter.
struct ReceiptBlockResult<P>
where
P: ReceiptProvider + BlockReader,
{
/// We always need the entire receipts for the matching block.
receipts: Arc<Vec<ProviderReceipt<P>>>,
/// Block can be optional and we can fetch it lazily when needed.
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/sim_bundle.rs | crates/rpc/rpc/src/eth/sim_bundle.rs | //! `Eth` Sim bundle implementation and helpers.
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumberOrTag;
use alloy_evm::overrides::apply_block_overrides;
use alloy_primitives::U256;
use alloy_rpc_types_eth::BlockId;
use alloy_rpc_types_mev::{
BundleItem, Inclusion, MevSendBundle, Privacy, RefundConfig, SimBundleLogs, SimBundleOverrides,
SimBundleResponse, Validity,
};
use jsonrpsee::core::RpcResult;
use reth_evm::{ConfigureEvm, Evm};
use reth_primitives_traits::{Recovered, SignedTransaction};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_api::MevSimApiServer;
use reth_rpc_eth_api::{
helpers::{block::LoadBlock, Call, EthTransactions},
FromEthApiError, FromEvmError,
};
use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError};
use reth_storage_api::ProviderTx;
use reth_tasks::pool::BlockingTaskGuard;
use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool};
use revm::{context_interface::result::ResultAndState, DatabaseCommit, DatabaseRef};
use std::{sync::Arc, time::Duration};
use tracing::trace;
/// Maximum bundle depth
const MAX_NESTED_BUNDLE_DEPTH: usize = 5;
/// Maximum body size
const MAX_BUNDLE_BODY_SIZE: usize = 50;
/// Default simulation timeout
const DEFAULT_SIM_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum simulation timeout
const MAX_SIM_TIMEOUT: Duration = Duration::from_secs(30);
/// Maximum payout cost
const SBUNDLE_PAYOUT_MAX_COST: u64 = 30_000;
/// A flattened representation of a bundle item containing transaction and associated metadata.
#[derive(Clone, Debug)]
pub struct FlattenedBundleItem<T> {
/// The signed transaction
pub tx: Recovered<T>,
/// Whether the transaction is allowed to revert
pub can_revert: bool,
/// Item-level inclusion constraints
pub inclusion: Inclusion,
/// Optional validity constraints for the bundle item
pub validity: Option<Validity>,
/// Optional privacy settings for the bundle item
pub privacy: Option<Privacy>,
/// Optional refund percent for the bundle item
pub refund_percent: Option<u64>,
/// Optional refund configs for the bundle item
pub refund_configs: Option<Vec<RefundConfig>>,
}
/// `Eth` sim bundle implementation.
pub struct EthSimBundle<Eth> {
/// All nested fields bundled together.
inner: Arc<EthSimBundleInner<Eth>>,
}
impl<Eth> EthSimBundle<Eth> {
/// Create a new `EthSimBundle` instance.
pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self {
Self { inner: Arc::new(EthSimBundleInner { eth_api, blocking_task_guard }) }
}
/// Access the underlying `Eth` API.
pub fn eth_api(&self) -> &Eth {
&self.inner.eth_api
}
}
impl<Eth> EthSimBundle<Eth>
where
Eth: EthTransactions + LoadBlock + Call + 'static,
{
/// Flattens a potentially nested bundle into a list of individual transactions in a
/// `FlattenedBundleItem` with their associated metadata. This handles recursive bundle
/// processing up to `MAX_NESTED_BUNDLE_DEPTH` and `MAX_BUNDLE_BODY_SIZE`, preserving
/// inclusion, validity and privacy settings from parent bundles.
fn parse_and_flatten_bundle(
&self,
request: &MevSendBundle,
) -> Result<Vec<FlattenedBundleItem<ProviderTx<Eth::Provider>>>, EthApiError> {
let mut items = Vec::new();
// Stack for processing bundles
let mut stack = Vec::new();
// Start with initial bundle, index 0, and depth 1
stack.push((request, 0, 1));
while let Some((current_bundle, mut idx, depth)) = stack.pop() {
// Check max depth
if depth > MAX_NESTED_BUNDLE_DEPTH {
return Err(EthApiError::InvalidParams(EthSimBundleError::MaxDepth.to_string()));
}
// Determine inclusion, validity, and privacy
let inclusion = ¤t_bundle.inclusion;
let validity = ¤t_bundle.validity;
let privacy = ¤t_bundle.privacy;
// Validate inclusion parameters
let block_number = inclusion.block_number();
let max_block_number = inclusion.max_block_number().unwrap_or(block_number);
if max_block_number < block_number || block_number == 0 {
return Err(EthApiError::InvalidParams(
EthSimBundleError::InvalidInclusion.to_string(),
));
}
// Validate bundle body size
if current_bundle.bundle_body.len() > MAX_BUNDLE_BODY_SIZE {
return Err(EthApiError::InvalidParams(
EthSimBundleError::BundleTooLarge.to_string(),
));
}
// Validate validity and refund config
if let Some(validity) = ¤t_bundle.validity {
// Validate refund entries
if let Some(refunds) = &validity.refund {
let mut total_percent = 0;
for refund in refunds {
if refund.body_idx as usize >= current_bundle.bundle_body.len() {
return Err(EthApiError::InvalidParams(
EthSimBundleError::InvalidValidity.to_string(),
));
}
if 100 - total_percent < refund.percent {
return Err(EthApiError::InvalidParams(
EthSimBundleError::InvalidValidity.to_string(),
));
}
total_percent += refund.percent;
}
}
// Validate refund configs
if let Some(refund_configs) = &validity.refund_config {
let mut total_percent = 0;
for refund_config in refund_configs {
if 100 - total_percent < refund_config.percent {
return Err(EthApiError::InvalidParams(
EthSimBundleError::InvalidValidity.to_string(),
));
}
total_percent += refund_config.percent;
}
}
}
let body = ¤t_bundle.bundle_body;
// Process items in the current bundle
while idx < body.len() {
match &body[idx] {
BundleItem::Tx { tx, can_revert } => {
let tx = recover_raw_transaction::<PoolPooledTx<Eth::Pool>>(tx)?;
let tx = tx.map(
<Eth::Pool as TransactionPool>::Transaction::pooled_into_consensus,
);
let refund_percent =
validity.as_ref().and_then(|v| v.refund.as_ref()).and_then(|refunds| {
refunds.iter().find_map(|refund| {
(refund.body_idx as usize == idx).then_some(refund.percent)
})
});
let refund_configs =
validity.as_ref().and_then(|v| v.refund_config.clone());
// Create FlattenedBundleItem with current inclusion, validity, and privacy
let flattened_item = FlattenedBundleItem {
tx,
can_revert: *can_revert,
inclusion: inclusion.clone(),
validity: validity.clone(),
privacy: privacy.clone(),
refund_percent,
refund_configs,
};
// Add to items
items.push(flattened_item);
idx += 1;
}
BundleItem::Bundle { bundle } => {
// Push the current bundle and next index onto the stack to resume later
stack.push((current_bundle, idx + 1, depth));
// process the nested bundle next
stack.push((bundle, 0, depth + 1));
break;
}
BundleItem::Hash { hash: _ } => {
// Hash-only items are not allowed
return Err(EthApiError::InvalidParams(
EthSimBundleError::InvalidBundle.to_string(),
));
}
}
}
}
Ok(items)
}
async fn sim_bundle_inner(
&self,
request: MevSendBundle,
overrides: SimBundleOverrides,
logs: bool,
) -> Result<SimBundleResponse, Eth::Error> {
let SimBundleOverrides { parent_block, block_overrides, .. } = overrides;
// Parse and validate bundle
// Also, flatten the bundle here so that its easier to process
let flattened_bundle = self.parse_and_flatten_bundle(&request)?;
let block_id = parent_block.unwrap_or(BlockId::Number(BlockNumberOrTag::Latest));
let (mut evm_env, current_block_id) = self.eth_api().evm_env_at(block_id).await?;
let current_block = self.eth_api().recovered_block(current_block_id).await?;
let current_block = current_block.ok_or(EthApiError::HeaderNotFound(block_id))?;
let eth_api = self.inner.eth_api.clone();
let sim_response = self
.inner
.eth_api
.spawn_with_state_at_block(current_block_id, move |state| {
// Setup environment
let current_block_number = current_block.number();
let coinbase = evm_env.block_env.beneficiary;
let basefee = evm_env.block_env.basefee;
let mut db = CacheDB::new(StateProviderDatabase::new(state));
// apply overrides
apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env);
let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase)
.map_err(EthApiError::from_eth_err)?
.map(|acc| acc.balance)
.unwrap_or_default();
let mut coinbase_balance_before_tx = initial_coinbase_balance;
let mut total_gas_used = 0;
let mut total_profit = U256::ZERO;
let mut refundable_value = U256::ZERO;
let mut body_logs: Vec<SimBundleLogs> = Vec::new();
let mut evm = eth_api.evm_config().evm_with_env(db, evm_env);
let mut log_index = 0;
for (tx_index, item) in flattened_bundle.iter().enumerate() {
// Check inclusion constraints
let block_number = item.inclusion.block_number();
let max_block_number =
item.inclusion.max_block_number().unwrap_or(block_number);
if current_block_number < block_number ||
current_block_number > max_block_number
{
return Err(EthApiError::InvalidParams(
EthSimBundleError::InvalidInclusion.to_string(),
)
.into());
}
let ResultAndState { result, state } = evm
.transact(eth_api.evm_config().tx_env(&item.tx))
.map_err(Eth::Error::from_evm_err)?;
if !result.is_success() && !item.can_revert {
return Err(EthApiError::InvalidParams(
EthSimBundleError::BundleTransactionFailed.to_string(),
)
.into());
}
let gas_used = result.gas_used();
total_gas_used += gas_used;
// coinbase is always present in the result state
let coinbase_balance_after_tx =
state.get(&coinbase).map(|acc| acc.info.balance).unwrap_or_default();
let coinbase_diff =
coinbase_balance_after_tx.saturating_sub(coinbase_balance_before_tx);
total_profit += coinbase_diff;
// Add to refundable value if this tx does not have a refund percent
if item.refund_percent.is_none() {
refundable_value += coinbase_diff;
}
// Update coinbase balance before next tx
coinbase_balance_before_tx = coinbase_balance_after_tx;
// Collect logs if requested
// TODO: since we are looping over iteratively, we are not collecting bundle
// logs. We should collect bundle logs when we are processing the bundle items.
if logs {
let tx_logs = result
.logs()
.iter()
.map(|log| {
let full_log = alloy_rpc_types_eth::Log {
inner: log.clone(),
block_hash: None,
block_number: None,
block_timestamp: None,
transaction_hash: Some(*item.tx.tx_hash()),
transaction_index: Some(tx_index as u64),
log_index: Some(log_index),
removed: false,
};
log_index += 1;
full_log
})
.collect();
let sim_bundle_logs =
SimBundleLogs { tx_logs: Some(tx_logs), bundle_logs: None };
body_logs.push(sim_bundle_logs);
}
// Apply state changes
evm.db_mut().commit(state);
}
// After processing all transactions, process refunds
for item in &flattened_bundle {
if let Some(refund_percent) = item.refund_percent {
// Get refund configurations
let refund_configs = item.refund_configs.clone().unwrap_or_else(|| {
vec![RefundConfig { address: item.tx.signer(), percent: 100 }]
});
// Calculate payout transaction fee
let payout_tx_fee = U256::from(basefee) *
U256::from(SBUNDLE_PAYOUT_MAX_COST) *
U256::from(refund_configs.len() as u64);
// Add gas used for payout transactions
total_gas_used += SBUNDLE_PAYOUT_MAX_COST * refund_configs.len() as u64;
// Calculate allocated refundable value (payout value)
let payout_value =
refundable_value * U256::from(refund_percent) / U256::from(100);
if payout_tx_fee > payout_value {
return Err(EthApiError::InvalidParams(
EthSimBundleError::NegativeProfit.to_string(),
)
.into());
}
// Subtract payout value from total profit
total_profit = total_profit.checked_sub(payout_value).ok_or(
EthApiError::InvalidParams(
EthSimBundleError::NegativeProfit.to_string(),
),
)?;
// Adjust refundable value
refundable_value = refundable_value.checked_sub(payout_value).ok_or(
EthApiError::InvalidParams(
EthSimBundleError::NegativeProfit.to_string(),
),
)?;
}
}
// Calculate mev gas price
let mev_gas_price = if total_gas_used != 0 {
total_profit / U256::from(total_gas_used)
} else {
U256::ZERO
};
Ok(SimBundleResponse {
success: true,
state_block: current_block_number,
error: None,
logs: Some(body_logs),
gas_used: total_gas_used,
mev_gas_price,
profit: total_profit,
refundable_value,
exec_error: None,
revert: None,
})
})
.await?;
Ok(sim_response)
}
}
#[async_trait::async_trait]
impl<Eth> MevSimApiServer for EthSimBundle<Eth>
where
Eth: EthTransactions + LoadBlock + Call + 'static,
{
async fn sim_bundle(
&self,
request: MevSendBundle,
overrides: SimBundleOverrides,
) -> RpcResult<SimBundleResponse> {
trace!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides);
let override_timeout = overrides.timeout;
let timeout = override_timeout
.map(Duration::from_secs)
.filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT)
.unwrap_or(DEFAULT_SIM_TIMEOUT);
let bundle_res =
tokio::time::timeout(timeout, Self::sim_bundle_inner(self, request, overrides, true))
.await
.map_err(|_| {
EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string())
})?;
bundle_res.map_err(Into::into)
}
}
/// Container type for `EthSimBundle` internals
#[derive(Debug)]
struct EthSimBundleInner<Eth> {
/// Access to commonly used code of the `eth` namespace
eth_api: Eth,
// restrict the number of concurrent tracing calls.
#[expect(dead_code)]
blocking_task_guard: BlockingTaskGuard,
}
impl<Eth> std::fmt::Debug for EthSimBundle<Eth> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("EthSimBundle").finish_non_exhaustive()
}
}
impl<Eth> Clone for EthSimBundle<Eth> {
fn clone(&self) -> Self {
Self { inner: Arc::clone(&self.inner) }
}
}
/// [`EthSimBundle`] specific errors.
#[derive(Debug, thiserror::Error)]
pub enum EthSimBundleError {
/// Thrown when max depth is reached
#[error("max depth reached")]
MaxDepth,
/// Thrown when a bundle is unmatched
#[error("unmatched bundle")]
UnmatchedBundle,
/// Thrown when a bundle is too large
#[error("bundle too large")]
BundleTooLarge,
/// Thrown when validity is invalid
#[error("invalid validity")]
InvalidValidity,
/// Thrown when inclusion is invalid
#[error("invalid inclusion")]
InvalidInclusion,
/// Thrown when a bundle is invalid
#[error("invalid bundle")]
InvalidBundle,
/// Thrown when a bundle simulation times out
#[error("bundle simulation timed out")]
BundleTimeout,
/// Thrown when a transaction is reverted in a bundle
#[error("bundle transaction failed")]
BundleTransactionFailed,
/// Thrown when a bundle simulation returns negative profit
#[error("bundle simulation returned negative profit")]
NegativeProfit,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/core.rs | crates/rpc/rpc/src/eth/core.rs | //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`](crate::EthApi) trait
//! Handles RPC requests for the `eth_` namespace.
use std::sync::Arc;
use crate::{eth::helpers::types::EthRpcConverter, EthApiBuilder};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumberOrTag;
use alloy_network::Ethereum;
use alloy_primitives::{Bytes, U256};
use alloy_rpc_client::RpcClient;
use derive_more::Deref;
use reth_chainspec::{ChainSpec, ChainSpecProvider};
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_node_api::{FullNodeComponents, FullNodeTypes};
use reth_rpc_convert::{RpcConvert, RpcConverter};
use reth_rpc_eth_api::{
helpers::{pending_block::PendingEnvBuilder, spec::SignersForRpc, SpawnBlocking},
node::{RpcNodeCoreAdapter, RpcNodeCoreExt},
EthApiTypes, RpcNodeCore,
};
use reth_rpc_eth_types::{
builder::config::PendingBlockKind, receipt::EthReceiptConverter, tx_forward::ForwardConfig,
EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock,
};
use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, ProviderHeader};
use reth_tasks::{
pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner, TokioTaskExecutor,
};
use reth_transaction_pool::{
noop::NoopTransactionPool, AddedTransactionOutcome, BatchTxProcessor, BatchTxRequest,
TransactionPool,
};
use tokio::sync::{broadcast, mpsc, Mutex};
const DEFAULT_BROADCAST_CAPACITY: usize = 2000;
/// Helper type alias for [`RpcConverter`] with components from the given [`FullNodeComponents`].
pub type EthRpcConverterFor<N, NetworkT = Ethereum> = RpcConverter<
NetworkT,
<N as FullNodeComponents>::Evm,
EthReceiptConverter<<<N as FullNodeTypes>::Provider as ChainSpecProvider>::ChainSpec>,
>;
/// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`].
pub type EthApiFor<N, NetworkT = Ethereum> = EthApi<N, EthRpcConverterFor<N, NetworkT>>;
/// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`].
pub type EthApiBuilderFor<N, NetworkT = Ethereum> =
EthApiBuilder<N, EthRpcConverterFor<N, NetworkT>>;
/// `Eth` API implementation.
///
/// This type provides the functionality for handling `eth_` related requests.
/// These are implemented two-fold: Core functionality is implemented as
/// [`EthApiSpec`](reth_rpc_eth_api::helpers::EthApiSpec) trait. Additionally, the required server
/// implementations (e.g. [`EthApiServer`](reth_rpc_eth_api::EthApiServer)) are implemented
/// separately in submodules. The rpc handler implementation can then delegate to the main impls.
/// This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone or in other
/// network handlers (for example ipc).
///
/// ## Trait requirements
///
/// While this type requires various unrestricted generic components, trait bounds are enforced when
/// additional traits are implemented for this type.
#[derive(Deref)]
pub struct EthApi<N: RpcNodeCore, Rpc: RpcConvert> {
/// All nested fields bundled together.
#[deref]
pub(super) inner: Arc<EthApiInner<N, Rpc>>,
}
impl<N, Rpc> Clone for EthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert,
{
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl
EthApi<
RpcNodeCoreAdapter<NoopProvider, NoopTransactionPool, NoopNetwork, EthEvmConfig>,
EthRpcConverter<ChainSpec>,
>
{
/// Convenience fn to obtain a new [`EthApiBuilder`] instance with mandatory components.
///
/// Creating an [`EthApi`] requires a few mandatory components:
/// - provider: The type responsible for fetching requested data from disk.
/// - transaction pool: To interact with the pool, submitting new transactions (e.g.
/// `eth_sendRawTransactions`).
/// - network: required to handle requests related to network state (e.g. `eth_syncing`).
/// - evm config: Knows how create a new EVM instance to transact,estimate,call,trace.
///
/// # Create an instance with noop ethereum implementations
///
/// ```no_run
/// use alloy_network::Ethereum;
/// use reth_evm_ethereum::EthEvmConfig;
/// use reth_network_api::noop::NoopNetwork;
/// use reth_provider::noop::NoopProvider;
/// use reth_rpc::EthApi;
/// use reth_transaction_pool::noop::NoopTransactionPool;
/// let eth_api = EthApi::builder(
/// NoopProvider::default(),
/// NoopTransactionPool::default(),
/// NoopNetwork::default(),
/// EthEvmConfig::mainnet(),
/// )
/// .build();
/// ```
#[expect(clippy::type_complexity)]
pub fn builder<Provider, Pool, Network, EvmConfig, ChainSpec>(
provider: Provider,
pool: Pool,
network: Network,
evm_config: EvmConfig,
) -> EthApiBuilder<
RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>,
RpcConverter<Ethereum, EvmConfig, EthReceiptConverter<ChainSpec>>,
>
where
RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>:
RpcNodeCore<Provider: ChainSpecProvider<ChainSpec = ChainSpec>, Evm = EvmConfig>,
{
EthApiBuilder::new(provider, pool, network, evm_config)
}
}
impl<N, Rpc> EthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert,
(): PendingEnvBuilder<N::Evm>,
{
/// Creates a new, shareable instance using the default tokio task spawner.
#[expect(clippy::too_many_arguments)]
pub fn new(
components: N,
eth_cache: EthStateCache<N::Primitives>,
gas_oracle: GasPriceOracle<N::Provider>,
gas_cap: impl Into<GasCap>,
max_simulate_blocks: u64,
eth_proof_window: u64,
blocking_task_pool: BlockingTaskPool,
fee_history_cache: FeeHistoryCache<ProviderHeader<N::Provider>>,
proof_permits: usize,
rpc_converter: Rpc,
max_batch_size: usize,
pending_block_kind: PendingBlockKind,
raw_tx_forwarder: ForwardConfig,
) -> Self {
let inner = EthApiInner::new(
components,
eth_cache,
gas_oracle,
gas_cap,
max_simulate_blocks,
eth_proof_window,
blocking_task_pool,
fee_history_cache,
TokioTaskExecutor::default().boxed(),
proof_permits,
rpc_converter,
(),
max_batch_size,
pending_block_kind,
raw_tx_forwarder.forwarder_client(),
);
Self { inner: Arc::new(inner) }
}
}
impl<N, Rpc> EthApiTypes for EthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert,
{
type Error = EthApiError;
type NetworkTypes = Rpc::Network;
type RpcConvert = Rpc;
fn tx_resp_builder(&self) -> &Self::RpcConvert {
&self.tx_resp_builder
}
}
impl<N, Rpc> RpcNodeCore for EthApi<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert,
{
    type Primitives = N::Primitives;
    type Provider = N::Provider;
    type Pool = N::Pool;
    type Evm = N::Evm;
    type Network = N::Network;

    /// Returns the transaction pool handle, delegating to the inner components.
    // `#[inline]` added for consistency with `RpcNodeCoreExt::cache` and the
    // `EthApiInner` accessors, which mark all trivial delegating getters inline.
    #[inline]
    fn pool(&self) -> &Self::Pool {
        self.inner.pool()
    }

    /// Returns the EVM config handle, delegating to the inner components.
    #[inline]
    fn evm_config(&self) -> &Self::Evm {
        self.inner.evm_config()
    }

    /// Returns the network handle, delegating to the inner components.
    #[inline]
    fn network(&self) -> &Self::Network {
        self.inner.network()
    }

    /// Returns the provider handle, delegating to the inner components.
    #[inline]
    fn provider(&self) -> &Self::Provider {
        self.inner.provider()
    }
}
impl<N, Rpc> RpcNodeCoreExt for EthApi<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert,
{
    /// Returns the async cache frontend for eth-related data.
    #[inline]
    fn cache(&self) -> &EthStateCache<N::Primitives> {
        self.inner.cache()
    }
}
impl<N, Rpc> std::fmt::Debug for EthApi<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert,
{
    // Manual impl: no `Debug` bounds are required on `N`/`Rpc`; only the type
    // name is emitted (`EthApi { .. }`), fields are intentionally omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("EthApi").finish_non_exhaustive()
    }
}
impl<N, Rpc> SpawnBlocking for EthApi<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert,
{
    /// Spawner used for IO-bound blocking work; delegates to the inner task spawner.
    #[inline]
    fn io_task_spawner(&self) -> impl TaskSpawner {
        self.inner.task_spawner()
    }

    /// Dedicated thread pool for CPU-heavy blocking (tracing) work.
    #[inline]
    fn tracing_task_pool(&self) -> &BlockingTaskPool {
        self.inner.blocking_task_pool()
    }

    /// Guard bounding how many blocking tracing tasks may run concurrently
    /// (created with `proof_permits` permits in `EthApiInner::new`).
    #[inline]
    fn tracing_task_guard(&self) -> &BlockingTaskGuard {
        self.inner.blocking_task_guard()
    }
}
/// Container holding all state shared by [`EthApi`] handles.
///
/// The outer `EthApi` wraps this in an `Arc`, so every field here is shared
/// across clones.
#[expect(missing_debug_implementations)]
pub struct EthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
    /// The components of the node.
    components: N,
    /// All configured Signers
    signers: SignersForRpc<N::Provider, Rpc::Network>,
    /// The async cache frontend for eth related data
    eth_cache: EthStateCache<N::Primitives>,
    /// The async gas oracle frontend for gas price suggestions
    gas_oracle: GasPriceOracle<N::Provider>,
    /// Maximum gas limit for `eth_call` and call tracing RPC methods.
    gas_cap: u64,
    /// Maximum number of blocks for `eth_simulateV1`.
    max_simulate_blocks: u64,
    /// The maximum number of blocks into the past for generating state proofs.
    eth_proof_window: u64,
    /// The block number at which the node started
    starting_block: U256,
    /// The type that can spawn tasks which would otherwise block.
    task_spawner: Box<dyn TaskSpawner>,
    /// Cached pending block if any
    pending_block: Mutex<Option<PendingBlock<N::Primitives>>>,
    /// A pool dedicated to CPU heavy blocking tasks.
    blocking_task_pool: BlockingTaskPool,
    /// Cache for block fees history
    fee_history_cache: FeeHistoryCache<ProviderHeader<N::Provider>>,
    /// Guard for getproof calls
    blocking_task_guard: BlockingTaskGuard,
    /// Transaction broadcast channel used to notify raw-transaction subscribers.
    raw_tx_sender: broadcast::Sender<Bytes>,
    /// Raw transaction forwarder; `None` when forwarding is disabled.
    raw_tx_forwarder: Option<RpcClient>,
    /// Converter for RPC types.
    tx_resp_builder: Rpc,
    /// Builder for pending block environment.
    next_env_builder: Box<dyn PendingEnvBuilder<N::Evm>>,
    /// Transaction batch sender for batching tx insertions
    tx_batch_sender:
        mpsc::UnboundedSender<BatchTxRequest<<N::Pool as TransactionPool>::Transaction>>,
    /// Configuration for pending block construction.
    pending_block_kind: PendingBlockKind,
}
impl<N, Rpc> EthApiInner<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert,
{
    /// Creates a new, shareable instance using the default tokio task spawner.
    ///
    /// Also spawns the long-running "tx-batcher" critical task on the given
    /// `task_spawner` for batched pool insertions.
    #[expect(clippy::too_many_arguments)]
    pub fn new(
        components: N,
        eth_cache: EthStateCache<N::Primitives>,
        gas_oracle: GasPriceOracle<N::Provider>,
        gas_cap: impl Into<GasCap>,
        max_simulate_blocks: u64,
        eth_proof_window: u64,
        blocking_task_pool: BlockingTaskPool,
        fee_history_cache: FeeHistoryCache<ProviderHeader<N::Provider>>,
        task_spawner: Box<dyn TaskSpawner + 'static>,
        proof_permits: usize,
        tx_resp_builder: Rpc,
        next_env: impl PendingEnvBuilder<N::Evm>,
        max_batch_size: usize,
        pending_block_kind: PendingBlockKind,
        raw_tx_forwarder: Option<RpcClient>,
    ) -> Self {
        // Signer set starts at its default value.
        let signers = parking_lot::RwLock::new(Default::default());
        // get the block number of the latest block; falls back to 0 if the
        // provider has no latest header
        let starting_block = U256::from(
            components
                .provider()
                .header_by_number_or_tag(BlockNumberOrTag::Latest)
                .ok()
                .flatten()
                .map(|header| header.number())
                .unwrap_or_default(),
        );
        let (raw_tx_sender, _) = broadcast::channel(DEFAULT_BROADCAST_CAPACITY);
        // Create tx pool insertion batcher
        let (processor, tx_batch_sender) =
            BatchTxProcessor::new(components.pool().clone(), max_batch_size);
        // The batcher must outlive individual requests, so it is spawned as a
        // critical task.
        task_spawner.spawn_critical("tx-batcher", Box::pin(processor));
        Self {
            components,
            signers,
            eth_cache,
            gas_oracle,
            gas_cap: gas_cap.into().into(),
            max_simulate_blocks,
            eth_proof_window,
            starting_block,
            task_spawner,
            pending_block: Default::default(),
            blocking_task_pool,
            fee_history_cache,
            blocking_task_guard: BlockingTaskGuard::new(proof_permits),
            raw_tx_sender,
            raw_tx_forwarder,
            tx_resp_builder,
            next_env_builder: Box::new(next_env),
            tx_batch_sender,
            pending_block_kind,
        }
    }
}
impl<N, Rpc> EthApiInner<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert,
{
    /// Returns a handle to data on disk.
    #[inline]
    pub fn provider(&self) -> &N::Provider {
        self.components.provider()
    }

    /// Returns a handle to the transaction response builder.
    #[inline]
    pub const fn tx_resp_builder(&self) -> &Rpc {
        &self.tx_resp_builder
    }

    /// Returns a handle to data in memory.
    #[inline]
    pub const fn cache(&self) -> &EthStateCache<N::Primitives> {
        &self.eth_cache
    }

    /// Returns a handle to the pending block.
    #[inline]
    pub const fn pending_block(&self) -> &Mutex<Option<PendingBlock<N::Primitives>>> {
        &self.pending_block
    }

    /// Returns a type that knows how to build a [`reth_evm::ConfigureEvm::NextBlockEnvCtx`] for a
    /// pending block.
    #[inline]
    pub const fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<N::Evm> {
        &*self.next_env_builder
    }

    /// Returns a handle to the task spawner.
    #[inline]
    pub const fn task_spawner(&self) -> &dyn TaskSpawner {
        &*self.task_spawner
    }

    /// Returns a handle to the blocking thread pool.
    #[inline]
    pub const fn blocking_task_pool(&self) -> &BlockingTaskPool {
        &self.blocking_task_pool
    }

    /// Returns a handle to the EVM config.
    #[inline]
    pub fn evm_config(&self) -> &N::Evm {
        self.components.evm_config()
    }

    /// Returns a handle to the transaction pool.
    #[inline]
    pub fn pool(&self) -> &N::Pool {
        self.components.pool()
    }

    /// Returns the gas cap for `eth_call` and call tracing RPC methods.
    #[inline]
    pub const fn gas_cap(&self) -> u64 {
        self.gas_cap
    }

    /// Returns the `max_simulate_blocks`.
    #[inline]
    pub const fn max_simulate_blocks(&self) -> u64 {
        self.max_simulate_blocks
    }

    /// Returns a handle to the gas oracle.
    #[inline]
    pub const fn gas_oracle(&self) -> &GasPriceOracle<N::Provider> {
        &self.gas_oracle
    }

    /// Returns a handle to the fee history cache.
    #[inline]
    pub const fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<N::Provider>> {
        &self.fee_history_cache
    }

    /// Returns a handle to the signers.
    #[inline]
    pub const fn signers(&self) -> &SignersForRpc<N::Provider, Rpc::Network> {
        &self.signers
    }

    /// Returns the block number the node started at.
    #[inline]
    pub const fn starting_block(&self) -> U256 {
        self.starting_block
    }

    /// Returns the inner `Network`
    #[inline]
    pub fn network(&self) -> &N::Network {
        self.components.network()
    }

    /// The maximum number of blocks into the past for generating state proofs.
    #[inline]
    pub const fn eth_proof_window(&self) -> u64 {
        self.eth_proof_window
    }

    /// Returns reference to [`BlockingTaskGuard`].
    #[inline]
    pub const fn blocking_task_guard(&self) -> &BlockingTaskGuard {
        &self.blocking_task_guard
    }

    /// Returns [`broadcast::Receiver`] of new raw transactions
    #[inline]
    pub fn subscribe_to_raw_transactions(&self) -> broadcast::Receiver<Bytes> {
        self.raw_tx_sender.subscribe()
    }

    /// Broadcasts raw transaction if there are active subscribers.
    #[inline]
    pub fn broadcast_raw_transaction(&self, raw_tx: Bytes) {
        // A send error only means there are no active subscribers, which is
        // fine to ignore here.
        let _ = self.raw_tx_sender.send(raw_tx);
    }

    /// Returns the transaction batch sender
    #[inline]
    const fn tx_batch_sender(
        &self,
    ) -> &mpsc::UnboundedSender<BatchTxRequest<<N::Pool as TransactionPool>::Transaction>> {
        &self.tx_batch_sender
    }

    /// Adds an _unvalidated_ transaction into the pool via the transaction batch sender.
    ///
    /// Returns an error if the batching task can no longer receive requests
    /// (`BatchTxSendError`), if its response channel is dropped, or if the pool
    /// insertion itself fails.
    #[inline]
    pub async fn add_pool_transaction(
        &self,
        transaction: <N::Pool as TransactionPool>::Transaction,
    ) -> Result<AddedTransactionOutcome, EthApiError> {
        let (response_tx, response_rx) = tokio::sync::oneshot::channel();
        let request = reth_transaction_pool::BatchTxRequest::new(transaction, response_tx);
        self.tx_batch_sender()
            .send(request)
            .map_err(|_| reth_rpc_eth_types::EthApiError::BatchTxSendError)?;
        // First `?`: the responder was dropped; second `?`: pool insertion error.
        Ok(response_rx.await??)
    }

    /// Returns the pending block kind
    #[inline]
    pub const fn pending_block_kind(&self) -> PendingBlockKind {
        self.pending_block_kind
    }

    /// Returns a handle to the raw transaction forwarder.
    #[inline]
    pub const fn raw_tx_forwarder(&self) -> Option<&RpcClient> {
        self.raw_tx_forwarder.as_ref()
    }
}
#[cfg(test)]
mod tests {
use crate::{eth::helpers::types::EthRpcConverter, EthApi, EthApiBuilder};
use alloy_consensus::{Block, BlockBody, Header};
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::{Signature, B256, U64};
use alloy_rpc_types::FeeHistory;
use jsonrpsee_types::error::INVALID_PARAMS_CODE;
use rand::Rng;
use reth_chain_state::CanonStateSubscriptions;
use reth_chainspec::{ChainSpec, ChainSpecProvider, EthChainSpec};
use reth_ethereum_primitives::TransactionSigned;
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_provider::{
test_utils::{MockEthProvider, NoopProvider},
StageCheckpointReader,
};
use reth_rpc_eth_api::{node::RpcNodeCoreAdapter, EthApiServer};
use reth_storage_api::{BlockReader, BlockReaderIdExt, StateProviderFactory};
use reth_testing_utils::generators;
use reth_transaction_pool::test_utils::{testing_pool, TestPool};
type FakeEthApi<P = MockEthProvider> = EthApi<
RpcNodeCoreAdapter<P, TestPool, NoopNetwork, EthEvmConfig>,
EthRpcConverter<ChainSpec>,
>;
fn build_test_eth_api<
P: BlockReaderIdExt<
Block = reth_ethereum_primitives::Block,
Receipt = reth_ethereum_primitives::Receipt,
Header = alloy_consensus::Header,
Transaction = reth_ethereum_primitives::TransactionSigned,
> + BlockReader
+ ChainSpecProvider<ChainSpec = ChainSpec>
+ StateProviderFactory
+ CanonStateSubscriptions<Primitives = reth_ethereum_primitives::EthPrimitives>
+ StageCheckpointReader
+ Unpin
+ Clone
+ 'static,
>(
provider: P,
) -> FakeEthApi<P> {
EthApiBuilder::new(
provider.clone(),
testing_pool(),
NoopNetwork::default(),
EthEvmConfig::new(provider.chain_spec()),
)
.build()
}
// Function to prepare the EthApi with mock data
fn prepare_eth_api(
newest_block: u64,
mut oldest_block: Option<B256>,
block_count: u64,
mock_provider: MockEthProvider,
) -> (FakeEthApi, Vec<u128>, Vec<f64>) {
let mut rng = generators::rng();
// Build mock data
let mut gas_used_ratios = Vec::with_capacity(block_count as usize);
let mut base_fees_per_gas = Vec::with_capacity(block_count as usize);
let mut last_header = None;
let mut parent_hash = B256::default();
for i in (0..block_count).rev() {
let hash = rng.random();
// Note: Generates saner values to avoid invalid overflows later
let gas_limit = rng.random::<u32>() as u64;
let base_fee_per_gas: Option<u64> =
rng.random::<bool>().then(|| rng.random::<u32>() as u64);
let gas_used = rng.random::<u32>() as u64;
let header = Header {
number: newest_block - i,
gas_limit,
gas_used,
base_fee_per_gas,
parent_hash,
..Default::default()
};
last_header = Some(header.clone());
parent_hash = hash;
const TOTAL_TRANSACTIONS: usize = 100;
let mut transactions = Vec::with_capacity(TOTAL_TRANSACTIONS);
for _ in 0..TOTAL_TRANSACTIONS {
let random_fee: u128 = rng.random();
if let Some(base_fee_per_gas) = header.base_fee_per_gas {
let transaction = TransactionSigned::new_unhashed(
reth_ethereum_primitives::Transaction::Eip1559(
alloy_consensus::TxEip1559 {
max_priority_fee_per_gas: random_fee,
max_fee_per_gas: random_fee + base_fee_per_gas as u128,
..Default::default()
},
),
Signature::test_signature(),
);
transactions.push(transaction);
} else {
let transaction = TransactionSigned::new_unhashed(
reth_ethereum_primitives::Transaction::Legacy(Default::default()),
Signature::test_signature(),
);
transactions.push(transaction);
}
}
mock_provider.add_block(
hash,
Block {
header: header.clone(),
body: BlockBody { transactions, ..Default::default() },
},
);
mock_provider.add_header(hash, header);
oldest_block.get_or_insert(hash);
gas_used_ratios.push(gas_used as f64 / gas_limit as f64);
base_fees_per_gas.push(base_fee_per_gas.map(|fee| fee as u128).unwrap_or_default());
}
// Add final base fee (for the next block outside of the request)
let last_header = last_header.unwrap();
let spec = mock_provider.chain_spec();
base_fees_per_gas.push(
spec.next_block_base_fee(&last_header, last_header.timestamp).unwrap_or_default()
as u128,
);
let eth_api = build_test_eth_api(mock_provider);
(eth_api, base_fees_per_gas, gas_used_ratios)
}
/// Invalid block range
#[tokio::test]
async fn test_fee_history_empty() {
let response = <EthApi<_, _> as EthApiServer<_, _, _, _, _>>::fee_history(
&build_test_eth_api(NoopProvider::default()),
U64::from(1),
BlockNumberOrTag::Latest,
None,
)
.await;
assert!(response.is_err());
let error_object = response.unwrap_err();
assert_eq!(error_object.code(), INVALID_PARAMS_CODE);
}
#[tokio::test]
/// Invalid block range (request is before genesis)
async fn test_fee_history_invalid_block_range_before_genesis() {
let block_count = 10;
let newest_block = 1337;
let oldest_block = None;
let (eth_api, _, _) =
prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
let response = <EthApi<_, _> as EthApiServer<_, _, _, _, _>>::fee_history(
ð_api,
U64::from(newest_block + 1),
newest_block.into(),
Some(vec![10.0]),
)
.await;
assert!(response.is_err());
let error_object = response.unwrap_err();
assert_eq!(error_object.code(), INVALID_PARAMS_CODE);
}
#[tokio::test]
/// Invalid block range (request is in the future)
async fn test_fee_history_invalid_block_range_in_future() {
let block_count = 10;
let newest_block = 1337;
let oldest_block = None;
let (eth_api, _, _) =
prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
let response = <EthApi<_, _> as EthApiServer<_, _, _, _, _>>::fee_history(
ð_api,
U64::from(1),
(newest_block + 1000).into(),
Some(vec![10.0]),
)
.await;
assert!(response.is_err());
let error_object = response.unwrap_err();
assert_eq!(error_object.code(), INVALID_PARAMS_CODE);
}
#[tokio::test]
/// Requesting no block should result in a default response
async fn test_fee_history_no_block_requested() {
let block_count = 10;
let newest_block = 1337;
let oldest_block = None;
let (eth_api, _, _) =
prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
let response = <EthApi<_, _> as EthApiServer<_, _, _, _, _>>::fee_history(
ð_api,
U64::from(0),
newest_block.into(),
None,
)
.await
.unwrap();
assert_eq!(
response,
FeeHistory::default(),
"none: requesting no block should yield a default response"
);
}
#[tokio::test]
/// Requesting a single block should return 1 block (+ base fee for the next block over)
async fn test_fee_history_single_block() {
let block_count = 10;
let newest_block = 1337;
let oldest_block = None;
let (eth_api, base_fees_per_gas, gas_used_ratios) =
prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
let fee_history =
eth_api.fee_history(U64::from(1), newest_block.into(), None).await.unwrap();
assert_eq!(
fee_history.base_fee_per_gas,
&base_fees_per_gas[base_fees_per_gas.len() - 2..],
"one: base fee per gas is incorrect"
);
assert_eq!(
fee_history.base_fee_per_gas.len(),
2,
"one: should return base fee of the next block as well"
);
assert_eq!(
&fee_history.gas_used_ratio,
&gas_used_ratios[gas_used_ratios.len() - 1..],
"one: gas used ratio is incorrect"
);
assert_eq!(fee_history.oldest_block, newest_block, "one: oldest block is incorrect");
assert!(
fee_history.reward.is_none(),
"one: no percentiles were requested, so there should be no rewards result"
);
}
/// Requesting all blocks should be ok
#[tokio::test]
async fn test_fee_history_all_blocks() {
let block_count = 10;
let newest_block = 1337;
let oldest_block = None;
let (eth_api, base_fees_per_gas, gas_used_ratios) =
prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
let fee_history =
eth_api.fee_history(U64::from(block_count), newest_block.into(), None).await.unwrap();
assert_eq!(
&fee_history.base_fee_per_gas, &base_fees_per_gas,
"all: base fee per gas is incorrect"
);
assert_eq!(
fee_history.base_fee_per_gas.len() as u64,
block_count + 1,
"all: should return base fee of the next block as well"
);
assert_eq!(
&fee_history.gas_used_ratio, &gas_used_ratios,
"all: gas used ratio is incorrect"
);
assert_eq!(
fee_history.oldest_block,
newest_block - block_count + 1,
"all: oldest block is incorrect"
);
assert!(
fee_history.reward.is_none(),
"all: no percentiles were requested, so there should be no rewards result"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/pubsub.rs | crates/rpc/rpc/src/eth/pubsub.rs | //! `eth_` `PubSub` RPC handler implementation
use std::sync::Arc;
use alloy_primitives::{TxHash, U256};
use alloy_rpc_types_eth::{
pubsub::{Params, PubSubSyncStatus, SubscriptionKind, SyncStatusMetadata},
Filter, Header, Log,
};
use futures::StreamExt;
use jsonrpsee::{
server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink,
};
use reth_chain_state::CanonStateSubscriptions;
use reth_network_api::NetworkInfo;
use reth_primitives_traits::NodePrimitives;
use reth_rpc_eth_api::{
pubsub::EthPubSubApiServer, EthApiTypes, RpcConvert, RpcNodeCore, RpcTransaction,
};
use reth_rpc_eth_types::logs_utils;
use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err};
use reth_storage_api::BlockNumReader;
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use reth_transaction_pool::{NewTransactionEvent, PoolConsensusTx, TransactionPool};
use serde::Serialize;
use tokio_stream::{
wrappers::{BroadcastStream, ReceiverStream},
Stream,
};
use tracing::error;
/// `Eth` pubsub RPC implementation.
///
/// This handles `eth_subscribe` RPC calls.
#[derive(Clone)]
pub struct EthPubSub<Eth> {
    /// All nested fields bundled together.
    ///
    /// Kept behind an [`Arc`] so clones of the handler share the same state.
    inner: Arc<EthPubSubInner<Eth>>,
}
// === impl EthPubSub ===
impl<Eth> EthPubSub<Eth> {
    /// Creates a new, shareable instance.
    ///
    /// Subscription tasks are spawned via [`tokio::task::spawn`]
    pub fn new(eth_api: Eth) -> Self {
        Self::with_spawner(eth_api, Box::<TokioTaskExecutor>::default())
    }

    /// Creates a new, shareable instance that runs subscription tasks on the
    /// provided spawner.
    pub fn with_spawner(eth_api: Eth, subscription_task_spawner: Box<dyn TaskSpawner>) -> Self {
        Self { inner: Arc::new(EthPubSubInner { eth_api, subscription_task_spawner }) }
    }
}
impl<N: NodePrimitives, Eth> EthPubSub<Eth>
where
Eth: RpcNodeCore<
Provider: BlockNumReader + CanonStateSubscriptions<Primitives = N>,
Pool: TransactionPool,
Network: NetworkInfo,
> + EthApiTypes<
RpcConvert: RpcConvert<
Primitives: NodePrimitives<SignedTx = PoolConsensusTx<Eth::Pool>>,
>,
>,
{
/// Returns the current sync status for the `syncing` subscription
pub fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus {
self.inner.sync_status(is_syncing)
}
/// Returns a stream that yields all transaction hashes emitted by the txpool.
pub fn pending_transaction_hashes_stream(&self) -> impl Stream<Item = TxHash> {
self.inner.pending_transaction_hashes_stream()
}
/// Returns a stream that yields all transactions emitted by the txpool.
pub fn full_pending_transaction_stream(
&self,
) -> impl Stream<Item = NewTransactionEvent<<Eth::Pool as TransactionPool>::Transaction>> {
self.inner.full_pending_transaction_stream()
}
/// Returns a stream that yields all new RPC blocks.
pub fn new_headers_stream(&self) -> impl Stream<Item = Header<N::BlockHeader>> {
self.inner.new_headers_stream()
}
/// Returns a stream that yields all logs that match the given filter.
pub fn log_stream(&self, filter: Filter) -> impl Stream<Item = Log> {
self.inner.log_stream(filter)
}
/// The actual handler for an accepted [`EthPubSub::subscribe`] call.
pub async fn handle_accepted(
&self,
accepted_sink: SubscriptionSink,
kind: SubscriptionKind,
params: Option<Params>,
) -> Result<(), ErrorObject<'static>> {
match kind {
SubscriptionKind::NewHeads => {
pipe_from_stream(accepted_sink, self.new_headers_stream()).await
}
SubscriptionKind::Logs => {
// if no params are provided, used default filter params
let filter = match params {
Some(Params::Logs(filter)) => *filter,
Some(Params::Bool(_)) => {
return Err(invalid_params_rpc_err("Invalid params for logs"))
}
_ => Default::default(),
};
pipe_from_stream(accepted_sink, self.log_stream(filter)).await
}
SubscriptionKind::NewPendingTransactions => {
if let Some(params) = params {
match params {
Params::Bool(true) => {
// full transaction objects requested
let stream = self.full_pending_transaction_stream().filter_map(|tx| {
let tx_value = match self
.inner
.eth_api
.tx_resp_builder()
.fill_pending(tx.transaction.to_consensus())
{
Ok(tx) => Some(tx),
Err(err) => {
error!(target = "rpc",
%err,
"Failed to fill transaction with block context"
);
None
}
};
std::future::ready(tx_value)
});
return pipe_from_stream(accepted_sink, stream).await
}
Params::Bool(false) | Params::None => {
// only hashes requested
}
Params::Logs(_) => {
return Err(invalid_params_rpc_err(
"Invalid params for newPendingTransactions",
))
}
}
}
pipe_from_stream(accepted_sink, self.pending_transaction_hashes_stream()).await
}
SubscriptionKind::Syncing => {
// get new block subscription
let mut canon_state = BroadcastStream::new(
self.inner.eth_api.provider().subscribe_to_canonical_state(),
);
// get current sync status
let mut initial_sync_status = self.inner.eth_api.network().is_syncing();
let current_sub_res = self.sync_status(initial_sync_status);
// send the current status immediately
let msg = SubscriptionMessage::new(
accepted_sink.method_name(),
accepted_sink.subscription_id(),
¤t_sub_res,
)
.map_err(SubscriptionSerializeError::new)?;
if accepted_sink.send(msg).await.is_err() {
return Ok(())
}
while canon_state.next().await.is_some() {
let current_syncing = self.inner.eth_api.network().is_syncing();
// Only send a new response if the sync status has changed
if current_syncing != initial_sync_status {
// Update the sync status on each new block
initial_sync_status = current_syncing;
// send a new message now that the status changed
let sync_status = self.sync_status(current_syncing);
let msg = SubscriptionMessage::new(
accepted_sink.method_name(),
accepted_sink.subscription_id(),
&sync_status,
)
.map_err(SubscriptionSerializeError::new)?;
if accepted_sink.send(msg).await.is_err() {
break
}
}
}
Ok(())
}
}
}
}
#[async_trait::async_trait]
impl<Eth> EthPubSubApiServer<RpcTransaction<Eth::NetworkTypes>> for EthPubSub<Eth>
where
    Eth: RpcNodeCore<
            Provider: BlockNumReader + CanonStateSubscriptions,
            Pool: TransactionPool,
            Network: NetworkInfo,
        > + EthApiTypes<
            RpcConvert: RpcConvert<
                Primitives: NodePrimitives<SignedTx = PoolConsensusTx<Eth::Pool>>,
            >,
        > + 'static,
{
    /// Handler for `eth_subscribe`
    async fn subscribe(
        &self,
        pending: PendingSubscriptionSink,
        kind: SubscriptionKind,
        params: Option<Params>,
    ) -> jsonrpsee::core::SubscriptionResult {
        // Accept the subscription first so the client gets a subscription id,
        // then serve it on a spawned task so this handler returns immediately.
        let sink = pending.accept().await?;
        let pubsub = self.clone();
        self.inner.subscription_task_spawner.spawn(Box::pin(async move {
            // Errors from the long-running handler are intentionally dropped;
            // the subscription simply ends.
            let _ = pubsub.handle_accepted(sink, kind, params).await;
        }));
        Ok(())
    }
}
/// Helper to convert a serde error into an [`ErrorObject`]
#[derive(Debug, thiserror::Error)]
#[error("Failed to serialize subscription item: {0}")]
pub struct SubscriptionSerializeError(#[from] serde_json::Error);

impl SubscriptionSerializeError {
    /// Wraps the given serialization error.
    const fn new(err: serde_json::Error) -> Self {
        Self(err)
    }
}

impl From<SubscriptionSerializeError> for ErrorObject<'static> {
    // Surfaced to RPC clients as an internal error carrying the display message.
    fn from(value: SubscriptionSerializeError) -> Self {
        internal_rpc_err(value.to_string())
    }
}
/// Pipes all stream items to the subscription sink.
///
/// Terminates cleanly (`Ok`) when the client disconnects, the stream ends, or a
/// send fails; returns `Err` only if an item fails to serialize.
async fn pipe_from_stream<T, St>(
    sink: SubscriptionSink,
    mut stream: St,
) -> Result<(), ErrorObject<'static>>
where
    St: Stream<Item = T> + Unpin,
    T: Serialize,
{
    loop {
        tokio::select! {
            _ = sink.closed() => {
                // connection dropped
                break Ok(())
            },
            maybe_item = stream.next() => {
                let item = match maybe_item {
                    Some(item) => item,
                    None => {
                        // stream ended
                        break Ok(())
                    },
                };
                let msg = SubscriptionMessage::new(
                    sink.method_name(),
                    sink.subscription_id(),
                    &item
                ).map_err(SubscriptionSerializeError::new)?;
                // A failed send means the subscriber went away; stop quietly.
                if sink.send(msg).await.is_err() {
                    break Ok(());
                }
            }
        }
    }
}
impl<Eth> std::fmt::Debug for EthPubSub<Eth> {
    // Manual impl so `Eth` needs no `Debug` bound; only the type name is
    // printed (`EthPubSub { .. }`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("EthPubSub").finish_non_exhaustive()
    }
}
/// Container holding the state shared by all clones of [`EthPubSub`].
#[derive(Clone)]
struct EthPubSubInner<EthApi> {
    /// The `eth` API.
    eth_api: EthApi,
    /// The type that's used to spawn subscription tasks.
    subscription_task_spawner: Box<dyn TaskSpawner>,
}
// == impl EthPubSubInner ===
impl<Eth> EthPubSubInner<Eth>
where
    Eth: RpcNodeCore<Provider: BlockNumReader>,
{
    /// Returns the current sync status for the `syncing` subscription.
    ///
    /// When not syncing this is the simple `false` payload; otherwise it
    /// carries the best block number reported by the provider.
    fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus {
        if !is_syncing {
            return PubSubSyncStatus::Simple(false)
        }
        // Best-effort read of the best block number; defaults to 0 if the
        // provider cannot report chain info.
        let current_block =
            self.eth_api.provider().chain_info().map(|info| info.best_number).unwrap_or_default();
        PubSubSyncStatus::Detailed(SyncStatusMetadata {
            syncing: true,
            starting_block: 0,
            current_block,
            highest_block: Some(current_block),
        })
    }
}
impl<Eth> EthPubSubInner<Eth>
where
    Eth: RpcNodeCore<Pool: TransactionPool>,
{
    /// Returns a stream that yields all transaction hashes emitted by the txpool.
    fn pending_transaction_hashes_stream(&self) -> impl Stream<Item = TxHash> {
        ReceiverStream::new(self.eth_api.pool().pending_transactions_listener())
    }

    /// Returns a stream that yields all transactions emitted by the txpool.
    fn full_pending_transaction_stream(
        &self,
    ) -> impl Stream<Item = NewTransactionEvent<<Eth::Pool as TransactionPool>::Transaction>> {
        self.eth_api.pool().new_pending_pool_transactions_listener()
    }
}
impl<N: NodePrimitives, Eth> EthPubSubInner<Eth>
where
    Eth: RpcNodeCore<Provider: CanonStateSubscriptions<Primitives = N>>,
{
    /// Returns a stream that yields all new RPC blocks.
    fn new_headers_stream(&self) -> impl Stream<Item = Header<N::BlockHeader>> {
        self.eth_api.provider().canonical_state_stream().flat_map(|new_chain| {
            // One RPC header per committed block; the reported size is the
            // block's RLP length.
            let headers = new_chain
                .committed()
                .blocks_iter()
                .map(|block| {
                    Header::from_consensus(
                        block.clone_sealed_header().into(),
                        None,
                        Some(U256::from(block.rlp_length())),
                    )
                })
                .collect::<Vec<_>>();
            futures::stream::iter(headers)
        })
    }

    /// Returns a stream that yields all logs that match the given filter.
    fn log_stream(&self, filter: Filter) -> impl Stream<Item = Log> {
        // NOTE(review): a lagged `BroadcastStream` receiver yields `Err`, which
        // this `expect` turns into a panic — confirm that lagging cannot occur
        // here or is acceptable.
        BroadcastStream::new(self.eth_api.provider().subscribe_to_canonical_state())
            .map(move |canon_state| {
                canon_state.expect("new block subscription never ends").block_receipts()
            })
            .flat_map(futures::stream::iter)
            .flat_map(move |(block_receipts, removed)| {
                let all_logs = logs_utils::matching_block_logs_with_tx_hashes(
                    &filter,
                    block_receipts.block,
                    block_receipts.timestamp,
                    block_receipts.tx_receipts.iter().map(|(tx, receipt)| (*tx, receipt)),
                    removed,
                );
                futures::stream::iter(all_logs)
            })
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/mod.rs | crates/rpc/rpc/src/eth/mod.rs | //! Server implementation of `eth` namespace API.
pub mod builder;
pub mod bundle;
pub mod core;
pub mod filter;
pub mod helpers;
pub mod pubsub;
pub mod sim_bundle;

/// Implementation of `eth` namespace API.
pub use builder::EthApiBuilder;
pub use bundle::EthBundle;
pub use core::{EthApi, EthApiFor};
pub use filter::EthFilter;
pub use helpers::{signer::DevSigner, sync_listener::SyncListener};
pub use pubsub::EthPubSub;
pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/pending_block.rs | crates/rpc/rpc/src/eth/helpers/pending_block.rs | //! Support for building a pending block with transactions from local view of mempool.
use crate::EthApi;
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock},
FromEvmError, RpcNodeCore,
};
use reth_rpc_eth_types::{builder::config::PendingBlockKind, EthApiError, PendingBlock};
impl<N, Rpc> LoadPendingBlock for EthApi<N, Rpc>
where
    N: RpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives>,
{
    /// Returns the lock guarding the cached pending block; delegates to the inner state.
    #[inline]
    fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<Self::Primitives>>> {
        self.inner.pending_block()
    }

    /// Returns the builder used to construct the pending block's environment.
    #[inline]
    fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<Self::Evm> {
        self.inner.pending_env_builder()
    }

    /// Returns the configured pending block construction mode.
    #[inline]
    fn pending_block_kind(&self) -> PendingBlockKind {
        self.inner.pending_block_kind()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/signer.rs | crates/rpc/rpc/src/eth/helpers/signer.rs | //! An abstraction over ethereum signers.
use std::collections::HashMap;
use crate::EthApi;
use alloy_dyn_abi::TypedData;
use alloy_eips::eip2718::Decodable2718;
use alloy_primitives::{eip191_hash_message, Address, Signature, B256};
use alloy_signer::SignerSync;
use alloy_signer_local::PrivateKeySigner;
use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest};
use reth_rpc_eth_api::{
helpers::{signer::Result, AddDevSigners, EthSigner},
FromEvmError, RpcNodeCore,
};
use reth_rpc_eth_types::{EthApiError, SignError};
use reth_storage_api::ProviderTx;
impl<N, Rpc> AddDevSigners for EthApi<N, Rpc>
where
    N: RpcNodeCore,
    EthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<
        Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
    >,
{
    /// Replaces the currently configured signers with 20 freshly generated
    /// random dev signers.
    fn with_dev_accounts(&self) {
        *self.inner.signers().write() = DevSigner::random_signers(20)
    }
}
/// Holds developer keys
#[derive(Debug, Clone)]
pub struct DevSigner {
    // Addresses this signer can sign for.
    addresses: Vec<Address>,
    // Private keys, keyed by their corresponding address.
    accounts: HashMap<Address, PrivateKeySigner>,
}
impl DevSigner {
    /// Generates the requested number of random dev signers, each managing a
    /// single freshly generated account, boxed as [`EthSigner`] trait objects.
    pub fn random_signers<T: Decodable2718, TxReq: SignableTxRequest<T>>(
        num: u32,
    ) -> Vec<Box<dyn EthSigner<T, TxReq> + 'static>> {
        (0..num)
            .map(|_| {
                let key = PrivateKeySigner::random();
                let addr = key.address();
                let signer =
                    Self { addresses: vec![addr], accounts: HashMap::from([(addr, key)]) };
                Box::new(signer) as Box<dyn EthSigner<T, TxReq>>
            })
            .collect()
    }

    /// Looks up the private key for `account`, failing with
    /// [`SignError::NoAccount`] if this signer does not manage it.
    fn get_key(&self, account: Address) -> Result<&PrivateKeySigner> {
        self.accounts.get(&account).ok_or(SignError::NoAccount)
    }

    /// Signs the given 32-byte digest with the key belonging to `account`.
    fn sign_hash(&self, hash: B256, account: Address) -> Result<Signature> {
        self.get_key(account)?.sign_hash_sync(&hash).map_err(|_| SignError::CouldNotSign)
    }
}
#[async_trait::async_trait]
impl<T: Decodable2718, TxReq: SignableTxRequest<T>> EthSigner<T, TxReq> for DevSigner {
    /// Returns all addresses this signer manages.
    fn accounts(&self) -> Vec<Address> {
        self.addresses.clone()
    }

    /// Returns true if a key for `addr` is held by this signer.
    fn is_signer_for(&self, addr: &Address) -> bool {
        self.accounts.contains_key(addr)
    }

    async fn sign(&self, address: Address, message: &[u8]) -> Result<Signature> {
        // Hash message according to EIP 191:
        // https://ethereum.org/es/developers/docs/apis/json-rpc/#eth_sign
        let hash = eip191_hash_message(message);
        self.sign_hash(hash, address)
    }

    async fn sign_transaction(&self, request: TxReq, address: &Address) -> Result<T> {
        // create local signer wallet from signing key
        let signer = self.accounts.get(address).ok_or(SignError::NoAccount)?.clone();
        // build and sign transaction with signer
        let tx = request
            .try_build_and_sign(&signer)
            .await
            .map_err(|_| SignError::InvalidTransactionRequest)?;
        Ok(tx)
    }

    /// Signs EIP-712 typed data by hashing the payload and signing the digest.
    fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result<Signature> {
        let encoded = payload.eip712_signing_hash().map_err(|_| SignError::InvalidTypedData)?;
        self.sign_hash(encoded, address)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::Transaction;
use alloy_primitives::{Bytes, U256};
use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
use reth_ethereum_primitives::TransactionSigned;
use revm_primitives::TxKind;
fn build_signer() -> DevSigner {
let signer: PrivateKeySigner =
"4646464646464646464646464646464646464646464646464646464646464646".parse().unwrap();
let address = signer.address();
let accounts = HashMap::from([(address, signer)]);
let addresses = vec![address];
DevSigner { addresses, accounts }
}
#[tokio::test]
async fn test_sign_type_data() {
let eip_712_example = r#"{
"types": {
"EIP712Domain": [
{
"name": "name",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "chainId",
"type": "uint256"
},
{
"name": "verifyingContract",
"type": "address"
}
],
"Person": [
{
"name": "name",
"type": "string"
},
{
"name": "wallet",
"type": "address"
}
],
"Mail": [
{
"name": "from",
"type": "Person"
},
{
"name": "to",
"type": "Person"
},
{
"name": "contents",
"type": "string"
}
]
},
"primaryType": "Mail",
"domain": {
"name": "Ether Mail",
"version": "1",
"chainId": 1,
"verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
},
"message": {
"from": {
"name": "Cow",
"wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
},
"to": {
"name": "Bob",
"wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
},
"contents": "Hello, Bob!"
}
}"#;
let data: TypedData = serde_json::from_str(eip_712_example).unwrap();
let signer = build_signer();
let from = *signer.addresses.first().unwrap();
let sig = EthSigner::<reth_ethereum_primitives::TransactionSigned>::sign_typed_data(
&signer, from, &data,
)
.unwrap();
let expected = Signature::new(
U256::from_str_radix(
"5318aee9942b84885761bb20e768372b76e7ee454fc4d39b59ce07338d15a06c",
16,
)
.unwrap(),
U256::from_str_radix(
"5e585a2f4882ec3228a9303244798b47a9102e4be72f48159d890c73e4511d79",
16,
)
.unwrap(),
false,
);
assert_eq!(sig, expected)
}
#[tokio::test]
async fn test_signer() {
let message = b"Test message";
let signer = build_signer();
let from = *signer.addresses.first().unwrap();
let sig =
EthSigner::<reth_ethereum_primitives::TransactionSigned>::sign(&signer, from, message)
.await
.unwrap();
let expected = Signature::new(
U256::from_str_radix(
"54313da7432e4058b8d22491b2e7dbb19c7186c35c24155bec0820a8a2bfe0c1",
16,
)
.unwrap(),
U256::from_str_radix(
"687250f11a3d4435004c04a4cb60e846bc27997271d67f21c6c8170f17a25e10",
16,
)
.unwrap(),
true,
);
assert_eq!(sig, expected)
}
#[tokio::test]
async fn test_sign_transaction() {
let message = b"Test message";
let signer = build_signer();
let from = *signer.addresses.first().unwrap();
let request = TransactionRequest {
chain_id: Some(1u64),
from: Some(from),
to: Some(TxKind::Create),
gas: Some(1000),
gas_price: Some(1000u128),
value: Some(U256::from(1000)),
input: TransactionInput {
data: Some(Bytes::from(message.to_vec())),
input: Some(Bytes::from(message.to_vec())),
},
nonce: Some(0u64),
..Default::default()
};
let txn_signed: std::result::Result<TransactionSigned, SignError> =
signer.sign_transaction(request, &from).await;
assert!(txn_signed.is_ok());
assert_eq!(Bytes::from(message.to_vec()), txn_signed.unwrap().input().0);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/call.rs | crates/rpc/rpc/src/eth/helpers/call.rs | //! Contains RPC handler implementations specific to endpoints that call/execute within evm.
use crate::EthApi;
use reth_evm::{SpecFor, TxEnvFor};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{estimate::EstimateCall, Call, EthCall},
FromEvmError, RpcNodeCore,
};
use reth_rpc_eth_types::EthApiError;
impl<N, Rpc> EthCall for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N, Rpc> Call for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
#[inline]
fn call_gas_limit(&self) -> u64 {
self.inner.gas_cap()
}
#[inline]
fn max_simulate_blocks(&self) -> u64 {
self.inner.max_simulate_blocks()
}
}
impl<N, Rpc> EstimateCall for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = EthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/sync_listener.rs | crates/rpc/rpc/src/eth/helpers/sync_listener.rs | //! A utility Future to asynchronously wait until a node has finished syncing.
use futures::Stream;
use pin_project::pin_project;
use reth_network_api::NetworkInfo;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
/// This future resolves once the node is no longer syncing: [`NetworkInfo::is_syncing`].
#[must_use = "futures do nothing unless polled"]
#[pin_project]
#[derive(Debug)]
pub struct SyncListener<N, St> {
#[pin]
tick: St,
network_info: N,
}
impl<N, St> SyncListener<N, St> {
/// Create a new [`SyncListener`] using the given tick stream.
pub const fn new(network_info: N, tick: St) -> Self {
Self { tick, network_info }
}
}
impl<N, St, Out> Future for SyncListener<N, St>
where
N: NetworkInfo,
St: Stream<Item = Out> + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
if !this.network_info.is_syncing() {
return Poll::Ready(());
}
loop {
let tick_event = ready!(this.tick.as_mut().poll_next(cx));
match tick_event {
Some(_) => {
if !this.network_info.is_syncing() {
return Poll::Ready(());
}
}
None => return Poll::Ready(()),
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_rpc_types_admin::EthProtocolInfo;
use futures::stream;
use reth_network_api::{NetworkError, NetworkStatus};
use std::{
net::{IpAddr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
#[derive(Clone)]
struct TestNetwork {
syncing: Arc<AtomicBool>,
}
impl NetworkInfo for TestNetwork {
fn local_addr(&self) -> SocketAddr {
(IpAddr::from([0, 0, 0, 0]), 0).into()
}
async fn network_status(&self) -> Result<NetworkStatus, NetworkError> {
#[allow(deprecated)]
Ok(NetworkStatus {
client_version: "test".to_string(),
protocol_version: 5,
eth_protocol_info: EthProtocolInfo {
network: 1,
difficulty: None,
genesis: Default::default(),
config: Default::default(),
head: Default::default(),
},
capabilities: vec![],
})
}
fn chain_id(&self) -> u64 {
1
}
fn is_syncing(&self) -> bool {
self.syncing.load(Ordering::SeqCst)
}
fn is_initially_syncing(&self) -> bool {
self.is_syncing()
}
}
#[tokio::test]
async fn completes_immediately_if_not_syncing() {
let network = TestNetwork { syncing: Arc::new(AtomicBool::new(false)) };
let fut = SyncListener::new(network, stream::pending::<()>());
fut.await;
}
#[tokio::test]
async fn resolves_when_syncing_stops() {
use tokio::sync::mpsc::unbounded_channel;
use tokio_stream::wrappers::UnboundedReceiverStream;
let syncing = Arc::new(AtomicBool::new(true));
let network = TestNetwork { syncing: syncing.clone() };
let (tx, rx) = unbounded_channel();
let listener = SyncListener::new(network, UnboundedReceiverStream::new(rx));
let handle = tokio::spawn(listener);
syncing.store(false, Ordering::Relaxed);
let _ = tx.send(());
handle.await.unwrap();
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/trace.rs | crates/rpc/rpc/src/eth/helpers/trace.rs | //! Contains RPC handler implementations specific to tracing.
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{helpers::Trace, FromEvmError, RpcNodeCore};
use reth_rpc_eth_types::EthApiError;
use crate::EthApi;
impl<N, Rpc> Trace for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/state.rs | crates/rpc/rpc/src/eth/helpers/state.rs | //! Contains RPC handler implementations specific to state.
use crate::EthApi;
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{EthState, LoadPendingBlock, LoadState},
RpcNodeCore,
};
impl<N, Rpc> EthState for EthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
Self: LoadPendingBlock,
{
fn max_proof_window(&self) -> u64 {
self.inner.eth_proof_window()
}
}
impl<N, Rpc> LoadState for EthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
Self: LoadPendingBlock,
{
}
#[cfg(test)]
mod tests {
use crate::eth::helpers::types::EthRpcConverter;
use super::*;
use alloy_primitives::{Address, StorageKey, U256};
use reth_chainspec::ChainSpec;
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_provider::{
test_utils::{ExtendedAccount, MockEthProvider, NoopProvider},
ChainSpecProvider,
};
use reth_rpc_eth_api::{helpers::EthState, node::RpcNodeCoreAdapter};
use reth_transaction_pool::test_utils::{testing_pool, TestPool};
use std::collections::HashMap;
use revm::state::FlaggedStorage;
fn noop_eth_api() -> EthApi<
RpcNodeCoreAdapter<NoopProvider, TestPool, NoopNetwork, EthEvmConfig>,
EthRpcConverter<ChainSpec>,
> {
let provider = NoopProvider::default();
let pool = testing_pool();
let evm_config = EthEvmConfig::mainnet();
EthApi::builder(provider, pool, NoopNetwork::default(), evm_config).build()
}
fn mock_eth_api(
accounts: HashMap<Address, ExtendedAccount>,
) -> EthApi<
RpcNodeCoreAdapter<MockEthProvider, TestPool, NoopNetwork, EthEvmConfig>,
EthRpcConverter<ChainSpec>,
> {
let pool = testing_pool();
let mock_provider = MockEthProvider::default();
let evm_config = EthEvmConfig::new(mock_provider.chain_spec());
mock_provider.extend_accounts(accounts);
EthApi::builder(mock_provider, pool, NoopNetwork::default(), evm_config).build()
}
#[tokio::test]
async fn test_storage() {
// === Noop ===
let eth_api = noop_eth_api();
let address = Address::random();
let storage = eth_api.storage_at(address, U256::ZERO.into(), None).await.unwrap();
assert_eq!(storage, U256::ZERO.to_be_bytes());
// === Mock ===
let storage_value = FlaggedStorage::new_from_value(1337);
let storage_key = StorageKey::random();
let storage = HashMap::from([(storage_key, storage_value)]);
let accounts =
HashMap::from([(address, ExtendedAccount::new(0, U256::ZERO).extend_storage(storage))]);
let eth_api = mock_eth_api(accounts);
let storage_key: U256 = storage_key.into();
let storage = eth_api.storage_at(address, storage_key.into(), None).await.unwrap();
assert_eq!(storage, storage_value.value.to_be_bytes());
}
#[tokio::test]
async fn test_get_account_missing() {
let eth_api = noop_eth_api();
let address = Address::random();
let account = eth_api.get_account(address, Default::default()).await.unwrap();
assert!(account.is_none());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/block.rs | crates/rpc/rpc/src/eth/helpers/block.rs | //! Contains RPC handler implementations specific to blocks.
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{EthBlocks, LoadBlock, LoadPendingBlock},
FromEvmError, RpcNodeCore,
};
use reth_rpc_eth_types::EthApiError;
use crate::EthApi;
impl<N, Rpc> EthBlocks for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
impl<N, Rpc> LoadBlock for EthApi<N, Rpc>
where
Self: LoadPendingBlock,
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/receipt.rs | crates/rpc/rpc/src/eth/helpers/receipt.rs | //! Builds an RPC receipt response w.r.t. data layout of network.
use crate::EthApi;
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{helpers::LoadReceipt, FromEvmError, RpcNodeCore};
use reth_rpc_eth_types::EthApiError;
impl<N, Rpc> LoadReceipt for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/types.rs | crates/rpc/rpc/src/eth/helpers/types.rs | //! L1 `eth` API types.
use alloy_network::Ethereum;
use reth_evm_ethereum::EthEvmConfig;
use reth_rpc_convert::RpcConverter;
use reth_rpc_eth_types::receipt::EthReceiptConverter;
/// An [`RpcConverter`] with its generics set to Ethereum specific.
pub type EthRpcConverter<ChainSpec> =
RpcConverter<Ethereum, EthEvmConfig, EthReceiptConverter<ChainSpec>>;
//tests for simulate
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::{Transaction, TxType};
use alloy_rpc_types_eth::TransactionRequest;
use reth_chainspec::MAINNET;
use reth_rpc_eth_types::simulate::resolve_transaction;
use revm::database::CacheDB;
#[test]
fn test_resolve_transaction_empty_request() {
let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()));
let mut db = CacheDB::<reth_revm::db::EmptyDBTyped<reth_errors::ProviderError>>::default();
let tx = TransactionRequest::default();
let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap();
// For an empty request, we should get a valid transaction with defaults
let tx = result.into_inner();
assert_eq!(tx.max_fee_per_gas(), 0);
assert_eq!(tx.max_priority_fee_per_gas(), Some(0));
assert_eq!(tx.gas_price(), None);
}
#[test]
fn test_resolve_transaction_legacy() {
let mut db = CacheDB::<reth_revm::db::EmptyDBTyped<reth_errors::ProviderError>>::default();
let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()));
let tx = TransactionRequest { gas_price: Some(100), ..Default::default() };
let tx = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap();
assert_eq!(tx.tx_type(), TxType::Legacy);
let tx = tx.into_inner();
assert_eq!(tx.gas_price(), Some(100));
assert_eq!(tx.max_priority_fee_per_gas(), None);
}
#[test]
fn test_resolve_transaction_partial_eip1559() {
let mut db = CacheDB::<reth_revm::db::EmptyDBTyped<reth_errors::ProviderError>>::default();
let rpc_converter = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()));
let tx = TransactionRequest {
max_fee_per_gas: Some(200),
max_priority_fee_per_gas: Some(10),
..Default::default()
};
let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &rpc_converter).unwrap();
assert_eq!(result.tx_type(), TxType::Eip1559);
let tx = result.into_inner();
assert_eq!(tx.max_fee_per_gas(), 200);
assert_eq!(tx.max_priority_fee_per_gas(), Some(10));
assert_eq!(tx.gas_price(), None);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/mod.rs | crates/rpc/rpc/src/eth/helpers/mod.rs | //! The entire implementation of the namespace is quite large, hence it is divided across several
//! files.
pub mod signer;
pub mod sync_listener;
pub mod types;
mod block;
mod call;
mod fees;
mod pending_block;
mod receipt;
mod spec;
mod state;
mod trace;
mod transaction;
pub use sync_listener::SyncListener;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/transaction.rs | crates/rpc/rpc/src/eth/helpers/transaction.rs | //! Contains RPC handler implementations specific to transactions
use crate::EthApi;
use alloy_primitives::{hex, Bytes, B256};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
FromEvmError, RpcNodeCore,
};
use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError};
use reth_transaction_pool::{AddedTransactionOutcome, PoolTransaction, TransactionPool};
impl<N, Rpc> EthTransactions for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {
self.inner.signers()
}
/// Decodes and recovers the transaction and submits it to the pool.
///
/// Returns the hash of the transaction.
async fn send_raw_transaction(&self, tx: Bytes) -> Result<B256, Self::Error> {
let recovered = recover_raw_transaction(&tx)?;
let pool_transaction = <Self::Pool as TransactionPool>::Transaction::from_pooled(recovered);
// forward the transaction to the specific endpoint if configured.
if let Some(client) = self.raw_tx_forwarder() {
tracing::debug!(target: "rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to forwarder");
let rlp_hex = hex::encode_prefixed(&tx);
// broadcast raw transaction to subscribers if there is any.
self.broadcast_raw_transaction(tx);
let hash =
client.request("eth_sendRawTransaction", (rlp_hex,)).await.inspect_err(|err| {
tracing::debug!(target: "rpc::eth", %err, hash=% *pool_transaction.hash(), "failed to forward raw transaction");
}).map_err(EthApiError::other)?;
// Retain tx in local tx pool after forwarding, for local RPC usage.
let _ = self.inner.add_pool_transaction(pool_transaction).await;
return Ok(hash);
}
// broadcast raw transaction to subscribers if there is any.
self.broadcast_raw_transaction(tx);
// submit the transaction to the pool with a `Local` origin
let AddedTransactionOutcome { hash, .. } =
self.inner.add_pool_transaction(pool_transaction).await?;
Ok(hash)
}
}
impl<N, Rpc> LoadTransaction for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::{hex_literal::hex, Bytes};
use reth_chainspec::ChainSpecProvider;
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_provider::test_utils::NoopProvider;
use reth_rpc_eth_api::helpers::EthTransactions;
use reth_transaction_pool::{test_utils::testing_pool, TransactionPool};
#[tokio::test]
async fn send_raw_transaction() {
let noop_provider = NoopProvider::default();
let noop_network_provider = NoopNetwork::default();
let pool = testing_pool();
let evm_config = EthEvmConfig::new(noop_provider.chain_spec());
let eth_api =
EthApi::builder(noop_provider.clone(), pool.clone(), noop_network_provider, evm_config)
.build();
// https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d
let tx_1 = Bytes::from(hex!(
"02f871018303579880850555633d1b82520894eee27662c2b8eba3cd936a23f039f3189633e4c887ad591c62bdaeb180c080a07ea72c68abfb8fca1bd964f0f99132ed9280261bdca3e549546c0205e800f7d0a05b4ef3039e9c9b9babc179a1878fb825b5aaf5aed2fa8744854150157b08d6f3"
));
let tx_1_result = eth_api.send_raw_transaction(tx_1).await.unwrap();
assert_eq!(
pool.len(),
1,
"expect 1 transaction in the pool, but pool size is {}",
pool.len()
);
// https://etherscan.io/tx/0x48816c2f32c29d152b0d86ff706f39869e6c1f01dc2fe59a3c1f9ecf39384694
let tx_2 = Bytes::from(hex!(
"02f9043c018202b7843b9aca00850c807d37a08304d21d94ef1c6e67703c7bd7107eed8303fbe6ec2554bf6b881bc16d674ec80000b903c43593564c000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000063e2d99f00000000000000000000000000000000000000000000000000000000000000030b000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000065717fe021ea67801d1088cc80099004b05b64600000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002bc02aaa39b223fe8d0a0e5c4f27ead9083c756cc20001f4a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009e95fd5965fd1f1a6f0d4600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb4800000000000000000000000
0428dca9537116148616a5a3e44035af17238fe9dc080a0c6ec1e41f5c0b9511c49b171ad4e04c6bb419c74d99fe9891d74126ec6e4e879a032069a753d7a2cfa158df95421724d24c0e9501593c09905abf3699b4a4405ce"
));
let tx_2_result = eth_api.send_raw_transaction(tx_2).await.unwrap();
assert_eq!(
pool.len(),
2,
"expect 2 transactions in the pool, but pool size is {}",
pool.len()
);
assert!(pool.get(&tx_1_result).is_some(), "tx1 not found in the pool");
assert!(pool.get(&tx_2_result).is_some(), "tx2 not found in the pool");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/spec.rs | crates/rpc/rpc/src/eth/helpers/spec.rs | use alloy_primitives::U256;
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{spec::SignersForApi, EthApiSpec},
RpcNodeCore,
};
use reth_storage_api::ProviderTx;
use crate::EthApi;
impl<N, Rpc> EthApiSpec for EthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Transaction = ProviderTx<N::Provider>;
type Rpc = Rpc::Network;
fn starting_block(&self) -> U256 {
self.inner.starting_block()
}
fn signers(&self) -> &SignersForApi<Self> {
self.inner.signers()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc/src/eth/helpers/fees.rs | crates/rpc/rpc/src/eth/helpers/fees.rs | //! Contains RPC handler implementations for fee history.
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_api::{
helpers::{EthFees, LoadFee},
FromEvmError, RpcNodeCore,
};
use reth_rpc_eth_types::{EthApiError, FeeHistoryCache, GasPriceOracle};
use reth_storage_api::ProviderHeader;
use crate::EthApi;
impl<N, Rpc> EthFees for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
}
impl<N, Rpc> LoadFee for EthApi<N, Rpc>
where
N: RpcNodeCore,
EthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = EthApiError>,
{
#[inline]
fn gas_oracle(&self) -> &GasPriceOracle<Self::Provider> {
self.inner.gas_oracle()
}
#[inline]
fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<N::Provider>> {
self.inner.fee_history_cache()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/pending_block.rs | crates/rpc/rpc-eth-types/src/pending_block.rs | //! Helper types for `reth_rpc_eth_api::EthApiServer` implementation.
//!
//! Types used in block building.
use std::{sync::Arc, time::Instant};
use alloy_consensus::BlockHeader;
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{BlockHash, B256};
use derive_more::Constructor;
use reth_chain_state::{
BlockState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates,
};
use reth_ethereum_primitives::Receipt;
use reth_evm::EvmEnv;
use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedHeader};
/// Configured [`EvmEnv`] for a pending block.
#[derive(Debug, Clone, Constructor)]
pub struct PendingBlockEnv<B: Block, R, Spec> {
/// Configured [`EvmEnv`] for the pending block.
pub evm_env: EvmEnv<Spec>,
/// Origin block for the config
pub origin: PendingBlockEnvOrigin<B, R>,
}
/// The origin for a configured [`PendingBlockEnv`]
#[derive(Clone, Debug)]
pub enum PendingBlockEnvOrigin<B: Block = reth_ethereum_primitives::Block, R = Receipt> {
/// The pending block as received from the CL.
ActualPending(Arc<RecoveredBlock<B>>, Arc<Vec<R>>),
/// The _modified_ header of the latest block.
///
/// This derives the pending state based on the latest header by modifying:
/// - the timestamp
/// - the block number
/// - fees
DerivedFromLatest(SealedHeader<B::Header>),
}
impl<B: Block, R> PendingBlockEnvOrigin<B, R> {
/// Returns true if the origin is the actual pending block as received from the CL.
pub const fn is_actual_pending(&self) -> bool {
matches!(self, Self::ActualPending(_, _))
}
/// Consumes the type and returns the actual pending block.
pub fn into_actual_pending(self) -> Option<Arc<RecoveredBlock<B>>> {
match self {
Self::ActualPending(block, _) => Some(block),
_ => None,
}
}
/// Returns the [`BlockId`] that represents the state of the block.
///
/// If this is the actual pending block, the state is the "Pending" tag, otherwise we can safely
/// identify the block by its hash (latest block).
pub fn state_block_id(&self) -> BlockId {
match self {
Self::ActualPending(_, _) => BlockNumberOrTag::Pending.into(),
Self::DerivedFromLatest(latest) => BlockId::Hash(latest.hash().into()),
}
}
/// Returns the hash of the block the pending block should be built on.
///
/// For the [`PendingBlockEnvOrigin::ActualPending`] this is the parent hash of the block.
/// For the [`PendingBlockEnvOrigin::DerivedFromLatest`] this is the hash of the _latest_
/// header.
pub fn build_target_hash(&self) -> B256 {
match self {
Self::ActualPending(block, _) => block.header().parent_hash(),
Self::DerivedFromLatest(latest) => latest.hash(),
}
}
}
/// A type alias for an [`Arc`] wrapped [`RecoveredBlock`].
pub type PendingRecoveredBlock<N> = Arc<RecoveredBlock<<N as NodePrimitives>::Block>>;
/// A type alias for an [`Arc`] wrapped vector of [`NodePrimitives::Receipt`].
pub type PendingBlockReceipts<N> = Arc<Vec<<N as NodePrimitives>::Receipt>>;
/// A type alias for a pair of an [`Arc`] wrapped [`RecoveredBlock`] and a vector of
/// [`NodePrimitives::Receipt`].
pub type PendingBlockAndReceipts<N> = (PendingRecoveredBlock<N>, PendingBlockReceipts<N>);
/// Locally built pending block for `pending` tag.
#[derive(Debug, Clone, Constructor)]
pub struct PendingBlock<N: NodePrimitives> {
/// Timestamp when the pending block is considered outdated.
pub expires_at: Instant,
/// The receipts for the pending block
pub receipts: PendingBlockReceipts<N>,
/// The locally built pending block with execution output.
pub executed_block: ExecutedBlock<N>,
}
impl<N: NodePrimitives> PendingBlock<N> {
/// Creates a new instance of [`PendingBlock`] with `executed_block` as its output that should
/// not be used past `expires_at`.
pub fn with_executed_block(expires_at: Instant, executed_block: ExecutedBlock<N>) -> Self {
Self {
expires_at,
receipts: Arc::new(
executed_block.execution_output.receipts.iter().flatten().cloned().collect(),
),
executed_block,
}
}
/// Returns the locally built pending [`RecoveredBlock`].
pub const fn block(&self) -> &PendingRecoveredBlock<N> {
&self.executed_block.recovered_block
}
/// Converts this [`PendingBlock`] into a pair of [`RecoveredBlock`] and a vector of
/// [`NodePrimitives::Receipt`]s, taking self.
pub fn into_block_and_receipts(self) -> PendingBlockAndReceipts<N> {
(self.executed_block.recovered_block, self.receipts)
}
/// Returns a pair of [`RecoveredBlock`] and a vector of [`NodePrimitives::Receipt`]s by
/// cloning from borrowed self.
pub fn to_block_and_receipts(&self) -> PendingBlockAndReceipts<N> {
(self.executed_block.recovered_block.clone(), self.receipts.clone())
}
/// Returns a hash of the parent block for this `executed_block`.
pub fn parent_hash(&self) -> BlockHash {
self.executed_block.recovered_block().parent_hash()
}
}
impl<N: NodePrimitives> From<PendingBlock<N>> for BlockState<N> {
fn from(pending_block: PendingBlock<N>) -> Self {
Self::new(ExecutedBlockWithTrieUpdates::<N>::new(
pending_block.executed_block.recovered_block,
pending_block.executed_block.execution_output,
pending_block.executed_block.hashed_state,
ExecutedTrieUpdates::Missing,
))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/lib.rs | crates/rpc/rpc-eth-types/src/lib.rs | //! Reth RPC server types, used in server implementation of `eth` namespace API.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
pub mod builder;
pub mod cache;
pub mod error;
pub mod fee_history;
pub mod gas_oracle;
pub mod id_provider;
pub mod logs_utils;
pub mod pending_block;
pub mod receipt;
pub mod simulate;
pub mod transaction;
pub mod tx_forward;
pub mod utils;
pub use builder::config::{EthConfig, EthFilterConfig};
pub use cache::{
config::EthStateCacheConfig, db::StateCacheDb, multi_consumer::MultiConsumerLruCache,
EthStateCache,
};
pub use error::{EthApiError, EthResult, RevertError, RpcInvalidTransactionError, SignError};
pub use fee_history::{FeeHistoryCache, FeeHistoryCacheConfig, FeeHistoryEntry};
pub use gas_oracle::{
GasCap, GasPriceOracle, GasPriceOracleConfig, GasPriceOracleResult, RPC_DEFAULT_GAS_CAP,
};
pub use id_provider::EthSubscriptionIdProvider;
pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin};
pub use transaction::TransactionSource;
pub use tx_forward::ForwardConfig;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/id_provider.rs | crates/rpc/rpc-eth-types/src/id_provider.rs | //! Helper type for `reth_rpc_eth_api::EthPubSubApiServer` implementation.
//!
//! Generates IDs for tracking subscriptions.
use std::fmt::Write;
use jsonrpsee_types::SubscriptionId;
/// An [`IdProvider`](jsonrpsee_core::traits::IdProvider) for ethereum subscription ids.
///
/// Returns new hex-string [QUANTITY](https://ethereum.org/en/developers/docs/apis/json-rpc/#quantities-encoding) ids
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct EthSubscriptionIdProvider;
impl jsonrpsee_core::traits::IdProvider for EthSubscriptionIdProvider {
    // Produces a fresh subscription id by hex-encoding 128 random bits as a
    // JSON-RPC QUANTITY string (see `to_quantity`). Collisions are negligible
    // at u128 width, so no uniqueness bookkeeping is needed.
    fn next_id(&self) -> SubscriptionId<'static> {
        to_quantity(rand::random::<u128>())
    }
}
/// Returns a hex quantity string for the given value
///
/// Strips all leading zeros, `0` is returned as `0x0`
#[inline(always)]
fn to_quantity(val: u128) -> SubscriptionId<'static> {
let bytes = val.to_be_bytes();
let b = bytes.as_slice();
let non_zero = b.iter().take_while(|b| **b == 0).count();
let b = &b[non_zero..];
if b.is_empty() {
return SubscriptionId::Str("0x0".into())
}
let mut id = String::with_capacity(2 * b.len() + 2);
id.push_str("0x");
let first_byte = b[0];
write!(id, "{first_byte:x}").unwrap();
for byte in &b[1..] {
write!(id, "{byte:02x}").unwrap();
}
id.into()
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::U128;
    #[test]
    fn test_id_provider_quantity() {
        // Zero must render as the canonical "0x0" quantity.
        let id = to_quantity(0);
        assert_eq!(id, SubscriptionId::Str("0x0".into()));
        let id = to_quantity(1);
        assert_eq!(id, SubscriptionId::Str("0x1".into()));
        // Round-trip random values: parsing the hex string back must yield the
        // original number, proving no leading-zero or nibble-width mistakes.
        for _ in 0..1000 {
            let val = rand::random::<u128>();
            let id = to_quantity(val);
            match id {
                SubscriptionId::Str(id) => {
                    let from_hex: U128 = id.parse().unwrap();
                    assert_eq!(from_hex, U128::from(val));
                }
                SubscriptionId::Num(_) => {
                    unreachable!()
                }
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/logs_utils.rs | crates/rpc/rpc-eth-types/src/logs_utils.rs | //! Helper functions for `reth_rpc_eth_api::EthFilterApiServer` implementation.
//!
//! Log parsing for building filter.
use alloy_consensus::TxReceipt;
use alloy_eips::{eip2718::Encodable2718, BlockNumHash};
use alloy_primitives::TxHash;
use alloy_rpc_types_eth::{Filter, Log};
use reth_chainspec::ChainInfo;
use reth_errors::ProviderError;
use reth_primitives_traits::{BlockBody, RecoveredBlock, SignedTransaction};
use reth_storage_api::{BlockReader, ProviderBlock};
use std::sync::Arc;
/// Returns all logs of a block's receipts that match the filter, when the
/// transaction hashes are already known.
///
/// Returns an empty vec immediately if the filter's block criteria reject the
/// block. Log indices are counted across the whole block, including
/// non-matching logs.
pub fn matching_block_logs_with_tx_hashes<'a, I, R>(
    filter: &Filter,
    block_num_hash: BlockNumHash,
    block_timestamp: u64,
    tx_hashes_and_receipts: I,
    removed: bool,
) -> Vec<Log>
where
    I: IntoIterator<Item = (TxHash, &'a R)>,
    R: TxReceipt<Log = alloy_primitives::Log> + 'a,
{
    // Bail out early when the filter cannot match this block at all.
    if !filter.matches_block(&block_num_hash) {
        return Vec::new();
    }
    let mut matched = Vec::new();
    // Position of the current log within the entire block (counts every log,
    // matching or not, so indices line up with on-chain log positions).
    let mut block_log_index: u64 = 0;
    for (tx_index, (tx_hash, receipt)) in tx_hashes_and_receipts.into_iter().enumerate() {
        for inner in receipt.logs() {
            if filter.matches(inner) {
                matched.push(Log {
                    inner: inner.clone(),
                    block_hash: Some(block_num_hash.hash),
                    block_number: Some(block_num_hash.number),
                    transaction_hash: Some(tx_hash),
                    // Transaction index equals receipt index by construction.
                    transaction_index: Some(tx_index as u64),
                    log_index: Some(block_log_index),
                    removed,
                    block_timestamp: Some(block_timestamp),
                });
            }
            block_log_index += 1;
        }
    }
    matched
}
/// Helper enum to fetch a transaction either from a block or from the provider.
#[derive(Debug)]
pub enum ProviderOrBlock<'a, P: BlockReader> {
    /// Look transactions up lazily through the provider (may hit storage).
    Provider(&'a P),
    /// Transactions are already in memory inside this [`RecoveredBlock`].
    Block(Arc<RecoveredBlock<ProviderBlock<P>>>),
}
/// Appends all matching logs of a block's receipts.
/// If the log matches, look up the corresponding transaction hash.
///
/// Transaction hashes are resolved lazily: only receipts that contain at least
/// one matching log trigger a lookup, and the block body indices are fetched
/// at most once per call.
///
/// # Errors
///
/// Returns a [`ProviderError`] if the block body indices or a referenced
/// transaction cannot be found in storage.
pub fn append_matching_block_logs<P>(
    all_logs: &mut Vec<Log>,
    provider_or_block: ProviderOrBlock<'_, P>,
    filter: &Filter,
    block_num_hash: BlockNumHash,
    receipts: &[P::Receipt],
    removed: bool,
    block_timestamp: u64,
) -> Result<(), ProviderError>
where
    P: BlockReader<Transaction: SignedTransaction>,
{
    // Tracks the index of a log in the entire block.
    let mut log_index: u64 = 0;
    // Lazy loaded number of the first transaction in the block.
    // This is useful for blocks with multiple matching logs because it
    // prevents re-querying the block body indices.
    let mut loaded_first_tx_num = None;
    // Iterate over receipts and append matching logs.
    for (receipt_idx, receipt) in receipts.iter().enumerate() {
        // The transaction hash of the current receipt.
        let mut transaction_hash = None;
        for log in receipt.logs() {
            if filter.matches(log) {
                // if this is the first match in the receipt's logs, look up the transaction hash
                if transaction_hash.is_none() {
                    transaction_hash = match &provider_or_block {
                        // In-memory block: index straight into its transactions.
                        ProviderOrBlock::Block(block) => {
                            block.body().transactions().get(receipt_idx).map(|t| t.trie_hash())
                        }
                        // Provider: translate receipt index into a global tx id
                        // via the block body indices, then fetch the tx.
                        ProviderOrBlock::Provider(provider) => {
                            let first_tx_num = match loaded_first_tx_num {
                                Some(num) => num,
                                None => {
                                    let block_body_indices = provider
                                        .block_body_indices(block_num_hash.number)?
                                        .ok_or(ProviderError::BlockBodyIndicesNotFound(
                                            block_num_hash.number,
                                        ))?;
                                    loaded_first_tx_num = Some(block_body_indices.first_tx_num);
                                    block_body_indices.first_tx_num
                                }
                            };
                            // This is safe because Transactions and Receipts have the same
                            // keys.
                            let transaction_id = first_tx_num + receipt_idx as u64;
                            let transaction =
                                provider.transaction_by_id(transaction_id)?.ok_or_else(|| {
                                    ProviderError::TransactionNotFound(transaction_id.into())
                                })?;
                            Some(transaction.trie_hash())
                        }
                    };
                }
                let log = Log {
                    inner: log.clone(),
                    block_hash: Some(block_num_hash.hash),
                    block_number: Some(block_num_hash.number),
                    transaction_hash,
                    // The transaction and receipt index is always the same.
                    transaction_index: Some(receipt_idx as u64),
                    log_index: Some(log_index),
                    removed,
                    block_timestamp: Some(block_timestamp),
                };
                all_logs.push(log);
            }
            // Counts every log in the block, matched or not, so indices stay
            // aligned with on-chain log positions.
            log_index += 1;
        }
    }
    Ok(())
}
/// Computes the block range based on the filter range and current block numbers
pub fn get_filter_block_range(
    from_block: Option<u64>,
    to_block: Option<u64>,
    start_block: u64,
    info: ChainInfo,
) -> (u64, u64) {
    // Lower bound: the requested `from_block`, clamped so it cannot exceed the
    // start block (a `from` in the future is meaningless): `min(start, from)`.
    // Without a request, the start block itself is used.
    let lower = from_block.map_or(start_block, |from| start_block.min(from));
    // Upper bound: the requested `to_block`, clamped by the best known block:
    // `min(best, to)`. Without a request, the chain head is used.
    let upper = to_block.map_or(info.best_number, |to| info.best_number.min(to));
    (lower, upper)
}
#[cfg(test)]
mod tests {
    use alloy_rpc_types_eth::Filter;
    use super::*;
    // Both bounds inside the chain: range is returned unchanged.
    #[test]
    fn test_log_range_from_and_to() {
        let from = 14000000u64;
        let to = 14000100u64;
        let info = ChainInfo { best_number: 15000000, ..Default::default() };
        let range = get_filter_block_range(Some(from), Some(to), info.best_number, info);
        assert_eq!(range, (from, to));
    }
    // Both bounds beyond the head: both are clamped to the best block.
    #[test]
    fn test_log_range_higher() {
        let from = 15000001u64;
        let to = 15000002u64;
        let info = ChainInfo { best_number: 15000000, ..Default::default() };
        let range = get_filter_block_range(Some(from), Some(to), info.best_number, info);
        assert_eq!(range, (info.best_number, info.best_number));
    }
    #[test]
    fn test_log_range_from() {
        let from = 14000000u64;
        let info = ChainInfo { best_number: 15000000, ..Default::default() };
        let range = get_filter_block_range(Some(from), None, info.best_number, info);
        assert_eq!(range, (from, info.best_number));
    }
    #[test]
    fn test_log_range_to() {
        let to = 14000000u64;
        let info = ChainInfo { best_number: 15000000, ..Default::default() };
        let range = get_filter_block_range(None, Some(to), info.best_number, info);
        assert_eq!(range, (info.best_number, to));
    }
    #[test]
    fn test_log_range_empty() {
        let info = ChainInfo { best_number: 15000000, ..Default::default() };
        let range = get_filter_block_range(None, None, info.best_number, info);
        // no range given -> head
        assert_eq!(range, (info.best_number, info.best_number));
    }
    // Real-world filter JSON with only `fromBlock` set; `to` defaults to head.
    #[test]
    fn parse_log_from_only() {
        let s = r#"{"fromBlock":"0xf47a42","address":["0x7de93682b9b5d80d45cd371f7a14f74d49b0914c","0x0f00392fcb466c0e4e4310d81b941e07b4d5a079","0xebf67ab8cff336d3f609127e8bbf8bd6dd93cd81"],"topics":["0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f"]}"#;
        let filter: Filter = serde_json::from_str(s).unwrap();
        assert_eq!(filter.get_from_block(), Some(16022082));
        assert!(filter.get_to_block().is_none());
        let best_number = 17229427;
        let info = ChainInfo { best_number, ..Default::default() };
        let (from_block, to_block) = filter.block_option.as_range();
        let start_block = info.best_number;
        let (from_block_number, to_block_number) = get_filter_block_range(
            from_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number),
            to_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number),
            start_block,
            info,
        );
        assert_eq!(from_block_number, 16022082);
        assert_eq!(to_block_number, best_number);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/receipt.rs | crates/rpc/rpc-eth-types/src/receipt.rs | //! RPC receipt response builder, extends a layer one receipt with layer two data.
use crate::EthApiError;
use alloy_consensus::{ReceiptEnvelope, Transaction, TxReceipt};
use alloy_eips::eip7840::BlobParams;
use alloy_primitives::{Address, TxKind};
use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt};
use reth_chainspec::EthChainSpec;
use reth_ethereum_primitives::Receipt;
use reth_primitives_traits::NodePrimitives;
use reth_rpc_convert::transaction::{ConvertReceiptInput, ReceiptConverter};
use std::{borrow::Cow, sync::Arc};
/// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure.
///
/// Fills all network-level receipt fields (block/tx coordinates, gas, log
/// collection, contract address for deployments) and delegates only the typed
/// envelope construction to `build_envelope`.
pub fn build_receipt<N, E>(
    input: &ConvertReceiptInput<'_, N>,
    blob_params: Option<BlobParams>,
    build_envelope: impl FnOnce(ReceiptWithBloom<alloy_consensus::Receipt<Log>>) -> E,
) -> TransactionReceipt<E>
where
    N: NodePrimitives,
{
    let ConvertReceiptInput { tx, meta, receipt, gas_used, next_log_index } = input;
    let from = tx.signer();
    let blob_gas_used = tx.blob_gas_used();
    // Blob gas price should only be present if the transaction is a blob transaction
    // (and both blob params and the block's excess blob gas are known).
    let blob_gas_price =
        blob_gas_used.and_then(|_| Some(blob_params?.calc_blob_fee(meta.excess_blob_gas?)));
    let status = receipt.status_or_post_state();
    let cumulative_gas_used = receipt.cumulative_gas_used();
    let logs_bloom = receipt.bloom();
    // Avoid cloning the logs when the receipt is owned.
    let logs = match receipt {
        Cow::Borrowed(r) => {
            Log::collect_for_receipt(*next_log_index, *meta, r.logs().iter().cloned())
        }
        Cow::Owned(r) => Log::collect_for_receipt(*next_log_index, *meta, r.into_logs()),
    };
    let rpc_receipt = alloy_rpc_types_eth::Receipt { status, cumulative_gas_used, logs };
    // For deployments, derive the created contract address from sender + nonce;
    // plain calls report the callee instead.
    let (contract_address, to) = match tx.kind() {
        TxKind::Create => (Some(from.create(tx.nonce())), None),
        TxKind::Call(addr) => (None, Some(Address(*addr))),
    };
    TransactionReceipt {
        inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }),
        transaction_hash: meta.tx_hash,
        transaction_index: Some(meta.index),
        block_hash: Some(meta.block_hash),
        block_number: Some(meta.block_number),
        from,
        to,
        gas_used: *gas_used,
        contract_address,
        effective_gas_price: tx.effective_gas_price(meta.base_fee),
        // EIP-4844 fields
        blob_gas_price,
        blob_gas_used,
    }
}
/// Converter for Ethereum receipts.
#[derive(Debug)]
pub struct EthReceiptConverter<ChainSpec> {
    // Shared chain spec, used to resolve blob params per block timestamp.
    chain_spec: Arc<ChainSpec>,
}
// Manual impl so `Clone` does not require `ChainSpec: Clone` — only the `Arc`
// is cloned.
impl<ChainSpec> Clone for EthReceiptConverter<ChainSpec> {
    fn clone(&self) -> Self {
        Self { chain_spec: self.chain_spec.clone() }
    }
}
impl<ChainSpec> EthReceiptConverter<ChainSpec> {
    /// Creates a new converter with the given chain spec.
    pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
        Self { chain_spec }
    }
}
impl<N, ChainSpec> ReceiptConverter<N> for EthReceiptConverter<ChainSpec>
where
    N: NodePrimitives<Receipt = Receipt>,
    ChainSpec: EthChainSpec + 'static,
{
    type RpcReceipt = TransactionReceipt;
    type Error = EthApiError;
    // Converts consensus receipts into RPC receipts, resolving the blob params
    // active at each receipt's block timestamp.
    fn convert_receipts(
        &self,
        inputs: Vec<ConvertReceiptInput<'_, N>>,
    ) -> Result<Vec<Self::RpcReceipt>, Self::Error> {
        let mut receipts = Vec::with_capacity(inputs.len());
        for input in inputs {
            // NOTE(review): without the `timestamp-in-seconds` feature the raw
            // timestamp is divided by 1000, which assumes `meta.timestamp` is
            // in milliseconds by default in this fork — confirm against the
            // block header producer.
            let timestamp_seconds = if cfg!(feature = "timestamp-in-seconds") {
                input.meta.timestamp
            } else {
                input.meta.timestamp / 1000
            };
            let tx_type = input.receipt.tx_type;
            let blob_params = self.chain_spec.blob_params_at_timestamp(timestamp_seconds);
            receipts.push(build_receipt(&input, blob_params, |receipt_with_bloom| {
                ReceiptEnvelope::from_typed(tx_type, receipt_with_bloom)
            }));
        }
        Ok(receipts)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/utils.rs | crates/rpc/rpc-eth-types/src/utils.rs | //! Commonly used code snippets
use super::{EthApiError, EthResult};
use reth_primitives_traits::{Recovered, SignedTransaction};
use std::future::Future;
/// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream.
///
/// This is a helper function that returns the appropriate RPC-specific error if the input data is
/// malformed.
///
/// See [`alloy_eips::eip2718::Decodable2718::decode_2718`]
///
/// # Errors
///
/// - [`EthApiError::EmptyRawTransactionData`] if `data` is empty.
/// - [`EthApiError::FailedToDecodeSignedTransaction`] if EIP-2718 decoding fails.
/// - [`EthApiError::InvalidTransactionSignature`] if signer recovery fails.
pub fn recover_raw_transaction<T: SignedTransaction>(mut data: &[u8]) -> EthResult<Recovered<T>> {
    if data.is_empty() {
        return Err(EthApiError::EmptyRawTransactionData)
    }
    let transaction =
        T::decode_2718(&mut data).map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?;
    SignedTransaction::try_into_recovered(transaction)
        .or(Err(EthApiError::InvalidTransactionSignature))
}
/// Performs a binary search within a given block range to find the desired block number.
///
/// The binary search is performed by calling the provided asynchronous `check` closure on the
/// blocks of the range. The closure should return a future representing the result of performing
/// the desired logic at a given block. The future resolves to an `bool` where:
/// - `true` indicates that the condition has been matched, but we can try to find a lower block to
///   make the condition more matchable.
/// - `false` indicates that the condition not matched, so the target is not present in the current
///   block and should continue searching in a higher range.
///
/// Args:
/// - `low`: The lower bound of the block range (inclusive).
/// - `high`: The upper bound of the block range (inclusive).
/// - `check`: A closure that performs the desired logic at a given block.
///
/// Returns the lowest block number for which `check` returned `true`, or `high` if it never did.
///
/// # Errors
///
/// Propagates the first error returned by `check`.
pub async fn binary_search<F, Fut, E>(low: u64, high: u64, check: F) -> Result<u64, E>
where
    F: Fn(u64) -> Fut,
    Fut: Future<Output = Result<bool, E>>,
{
    let mut low = low;
    let mut high = high;
    // Default result if the condition never matches.
    let mut num = high;
    while low <= high {
        // Overflow-safe midpoint: `(low + high) / 2` can overflow u64.
        let mid = low + (high - low) / 2;
        if check(mid).await? {
            num = mid;
            // Condition matched: search lower. Guard the `mid == 0` case,
            // where `mid - 1` would underflow (debug panic / release wrap).
            let Some(next_high) = mid.checked_sub(1) else { break };
            high = next_high;
        } else {
            // Condition not matched: search higher. Guard `mid == u64::MAX`.
            let Some(next_low) = mid.checked_add(1) else { break };
            low = next_low;
        }
    }
    Ok(num)
}
/// Calculates the blob gas used ratio for a block, accounting for the case where
/// `max_blob_gas_per_block` is zero.
///
/// Returns `0.0` if `blob_gas_used` is `0`, otherwise returns the ratio
/// `blob_gas_used/max_blob_gas_per_block`.
pub fn checked_blob_gas_used_ratio(blob_gas_used: u64, max_blob_gas_per_block: u64) -> f64 {
    match blob_gas_used {
        // Nothing consumed: the ratio is zero regardless of the divisor, which
        // also sidesteps a 0/0 NaN when the max is zero.
        0 => 0.0,
        used => used as f64 / max_blob_gas_per_block as f64,
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_binary_search() {
// in the middle
let num: Result<_, ()> =
binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 5) })).await;
assert_eq!(num, Ok(5));
// in the upper
let num: Result<_, ()> =
binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 7) })).await;
assert_eq!(num, Ok(7));
// in the lower
let num: Result<_, ()> =
binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 1) })).await;
assert_eq!(num, Ok(1));
// higher than the upper
let num: Result<_, ()> =
binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 11) })).await;
assert_eq!(num, Ok(10));
}
#[test]
fn test_checked_blob_gas_used_ratio() {
// No blob gas used, max blob gas per block is 0
assert_eq!(checked_blob_gas_used_ratio(0, 0), 0.0);
// Blob gas used is zero, max blob gas per block is non-zero
assert_eq!(checked_blob_gas_used_ratio(0, 100), 0.0);
// Blob gas used is non-zero, max blob gas per block is non-zero
assert_eq!(checked_blob_gas_used_ratio(50, 100), 0.5);
// Blob gas used is non-zero and equal to max blob gas per block
assert_eq!(checked_blob_gas_used_ratio(100, 100), 1.0);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/simulate.rs | crates/rpc/rpc-eth-types/src/simulate.rs | //! Utilities for serving `eth_simulateV1`
use crate::{
error::{
api::{FromEthApiError, FromEvmHalt},
ToRpcError,
},
EthApiError, RevertError,
};
use alloy_consensus::{BlockHeader, Transaction as _};
use alloy_eips::eip2718::WithEncoded;
use alloy_network::TransactionBuilder;
use alloy_rpc_types_eth::{
simulate::{SimCallResult, SimulateError, SimulatedBlock},
BlockTransactionsKind,
};
use jsonrpsee_types::ErrorObject;
use reth_evm::{
execute::{BlockBuilder, BlockBuilderOutcome, BlockExecutor},
Evm,
};
use reth_primitives_traits::{
BlockBody as _, BlockTy, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction,
};
use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq};
use reth_rpc_server_types::result::rpc_err;
use reth_storage_api::noop::NoopProvider;
use revm::{
context_interface::result::ExecutionResult,
primitives::{Address, Bytes, TxKind},
Database,
};
/// Errors which may occur during `eth_simulateV1` execution.
#[derive(Debug, thiserror::Error)]
pub enum EthSimulateError {
    /// Total gas limit of transactions for the block exceeds the block gas limit.
    #[error("Block gas limit exceeded by the block's transactions")]
    BlockGasLimitExceeded,
    /// Max gas limit for entire operation exceeded.
    #[error("Client adjustable limit reached")]
    GasLimitReached,
}
impl EthSimulateError {
    // JSON-RPC error codes for these failures; presumably taken from the
    // execution-apis `eth_simulateV1` spec — verify against that document.
    const fn error_code(&self) -> i32 {
        match self {
            Self::BlockGasLimitExceeded => -38015,
            Self::GasLimitReached => -38026,
        }
    }
}
impl ToRpcError for EthSimulateError {
    // Wraps the error into a jsonrpsee error object with the matching code.
    fn to_rpc_error(&self) -> ErrorObject<'static> {
        rpc_err(self.error_code(), self.to_string(), None)
    }
}
/// Converts all [`TransactionRequest`]s into [`Recovered`] transactions and applies them to the
/// given [`BlockExecutor`].
///
/// Returns all executed transactions and the result of the execution.
///
/// # Errors
///
/// Returns an [`EthApiError`] if transaction resolution, execution, or block
/// finalization fails.
///
/// [`TransactionRequest`]: alloy_rpc_types_eth::TransactionRequest
#[expect(clippy::type_complexity)]
pub fn execute_transactions<S, T>(
    mut builder: S,
    calls: Vec<RpcTxReq<T::Network>>,
    default_gas_limit: u64,
    chain_id: u64,
    tx_resp_builder: &T,
) -> Result<
    (
        BlockBuilderOutcome<S::Primitives>,
        Vec<ExecutionResult<<<S::Executor as BlockExecutor>::Evm as Evm>::HaltReason>>,
    ),
    EthApiError,
>
where
    S: BlockBuilder<Executor: BlockExecutor<Evm: Evm<DB: Database<Error: Into<EthApiError>>>>>,
    T: RpcConvert<Primitives = S::Primitives>,
{
    // Apply system-level pre-execution changes before any user transaction.
    builder.apply_pre_execution_changes()?;
    let mut results = Vec::with_capacity(calls.len());
    for call in calls {
        // Resolve transaction, populate missing fields and enforce calls
        // correctness.
        let tx = resolve_transaction(
            call,
            default_gas_limit,
            builder.evm().block().basefee,
            chain_id,
            builder.evm_mut().db_mut(),
            tx_resp_builder,
        )?;
        // Create transaction with an empty envelope.
        // The effect for a layer-2 execution client is that it does not charge L1 cost.
        let tx = WithEncoded::new(Default::default(), tx);
        builder
            .execute_transaction_with_result_closure(tx, |result| results.push(result.clone()))?;
    }
    // Pass noop provider to skip state root calculations.
    let result = builder.finish(NoopProvider::default())?;
    Ok((result, results))
}
/// Goes over the list of [`TransactionRequest`]s and populates missing fields trying to resolve
/// them into primitive transactions.
///
/// This will set the defaults as defined in <https://github.com/ethereum/execution-apis/blob/e56d3208789259d0b09fa68e9d8594aa4d73c725/docs/ethsimulatev1-notes.md#default-values-for-transactions>
///
/// # Errors
///
/// Returns an [`EthApiError`] if the database lookup for the sender account
/// fails or the request cannot be built into a signed transaction.
///
/// [`TransactionRequest`]: alloy_rpc_types_eth::TransactionRequest
pub fn resolve_transaction<DB: Database, Tx, T>(
    mut tx: RpcTxReq<T::Network>,
    default_gas_limit: u64,
    block_base_fee_per_gas: u64,
    chain_id: u64,
    db: &mut DB,
    tx_resp_builder: &T,
) -> Result<Recovered<Tx>, EthApiError>
where
    DB::Error: Into<EthApiError>,
    T: RpcConvert<Primitives: NodePrimitives<SignedTx = Tx>>,
{
    // If we're missing any fields we try to fill nonce, gas and
    // gas price.
    let tx_type = tx.as_ref().output_tx_type();
    // Missing sender defaults to the zero address.
    let from = if let Some(from) = tx.as_ref().from() {
        from
    } else {
        tx.as_mut().set_from(Address::ZERO);
        Address::ZERO
    };
    // Missing nonce defaults to the sender's current account nonce.
    if tx.as_ref().nonce().is_none() {
        tx.as_mut().set_nonce(
            db.basic(from).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default(),
        );
    }
    if tx.as_ref().gas_limit().is_none() {
        tx.as_mut().set_gas_limit(default_gas_limit);
    }
    if tx.as_ref().chain_id().is_none() {
        tx.as_mut().set_chain_id(chain_id);
    }
    // Missing `to` defaults to a contract creation.
    if tx.as_ref().kind().is_none() {
        tx.as_mut().set_kind(TxKind::Create);
    }
    // if we can't build the _entire_ transaction yet, we need to check the fee values
    if tx.as_ref().output_tx_type_checked().is_none() {
        if tx_type.is_legacy() || tx_type.is_eip2930() {
            // Pre-1559 types only carry a single gas price.
            if tx.as_ref().gas_price().is_none() {
                tx.as_mut().set_gas_price(block_base_fee_per_gas as u128);
            }
        } else {
            // set dynamic 1559 fees
            if tx.as_ref().max_fee_per_gas().is_none() {
                let mut max_fee_per_gas = block_base_fee_per_gas as u128;
                if let Some(prio_fee) = tx.as_ref().max_priority_fee_per_gas() {
                    // if a prio fee is provided we need to select the max fee accordingly
                    // because the base fee must be higher than the prio fee.
                    max_fee_per_gas = prio_fee.max(max_fee_per_gas);
                }
                tx.as_mut().set_max_fee_per_gas(max_fee_per_gas);
            }
            if tx.as_ref().max_priority_fee_per_gas().is_none() {
                tx.as_mut().set_max_priority_fee_per_gas(0);
            }
        }
    }
    // Build the primitive transaction; the signer is attached unchecked since
    // simulated transactions are not actually signed.
    let tx = tx_resp_builder
        .build_simulate_v1_transaction(tx)
        .map_err(|e| EthApiError::other(e.into()))?;
    Ok(Recovered::new_unchecked(tx, from))
}
/// Handles outputs of the calls execution and builds a [`SimulatedBlock`].
///
/// Pairs each execution result with its transaction in the block and converts
/// it into a [`SimCallResult`] (halt and revert become errors, success carries
/// the logs), then renders the block itself in the requested RPC format.
pub fn build_simulated_block<T, Halt: Clone>(
    block: RecoveredBlock<BlockTy<T::Primitives>>,
    results: Vec<ExecutionResult<Halt>>,
    txs_kind: BlockTransactionsKind,
    tx_resp_builder: &T,
) -> Result<SimulatedBlock<RpcBlock<T::Network>>, T::Error>
where
    T: RpcConvert<Error: FromEthApiError + FromEvmHalt<Halt>>,
{
    let mut calls: Vec<SimCallResult> = Vec::with_capacity(results.len());
    // Running log index across all successful calls in the block.
    let mut log_index = 0;
    for (index, (result, tx)) in results.into_iter().zip(block.body().transactions()).enumerate() {
        let call = match result {
            // EVM halt: surface as an error derived from the halt reason.
            ExecutionResult::Halt { reason, gas_used } => {
                let error = T::Error::from_evm_halt(reason, tx.gas_limit());
                SimCallResult {
                    return_data: Bytes::new(),
                    error: Some(SimulateError {
                        message: error.to_string(),
                        code: error.into().code(),
                    }),
                    gas_used,
                    logs: Vec::new(),
                    status: false,
                }
            }
            // Revert: keep the revert output as return data alongside the error.
            ExecutionResult::Revert { output, gas_used } => {
                let error = RevertError::new(output.clone());
                SimCallResult {
                    return_data: output,
                    error: Some(SimulateError {
                        code: error.error_code(),
                        message: error.to_string(),
                    }),
                    gas_used,
                    status: false,
                    logs: Vec::new(),
                }
            }
            ExecutionResult::Success { output, gas_used, logs, .. } => SimCallResult {
                return_data: output.into_data(),
                error: None,
                gas_used,
                logs: logs
                    .into_iter()
                    .map(|log| {
                        log_index += 1;
                        alloy_rpc_types_eth::Log {
                            inner: log,
                            log_index: Some(log_index - 1),
                            transaction_index: Some(index as u64),
                            transaction_hash: Some(*tx.tx_hash()),
                            block_number: Some(block.header().number()),
                            block_timestamp: Some(block.header().timestamp()),
                            ..Default::default()
                        }
                    })
                    .collect(),
                status: true,
            },
        };
        calls.push(call);
    }
    // Render the block itself in the requested transactions format.
    let block = block.into_rpc_block(
        txs_kind,
        |tx, tx_info| tx_resp_builder.fill(tx, tx_info),
        |header, size| tx_resp_builder.convert_header(header, size),
    )?;
    Ok(SimulatedBlock { inner: block, calls })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/gas_oracle.rs | crates/rpc/rpc-eth-types/src/gas_oracle.rs | //! An implementation of the eth gas price oracle, used for providing gas price estimates based on
//! previous blocks.
use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError};
use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction, TxReceipt};
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::{B256, U256};
use alloy_rpc_types_eth::BlockId;
use derive_more::{Deref, DerefMut, From, Into};
use itertools::Itertools;
use reth_rpc_server_types::{
constants,
constants::gas_oracle::{
DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE,
DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, MAX_REWARD_PERCENTILE_COUNT, SAMPLE_NUMBER,
},
};
use reth_storage_api::{BlockReaderIdExt, NodePrimitivesProvider};
use schnellru::{ByLength, LruMap};
use serde::{Deserialize, Serialize};
use std::fmt::{self, Debug, Formatter};
use tokio::sync::Mutex;
use tracing::warn;
/// The default gas limit for `eth_call` and adjacent calls. See
/// [`RPC_DEFAULT_GAS_CAP`](constants::gas_oracle::RPC_DEFAULT_GAS_CAP).
pub const RPC_DEFAULT_GAS_CAP: GasCap = GasCap(constants::gas_oracle::RPC_DEFAULT_GAS_CAP);
/// Settings for the [`GasPriceOracle`]
// Serialized with camelCase keys for config-file compatibility.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GasPriceOracleConfig {
    /// The number of populated blocks to produce the gas price estimate
    pub blocks: u32,
    /// The percentile of gas prices to use for the estimate
    pub percentile: u32,
    /// The maximum number of headers to keep in the cache
    pub max_header_history: u64,
    /// The maximum number of blocks for estimating gas price
    pub max_block_history: u64,
    /// The maximum number for reward percentiles.
    ///
    /// This effectively limits how many transactions and receipts are fetched to compute the
    /// reward percentile.
    pub max_reward_percentile_count: u64,
    /// The default gas price to use if there are no blocks to use
    pub default_suggested_fee: Option<U256>,
    /// The maximum gas price to use for the estimate
    pub max_price: Option<U256>,
    /// The minimum gas price, under which the sample will be ignored
    pub ignore_price: Option<U256>,
}
impl Default for GasPriceOracleConfig {
    // Defaults mirror the server-wide gas oracle constants; no default
    // suggested fee means the oracle falls back to its built-in last price.
    fn default() -> Self {
        Self {
            blocks: DEFAULT_GAS_PRICE_BLOCKS,
            percentile: DEFAULT_GAS_PRICE_PERCENTILE,
            max_header_history: MAX_HEADER_HISTORY,
            max_block_history: MAX_HEADER_HISTORY,
            max_reward_percentile_count: MAX_REWARD_PERCENTILE_COUNT,
            default_suggested_fee: None,
            max_price: Some(DEFAULT_MAX_GAS_PRICE),
            ignore_price: Some(DEFAULT_IGNORE_GAS_PRICE),
        }
    }
}
/// Calculates a gas price depending on recent blocks.
#[derive(Debug)]
pub struct GasPriceOracle<Provider>
where
    Provider: NodePrimitivesProvider,
{
    /// The type used to subscribe to block events and get block info
    provider: Provider,
    /// The cache for blocks
    cache: EthStateCache<Provider::Primitives>,
    /// The config for the oracle
    oracle_config: GasPriceOracleConfig,
    /// The price under which the sample will be ignored.
    // Pre-converted from the config's `U256` for cheap per-tx comparison.
    ignore_price: Option<u128>,
    /// Stores the latest calculated price and its block hash and Cache stores the lowest effective
    /// tip values of recent blocks
    inner: Mutex<GasPriceOracleInner>,
}
impl<Provider> GasPriceOracle<Provider>
where
Provider: BlockReaderIdExt + NodePrimitivesProvider,
{
    /// Creates and returns the [`GasPriceOracle`].
    ///
    /// Percentiles above 100 are clamped to 100 with a warning.
    pub fn new(
        provider: Provider,
        mut oracle_config: GasPriceOracleConfig,
        cache: EthStateCache<Provider::Primitives>,
    ) -> Self {
        // sanitize the percentile to be less than 100
        if oracle_config.percentile > 100 {
            warn!(prev_percentile = ?oracle_config.percentile, "Invalid configured gas price percentile, assuming 100.");
            oracle_config.percentile = 100;
        }
        let ignore_price = oracle_config.ignore_price.map(|price| price.saturating_to());
        // this is the number of blocks that we will cache the values for
        // NOTE(review): `max_block_history` is cast u64 -> u32 here, which
        // truncates — assumes the configured history fits in u32; confirm.
        let cached_values = (oracle_config.blocks * 5).max(oracle_config.max_block_history as u32);
        let inner = Mutex::new(GasPriceOracleInner {
            last_price: GasPriceOracleResult {
                block_hash: B256::ZERO,
                price: oracle_config
                    .default_suggested_fee
                    .unwrap_or_else(|| GasPriceOracleResult::default().price),
            },
            lowest_effective_tip_cache: EffectiveTipLruCache(LruMap::new(ByLength::new(
                cached_values,
            ))),
        });
        Self { provider, oracle_config, cache, ignore_price, inner }
    }
    /// Returns the configuration of the gas price oracle.
    pub const fn config(&self) -> &GasPriceOracleConfig {
        &self.oracle_config
    }
    /// Suggests a gas price estimate based on recent blocks, using the configured percentile.
    ///
    /// The result is cached per head block: repeat calls for the same head
    /// return the stored price without walking the chain again.
    pub async fn suggest_tip_cap(&self) -> EthResult<U256> {
        let header = self
            .provider
            .sealed_header_by_number_or_tag(BlockNumberOrTag::Latest)?
            .ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?;
        let mut inner = self.inner.lock().await;
        // if we have stored a last price, then we check whether or not it was for the same head
        if inner.last_price.block_hash == header.hash() {
            return Ok(inner.last_price.price)
        }
        // if all responses are empty, then we can return a maximum of 2*check_block blocks' worth
        // of prices
        //
        // we only return more than check_block blocks' worth of prices if one or more return empty
        // transactions
        let mut current_hash = header.hash();
        let mut results = Vec::new();
        let mut populated_blocks = 0;
        // we only check a maximum of 2 * max_block_history, or the number of blocks in the chain
        let max_blocks = if self.oracle_config.max_block_history * 2 > header.number() {
            header.number()
        } else {
            self.oracle_config.max_block_history * 2
        };
        // Walk backwards from the head, collecting the lowest effective tips
        // of each block (served from the LRU cache when possible).
        for _ in 0..max_blocks {
            // Check if current hash is in cache
            let (parent_hash, block_values) =
                if let Some(vals) = inner.lowest_effective_tip_cache.get(&current_hash) {
                    vals.to_owned()
                } else {
                    // Otherwise we fetch it using get_block_values
                    let (parent_hash, block_values) = self
                        .get_block_values(current_hash, SAMPLE_NUMBER)
                        .await?
                        .ok_or(EthApiError::HeaderNotFound(current_hash.into()))?;
                    inner
                        .lowest_effective_tip_cache
                        .insert(current_hash, (parent_hash, block_values.clone()));
                    (parent_hash, block_values)
                };
            if block_values.is_empty() {
                // Empty block: fall back to the last known price as a sample.
                results.push(U256::from(inner.last_price.price));
            } else {
                results.extend(block_values);
                populated_blocks += 1;
            }
            // break when we have enough populated blocks
            if populated_blocks >= self.oracle_config.blocks {
                break
            }
            current_hash = parent_hash;
        }
        // sort results then take the configured percentile result
        let mut price = if results.is_empty() {
            inner.last_price.price
        } else {
            results.sort_unstable();
            *results.get((results.len() - 1) * self.oracle_config.percentile as usize / 100).expect(
                "gas price index is a percent of nonzero array length, so a value always exists",
            )
        };
        // constrain to the max price
        if let Some(max_price) = self.oracle_config.max_price {
            if price > max_price {
                price = max_price;
            }
        }
        // Remember the result for this head so repeat calls are free.
        inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price };
        Ok(price)
    }
    /// Get the `limit` lowest effective tip values for the given block. If the oracle has a
    /// configured `ignore_price` threshold, then tip values under that threshold will be ignored
    /// before returning a result.
    ///
    /// If the block cannot be found, then this will return `None`.
    ///
    /// This method also returns the parent hash for the given block.
    async fn get_block_values(
        &self,
        block_hash: B256,
        limit: usize,
    ) -> EthResult<Option<(B256, Vec<U256>)>> {
        // check the cache (this will hit the disk if the block is not cached)
        let Some(block) = self.cache.get_recovered_block(block_hash).await? else {
            return Ok(None)
        };
        let base_fee_per_gas = block.base_fee_per_gas();
        let parent_hash = block.parent_hash();
        // sort the transactions by ascending effective tip first
        let sorted_transactions = block.transactions_recovered().sorted_by_cached_key(|tx| {
            if let Some(base_fee) = base_fee_per_gas {
                (*tx).effective_tip_per_gas(base_fee)
            } else {
                // No base fee (pre-1559 block): fall back to the raw price.
                Some((*tx).priority_fee_or_price())
            }
        });
        let mut prices = Vec::with_capacity(limit);
        for tx in sorted_transactions {
            let effective_tip = if let Some(base_fee) = base_fee_per_gas {
                tx.effective_tip_per_gas(base_fee)
            } else {
                Some(tx.priority_fee_or_price())
            };
            // ignore transactions with a tip under the configured threshold
            if let Some(ignore_under) = self.ignore_price {
                if effective_tip < Some(ignore_under) {
                    continue
                }
            }
            // check if the sender was the coinbase, if so, ignore
            if tx.signer() == block.beneficiary() {
                continue
            }
            // a `None` effective_gas_tip represents a transaction where the max_fee_per_gas is
            // less than the base fee which would be invalid
            prices.push(U256::from(effective_tip.ok_or(RpcInvalidTransactionError::FeeCapTooLow)?));
            // we have enough entries
            if prices.len() >= limit {
                break
            }
        }
        Ok(Some((parent_hash, prices)))
    }
/// Suggests a max priority fee value using a simplified and more predictable algorithm
/// appropriate for chains like Optimism with a single known block builder.
///
/// It returns either:
/// - The minimum suggested priority fee when blocks have capacity
/// - 10% above the median effective priority fee from the last block when at capacity
///
/// A block is considered at capacity if its total gas used plus the maximum single transaction
/// gas would exceed the block's gas limit.
pub async fn op_suggest_tip_cap(&self, min_suggested_priority_fee: U256) -> EthResult<U256> {
let header = self
.provider
.sealed_header_by_number_or_tag(BlockNumberOrTag::Latest)?
.ok_or(EthApiError::HeaderNotFound(BlockId::latest()))?;
let mut inner = self.inner.lock().await;
// if we have stored a last price, then we check whether or not it was for the same head
if inner.last_price.block_hash == header.hash() {
return Ok(inner.last_price.price);
}
let mut suggestion = min_suggested_priority_fee;
// find the maximum gas used by any of the transactions in the block to use as the
// capacity margin for the block, if no receipts are found return the
// suggested_min_priority_fee
let receipts = self
.cache
.get_receipts(header.hash())
.await?
.ok_or(EthApiError::ReceiptsNotFound(BlockId::latest()))?;
let mut max_tx_gas_used = 0u64;
let mut last_cumulative_gas = 0;
for receipt in receipts.as_ref() {
let cumulative_gas = receipt.cumulative_gas_used();
// get the gas used by each transaction in the block, by subtracting the
// cumulative gas used of the previous transaction from the cumulative gas used of
// the current transaction. This is because there is no gas_used()
// method on the Receipt trait.
let gas_used = cumulative_gas - last_cumulative_gas;
max_tx_gas_used = max_tx_gas_used.max(gas_used);
last_cumulative_gas = cumulative_gas;
}
// if the block is at capacity, the suggestion must be increased
if header.gas_used() + max_tx_gas_used > header.gas_limit() {
let Some(median_tip) = self.get_block_median_tip(header.hash()).await? else {
return Ok(suggestion);
};
let new_suggestion = median_tip + median_tip / U256::from(10);
if new_suggestion > suggestion {
suggestion = new_suggestion;
}
}
// constrain to the max price
if let Some(max_price) = self.oracle_config.max_price {
if suggestion > max_price {
suggestion = max_price;
}
}
inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price: suggestion };
Ok(suggestion)
}
/// Get the median tip value for the given block. This is useful for determining
/// tips when a block is at capacity.
///
/// If the block cannot be found or has no transactions, this will return `None`.
pub async fn get_block_median_tip(&self, block_hash: B256) -> EthResult<Option<U256>> {
// check the cache (this will hit the disk if the block is not cached)
let Some(block) = self.cache.get_recovered_block(block_hash).await? else {
return Ok(None)
};
let base_fee_per_gas = block.base_fee_per_gas();
// Filter, sort and collect the prices
let prices = block
.transactions_recovered()
.filter_map(|tx| {
if let Some(base_fee) = base_fee_per_gas {
(*tx).effective_tip_per_gas(base_fee)
} else {
Some((*tx).priority_fee_or_price())
}
})
.sorted()
.collect::<Vec<_>>();
let median = if prices.is_empty() {
// if there are no prices, return `None`
None
} else if prices.len() % 2 == 1 {
Some(U256::from(prices[prices.len() / 2]))
} else {
Some(U256::from((prices[prices.len() / 2 - 1] + prices[prices.len() / 2]) / 2))
};
Ok(median)
}
}
/// Container type for mutable inner state of the [`GasPriceOracle`]
#[derive(Debug)]
struct GasPriceOracleInner {
last_price: GasPriceOracleResult,
lowest_effective_tip_cache: EffectiveTipLruCache,
}
/// Wrapper struct for `LruMap`
#[derive(Deref, DerefMut)]
pub struct EffectiveTipLruCache(LruMap<B256, (B256, Vec<U256>), ByLength>);
impl Debug for EffectiveTipLruCache {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("EffectiveTipLruCache")
.field("cache_length", &self.len())
.field("cache_memory_usage", &self.memory_usage())
.finish()
}
}
/// Stores the last result that the oracle returned
#[derive(Debug, Clone)]
pub struct GasPriceOracleResult {
/// The block hash that the oracle used to calculate the price
pub block_hash: B256,
/// The price that the oracle calculated
pub price: U256,
}
impl Default for GasPriceOracleResult {
fn default() -> Self {
Self { block_hash: B256::ZERO, price: U256::from(GWEI_TO_WEI) }
}
}
/// The wrapper type for gas limit
#[derive(Debug, Clone, Copy, From, Into)]
pub struct GasCap(pub u64);
impl Default for GasCap {
fn default() -> Self {
RPC_DEFAULT_GAS_CAP
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn max_price_sanity() {
assert_eq!(DEFAULT_MAX_GAS_PRICE, U256::from(500_000_000_000u64));
assert_eq!(DEFAULT_MAX_GAS_PRICE, U256::from(500 * GWEI_TO_WEI))
}
#[test]
fn ignore_price_sanity() {
assert_eq!(DEFAULT_IGNORE_GAS_PRICE, U256::from(2u64));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/fee_history.rs | crates/rpc/rpc-eth-types/src/fee_history.rs | //! Consist of types adjacent to the fee history cache and its configs
use std::{
collections::{BTreeMap, VecDeque},
fmt::Debug,
sync::{atomic::Ordering::SeqCst, Arc},
};
use alloy_consensus::{BlockHeader, Header, Transaction, TxReceipt};
use alloy_eips::eip7840::BlobParams;
use alloy_rpc_types_eth::TxGasAndReward;
use futures::{
future::{Fuse, FusedFuture},
FutureExt, Stream, StreamExt,
};
use metrics::atomics::AtomicU64;
use reth_chain_state::CanonStateNotification;
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SealedBlock};
use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY;
use reth_storage_api::BlockReaderIdExt;
use serde::{Deserialize, Serialize};
use tracing::trace;
use crate::utils::checked_blob_gas_used_ratio;
use super::{EthApiError, EthStateCache};
/// Contains cached fee history entries for blocks.
///
/// Purpose for this is to provide cached data for `eth_feeHistory`.
#[derive(Debug, Clone)]
pub struct FeeHistoryCache<H> {
inner: Arc<FeeHistoryCacheInner<H>>,
}
impl<H> FeeHistoryCache<H>
where
H: BlockHeader + Clone,
{
/// Creates new `FeeHistoryCache` instance, initialize it with the more recent data, set bounds
pub fn new(config: FeeHistoryCacheConfig) -> Self {
let inner = FeeHistoryCacheInner {
lower_bound: Default::default(),
upper_bound: Default::default(),
config,
entries: Default::default(),
};
Self { inner: Arc::new(inner) }
}
/// How the cache is configured.
#[inline]
pub fn config(&self) -> &FeeHistoryCacheConfig {
&self.inner.config
}
/// Returns the configured resolution for percentile approximation.
#[inline]
pub fn resolution(&self) -> u64 {
self.config().resolution
}
/// Returns all blocks that are missing in the cache in the [`lower_bound`, `upper_bound`]
/// range.
///
/// This function is used to populate the cache with missing blocks, which can happen if the
/// node switched to stage sync node.
async fn missing_consecutive_blocks(&self) -> VecDeque<u64> {
let entries = self.inner.entries.read().await;
(self.lower_bound()..self.upper_bound())
.rev()
.filter(|&block_number| !entries.contains_key(&block_number))
.collect()
}
/// Insert block data into the cache.
async fn insert_blocks<'a, I, B, R, C>(&self, blocks: I, chain_spec: &C)
where
B: Block<Header = H> + 'a,
R: TxReceipt + 'a,
I: IntoIterator<Item = (&'a SealedBlock<B>, &'a [R])>,
C: EthChainSpec,
{
let mut entries = self.inner.entries.write().await;
let percentiles = self.predefined_percentiles();
// Insert all new blocks and calculate approximated rewards
for (block, receipts) in blocks {
let mut fee_history_entry = FeeHistoryEntry::<H>::new(
block,
chain_spec.blob_params_at_timestamp(block.timestamp_seconds()),
);
fee_history_entry.rewards = calculate_reward_percentiles_for_block(
&percentiles,
fee_history_entry.header.gas_used(),
fee_history_entry.header.base_fee_per_gas().unwrap_or_default(),
block.body().transactions(),
receipts,
)
.unwrap_or_default();
entries.insert(block.number(), fee_history_entry);
}
// enforce bounds by popping the oldest entries
while entries.len() > self.inner.config.max_blocks as usize {
entries.pop_first();
}
if entries.is_empty() {
self.inner.upper_bound.store(0, SeqCst);
self.inner.lower_bound.store(0, SeqCst);
return
}
let upper_bound = *entries.last_entry().expect("Contains at least one entry").key();
// also enforce proper lower bound in case we have gaps
let target_lower = upper_bound.saturating_sub(self.inner.config.max_blocks);
while entries.len() > 1 && *entries.first_key_value().unwrap().0 < target_lower {
entries.pop_first();
}
let lower_bound = *entries.first_entry().expect("Contains at least one entry").key();
self.inner.upper_bound.store(upper_bound, SeqCst);
self.inner.lower_bound.store(lower_bound, SeqCst);
}
/// Get `UpperBound` value for `FeeHistoryCache`
pub fn upper_bound(&self) -> u64 {
self.inner.upper_bound.load(SeqCst)
}
/// Get `LowerBound` value for `FeeHistoryCache`
pub fn lower_bound(&self) -> u64 {
self.inner.lower_bound.load(SeqCst)
}
/// Collect fee history for the given range (inclusive `start_block..=end_block`).
///
/// This function retrieves fee history entries from the cache for the specified range.
/// If the requested range (`start_block` to `end_block`) is within the cache bounds,
/// it returns the corresponding entries.
/// Otherwise it returns None.
pub async fn get_history(
&self,
start_block: u64,
end_block: u64,
) -> Option<Vec<FeeHistoryEntry<H>>> {
if end_block < start_block {
// invalid range, return None
return None
}
let lower_bound = self.lower_bound();
let upper_bound = self.upper_bound();
if start_block >= lower_bound && end_block <= upper_bound {
let entries = self.inner.entries.read().await;
let result = entries
.range(start_block..=end_block)
.map(|(_, fee_entry)| fee_entry.clone())
.collect::<Vec<_>>();
if result.is_empty() {
return None
}
Some(result)
} else {
None
}
}
/// Generates predefined set of percentiles
///
/// This returns 100 * resolution points
pub fn predefined_percentiles(&self) -> Vec<f64> {
let res = self.resolution() as f64;
(0..=100 * self.resolution()).map(|p| p as f64 / res).collect()
}
}
/// Settings for the [`FeeHistoryCache`].
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct FeeHistoryCacheConfig {
/// Max number of blocks in cache.
///
/// Default is [`MAX_HEADER_HISTORY`] plus some change to also serve slightly older blocks from
/// cache, since `fee_history` supports the entire range
pub max_blocks: u64,
/// Percentile approximation resolution
///
/// Default is 4 which means 0.25
pub resolution: u64,
}
impl Default for FeeHistoryCacheConfig {
fn default() -> Self {
Self { max_blocks: MAX_HEADER_HISTORY + 100, resolution: 4 }
}
}
/// Container type for shared state in [`FeeHistoryCache`]
#[derive(Debug)]
struct FeeHistoryCacheInner<H> {
/// Stores the lower bound of the cache
lower_bound: AtomicU64,
/// Stores the upper bound of the cache
upper_bound: AtomicU64,
/// Config for `FeeHistoryCache`, consists of resolution for percentile approximation
/// and max number of blocks
config: FeeHistoryCacheConfig,
/// Stores the entries of the cache
entries: tokio::sync::RwLock<BTreeMap<u64, FeeHistoryEntry<H>>>,
}
/// Awaits for new chain events and directly inserts them into the cache so they're available
/// immediately before they need to be fetched from disk.
pub async fn fee_history_cache_new_blocks_task<St, Provider, N>(
fee_history_cache: FeeHistoryCache<N::BlockHeader>,
mut events: St,
provider: Provider,
cache: EthStateCache<N>,
) where
St: Stream<Item = CanonStateNotification<N>> + Unpin + 'static,
Provider:
BlockReaderIdExt<Block = N::Block, Receipt = N::Receipt> + ChainSpecProvider + 'static,
N: NodePrimitives,
N::BlockHeader: BlockHeader + Clone,
{
// We're listening for new blocks emitted when the node is in live sync.
// If the node transitions to stage sync, we need to fetch the missing blocks
let mut missing_blocks = VecDeque::new();
let mut fetch_missing_block = Fuse::terminated();
loop {
if fetch_missing_block.is_terminated() {
if let Some(block_number) = missing_blocks.pop_front() {
trace!(target: "rpc::fee", ?block_number, "Fetching missing block for fee history cache");
if let Ok(Some(hash)) = provider.block_hash(block_number) {
// fetch missing block
fetch_missing_block = cache.get_block_and_receipts(hash).boxed().fuse();
}
}
}
let chain_spec = provider.chain_spec();
tokio::select! {
res = &mut fetch_missing_block => {
if let Ok(res) = res {
let res = res.as_ref()
.map(|(b, r)| (b.sealed_block(), r.as_slice()));
fee_history_cache.insert_blocks(res, &chain_spec).await;
}
}
event = events.next() => {
let Some(event) = event else {
// the stream ended, we are done
break
};
let committed = event.committed();
let blocks_and_receipts = committed
.blocks_and_receipts()
.map(|(block, receipts)| {
(block.sealed_block(), receipts.as_slice())
});
fee_history_cache.insert_blocks(blocks_and_receipts, &chain_spec).await;
// keep track of missing blocks
missing_blocks = fee_history_cache.missing_consecutive_blocks().await;
}
}
}
}
/// Calculates reward percentiles for transactions in a block header.
/// Given a list of percentiles and a sealed block header, this function computes
/// the corresponding rewards for the transactions at each percentile.
///
/// The results are returned as a vector of U256 values.
pub fn calculate_reward_percentiles_for_block<T, R>(
percentiles: &[f64],
gas_used: u64,
base_fee_per_gas: u64,
transactions: &[T],
receipts: &[R],
) -> Result<Vec<u128>, EthApiError>
where
T: Transaction,
R: TxReceipt,
{
let mut transactions = transactions
.iter()
.zip(receipts)
.scan(0, |previous_gas, (tx, receipt)| {
// Convert the cumulative gas used in the receipts
// to the gas usage by the transaction
//
// While we will sum up the gas again later, it is worth
// noting that the order of the transactions will be different,
// so the sum will also be different for each receipt.
let gas_used = receipt.cumulative_gas_used() - *previous_gas;
*previous_gas = receipt.cumulative_gas_used();
Some(TxGasAndReward {
gas_used,
reward: tx.effective_tip_per_gas(base_fee_per_gas).unwrap_or_default(),
})
})
.collect::<Vec<_>>();
// Sort the transactions by their rewards in ascending order
transactions.sort_by_key(|tx| tx.reward);
// Find the transaction that corresponds to the given percentile
//
// We use a `tx_index` here that is shared across all percentiles, since we know
// the percentiles are monotonically increasing.
let mut tx_index = 0;
let mut cumulative_gas_used = transactions.first().map(|tx| tx.gas_used).unwrap_or_default();
let mut rewards_in_block = Vec::with_capacity(percentiles.len());
for percentile in percentiles {
// Empty blocks should return in a zero row
if transactions.is_empty() {
rewards_in_block.push(0);
continue
}
let threshold = (gas_used as f64 * percentile / 100.) as u64;
while cumulative_gas_used < threshold && tx_index < transactions.len() - 1 {
tx_index += 1;
cumulative_gas_used += transactions[tx_index].gas_used;
}
rewards_in_block.push(transactions[tx_index].reward);
}
Ok(rewards_in_block)
}
/// A cached entry for a block's fee history.
#[derive(Debug, Clone)]
pub struct FeeHistoryEntry<H = Header> {
/// The full block header.
pub header: H,
/// Gas used ratio this block.
pub gas_used_ratio: f64,
/// The base per blob gas for EIP-4844.
/// For pre EIP-4844 equals to zero.
pub base_fee_per_blob_gas: Option<u128>,
/// Blob gas used ratio for this block.
///
/// Calculated as the ratio of blob gas used and the available blob data gas per block.
/// Will be zero if no blob gas was used or pre EIP-4844.
pub blob_gas_used_ratio: f64,
/// Approximated rewards for the configured percentiles.
pub rewards: Vec<u128>,
/// Blob parameters for this block.
pub blob_params: Option<BlobParams>,
}
impl<H> FeeHistoryEntry<H>
where
H: BlockHeader + Clone,
{
/// Creates a new entry from a sealed block.
///
/// Note: This does not calculate the rewards for the block.
pub fn new<B>(block: &SealedBlock<B>, blob_params: Option<BlobParams>) -> Self
where
B: Block<Header = H>,
{
let header = block.header();
Self {
header: block.header().clone(),
gas_used_ratio: header.gas_used() as f64 / header.gas_limit() as f64,
base_fee_per_blob_gas: header
.excess_blob_gas()
.and_then(|excess_blob_gas| Some(blob_params?.calc_blob_fee(excess_blob_gas))),
blob_gas_used_ratio: checked_blob_gas_used_ratio(
block.body().blob_gas_used(),
blob_params
.as_ref()
.map(|params| params.max_blob_gas_per_block())
.unwrap_or(alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK_DENCUN),
),
rewards: Vec::new(),
blob_params,
}
}
/// Returns the blob fee for the next block according to the EIP-4844 spec.
///
/// Returns `None` if `excess_blob_gas` is None.
///
/// See also [`Self::next_block_excess_blob_gas`]
pub fn next_block_blob_fee(&self) -> Option<u128> {
self.next_block_excess_blob_gas()
.and_then(|excess_blob_gas| Some(self.blob_params?.calc_blob_fee(excess_blob_gas)))
}
/// Calculate excess blob gas for the next block according to the EIP-4844 spec.
///
/// Returns a `None` if no excess blob gas is set, no EIP-4844 support
pub fn next_block_excess_blob_gas(&self) -> Option<u64> {
self.header.excess_blob_gas().and_then(|excess_blob_gas| {
Some(self.blob_params?.next_block_excess_blob_gas_osaka(
excess_blob_gas,
self.header.blob_gas_used()?,
self.header.base_fee_per_gas()?,
))
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/tx_forward.rs | crates/rpc/rpc-eth-types/src/tx_forward.rs | //! Consist of types adjacent to the fee history cache and its configs
use alloy_rpc_client::RpcClient;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
/// Configuration for the transaction forwarder.
#[derive(Debug, PartialEq, Eq, Clone, Default, Serialize, Deserialize)]
pub struct ForwardConfig {
/// The raw transaction forwarder.
///
/// Default is `None`
pub tx_forwarder: Option<Url>,
}
impl ForwardConfig {
/// Builds an [`RpcClient`] from the forwarder URL, if configured.
pub fn forwarder_client(&self) -> Option<RpcClient> {
self.tx_forwarder.clone().map(RpcClient::new_http)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/transaction.rs | crates/rpc/rpc-eth-types/src/transaction.rs | //! Helper types for `reth_rpc_eth_api::EthApiServer` implementation.
//!
//! Transaction wrapper that labels transaction with its origin.
use alloy_primitives::B256;
use alloy_rpc_types_eth::TransactionInfo;
use reth_ethereum_primitives::TransactionSigned;
use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction};
use reth_rpc_convert::{RpcConvert, RpcTransaction};
/// Represents from where a transaction was fetched.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TransactionSource<T = TransactionSigned> {
/// Transaction exists in the pool (Pending)
Pool(Recovered<T>),
/// Transaction already included in a block
///
/// This can be a historical block or a pending block (received from the CL)
Block {
/// Transaction fetched via provider
transaction: Recovered<T>,
/// Index of the transaction in the block
index: u64,
/// Hash of the block.
block_hash: B256,
/// Number of the block.
block_number: u64,
/// base fee of the block.
base_fee: Option<u64>,
},
}
// === impl TransactionSource ===
impl<T: SignedTransaction> TransactionSource<T> {
/// Consumes the type and returns the wrapped transaction.
pub fn into_recovered(self) -> Recovered<T> {
self.into()
}
/// Conversion into network specific transaction type.
pub fn into_transaction<Builder>(
self,
resp_builder: &Builder,
) -> Result<RpcTransaction<Builder::Network>, Builder::Error>
where
Builder: RpcConvert<Primitives: NodePrimitives<SignedTx = T>>,
{
match self {
Self::Pool(tx) => resp_builder.fill_pending(tx),
Self::Block { transaction, index, block_hash, block_number, base_fee } => {
let tx_info = TransactionInfo {
hash: Some(transaction.trie_hash()),
index: Some(index),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee,
};
resp_builder.fill(transaction, tx_info)
}
}
}
/// Returns the transaction and block related info, if not pending
pub fn split(self) -> (Recovered<T>, TransactionInfo) {
match self {
Self::Pool(tx) => {
let hash = tx.trie_hash();
(tx, TransactionInfo { hash: Some(hash), ..Default::default() })
}
Self::Block { transaction, index, block_hash, block_number, base_fee } => {
let hash = transaction.trie_hash();
(
transaction,
TransactionInfo {
hash: Some(hash),
index: Some(index),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee,
},
)
}
}
}
}
impl<T> From<TransactionSource<T>> for Recovered<T> {
fn from(value: TransactionSource<T>) -> Self {
match value {
TransactionSource::Pool(tx) => tx,
TransactionSource::Block { transaction, .. } => transaction,
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/builder/config.rs | crates/rpc/rpc-eth-types/src/builder/config.rs | //! Configuration for `eth` namespace APIs.
use std::time::Duration;
use crate::{
EthStateCacheConfig, FeeHistoryCacheConfig, ForwardConfig, GasPriceOracleConfig,
RPC_DEFAULT_GAS_CAP,
};
use reqwest::Url;
use reth_rpc_server_types::constants::{
default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKS_PER_FILTER,
DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_MAX_TRACE_FILTER_BLOCKS,
DEFAULT_PROOF_PERMITS,
};
use serde::{Deserialize, Serialize};
/// Default value for stale filter ttl
pub const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60);
/// Config for the locally built pending block
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum PendingBlockKind {
/// Return a pending block with header only, no transactions included
Empty,
/// Return null/no pending block
None,
/// Return a pending block with all transactions from the mempool (default behavior)
#[default]
Full,
}
impl std::str::FromStr for PendingBlockKind {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"empty" => Ok(Self::Empty),
"none" => Ok(Self::None),
"full" => Ok(Self::Full),
_ => Err(format!(
"Invalid pending block kind: {s}. Valid options are: empty, none, full"
)),
}
}
}
impl PendingBlockKind {
/// Returns true if the pending block kind is `None`
pub const fn is_none(&self) -> bool {
matches!(self, Self::None)
}
/// Returns true if the pending block kind is `Empty`
pub const fn is_empty(&self) -> bool {
matches!(self, Self::Empty)
}
}
/// Additional config values for the eth namespace.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct EthConfig {
/// Settings for the caching layer
pub cache: EthStateCacheConfig,
/// Settings for the gas price oracle
pub gas_oracle: GasPriceOracleConfig,
/// The maximum number of blocks into the past for generating state proofs.
pub eth_proof_window: u64,
/// The maximum number of tracing calls that can be executed in concurrently.
pub max_tracing_requests: usize,
/// Maximum number of blocks for `trace_filter` requests.
pub max_trace_filter_blocks: u64,
/// Maximum number of blocks that could be scanned per filter request in `eth_getLogs` calls.
pub max_blocks_per_filter: u64,
/// Maximum number of logs that can be returned in a single response in `eth_getLogs` calls.
pub max_logs_per_response: usize,
/// Gas limit for `eth_call` and call tracing RPC methods.
///
/// Defaults to [`RPC_DEFAULT_GAS_CAP`]
pub rpc_gas_cap: u64,
/// Max number of blocks for `eth_simulateV1`.
pub rpc_max_simulate_blocks: u64,
///
/// Sets TTL for stale filters
pub stale_filter_ttl: Duration,
/// Settings for the fee history cache
pub fee_history_cache: FeeHistoryCacheConfig,
/// The maximum number of getproof calls that can be executed concurrently.
pub proof_permits: usize,
/// Maximum batch size for transaction pool insertions.
pub max_batch_size: usize,
/// Controls how pending blocks are built when requested via RPC methods
pub pending_block_kind: PendingBlockKind,
/// The raw transaction forwarder.
pub raw_tx_forwarder: ForwardConfig,
}
impl EthConfig {
/// Returns the filter config for the `eth_filter` handler.
pub fn filter_config(&self) -> EthFilterConfig {
EthFilterConfig::default()
.max_blocks_per_filter(self.max_blocks_per_filter)
.max_logs_per_response(self.max_logs_per_response)
.stale_filter_ttl(self.stale_filter_ttl)
}
}
impl Default for EthConfig {
fn default() -> Self {
Self {
cache: EthStateCacheConfig::default(),
gas_oracle: GasPriceOracleConfig::default(),
eth_proof_window: DEFAULT_ETH_PROOF_WINDOW,
max_tracing_requests: default_max_tracing_requests(),
max_trace_filter_blocks: DEFAULT_MAX_TRACE_FILTER_BLOCKS,
max_blocks_per_filter: DEFAULT_MAX_BLOCKS_PER_FILTER,
max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE,
rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(),
rpc_max_simulate_blocks: DEFAULT_MAX_SIMULATE_BLOCKS,
stale_filter_ttl: DEFAULT_STALE_FILTER_TTL,
fee_history_cache: FeeHistoryCacheConfig::default(),
proof_permits: DEFAULT_PROOF_PERMITS,
max_batch_size: 1,
pending_block_kind: PendingBlockKind::Full,
raw_tx_forwarder: ForwardConfig::default(),
}
}
}
impl EthConfig {
/// Configures the caching layer settings
pub const fn state_cache(mut self, cache: EthStateCacheConfig) -> Self {
self.cache = cache;
self
}
/// Configures the gas price oracle settings
pub const fn gpo_config(mut self, gas_oracle_config: GasPriceOracleConfig) -> Self {
self.gas_oracle = gas_oracle_config;
self
}
/// Configures the maximum number of tracing requests
pub const fn max_tracing_requests(mut self, max_requests: usize) -> Self {
self.max_tracing_requests = max_requests;
self
}
/// Configures the maximum block length to scan per `eth_getLogs` request
pub const fn max_blocks_per_filter(mut self, max_blocks: u64) -> Self {
self.max_blocks_per_filter = max_blocks;
self
}
/// Configures the maximum number of blocks for `trace_filter` requests
pub const fn max_trace_filter_blocks(mut self, max_blocks: u64) -> Self {
self.max_trace_filter_blocks = max_blocks;
self
}
/// Configures the maximum number of logs per response
pub const fn max_logs_per_response(mut self, max_logs: usize) -> Self {
self.max_logs_per_response = max_logs;
self
}
/// Configures the maximum gas limit for `eth_call` and call tracing RPC methods
pub const fn rpc_gas_cap(mut self, rpc_gas_cap: u64) -> Self {
self.rpc_gas_cap = rpc_gas_cap;
self
}
/// Configures the maximum gas limit for `eth_call` and call tracing RPC methods
pub const fn rpc_max_simulate_blocks(mut self, max_blocks: u64) -> Self {
self.rpc_max_simulate_blocks = max_blocks;
self
}
/// Configures the maximum proof window for historical proof generation.
pub const fn eth_proof_window(mut self, window: u64) -> Self {
self.eth_proof_window = window;
self
}
/// Configures the number of getproof requests
pub const fn proof_permits(mut self, permits: usize) -> Self {
self.proof_permits = permits;
self
}
/// Configures the maximum batch size for transaction pool insertions
pub const fn max_batch_size(mut self, max_batch_size: usize) -> Self {
self.max_batch_size = max_batch_size;
self
}
/// Configures the pending block config
pub const fn pending_block_kind(mut self, pending_block_kind: PendingBlockKind) -> Self {
self.pending_block_kind = pending_block_kind;
self
}
/// Configures the raw transaction forwarder.
pub fn raw_tx_forwarder(mut self, tx_forwarder: Option<Url>) -> Self {
if let Some(tx_forwarder) = tx_forwarder {
self.raw_tx_forwarder.tx_forwarder = Some(tx_forwarder);
}
self
}
}
/// Config for the filter
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EthFilterConfig {
/// Maximum number of blocks that a filter can scan for logs.
///
/// If `None` then no limit is enforced.
pub max_blocks_per_filter: Option<u64>,
/// Maximum number of logs that can be returned in a single response in `eth_getLogs` calls.
///
/// If `None` then no limit is enforced.
pub max_logs_per_response: Option<usize>,
/// How long a filter remains valid after the last poll.
///
/// A filter is considered stale if it has not been polled for longer than this duration and
/// will be removed.
pub stale_filter_ttl: Duration,
}
impl EthFilterConfig {
/// Sets the maximum number of blocks that a filter can scan for logs.
pub const fn max_blocks_per_filter(mut self, num: u64) -> Self {
self.max_blocks_per_filter = Some(num);
self
}
/// Sets the maximum number of logs that can be returned in a single response in `eth_getLogs`
/// calls.
pub const fn max_logs_per_response(mut self, num: usize) -> Self {
self.max_logs_per_response = Some(num);
self
}
/// Sets how long a filter remains valid after the last poll before it will be removed.
pub const fn stale_filter_ttl(mut self, duration: Duration) -> Self {
self.stale_filter_ttl = duration;
self
}
}
impl Default for EthFilterConfig {
fn default() -> Self {
Self {
max_blocks_per_filter: None,
max_logs_per_response: None,
// 5min
stale_filter_ttl: Duration::from_secs(5 * 60),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/builder/mod.rs | crates/rpc/rpc-eth-types/src/builder/mod.rs | //! `eth` namespace API builder types.
pub mod config;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/cache/db.rs | crates/rpc/rpc-eth-types/src/cache/db.rs | //! Helper types to workaround 'higher-ranked lifetime error'
//! <https://github.com/rust-lang/rust/issues/100013> in default implementation of
//! `reth_rpc_eth_api::helpers::Call`.
use alloy_primitives::{Address, B256, U256};
use reth_errors::ProviderResult;
use reth_revm::{database::StateProviderDatabase, DatabaseRef};
use reth_storage_api::{BytecodeReader, HashedPostStateProvider, StateProvider};
use reth_trie::{HashedStorage, MultiProofTargets};
use revm::{
database::{BundleState, CacheDB},
primitives::HashMap,
state::{AccountInfo, Bytecode},
Database, DatabaseCommit,
};
use revm::state::FlaggedStorage;
/// Helper alias type for the state's [`CacheDB`]: a `CacheDB` layered over a
/// [`StateProviderDatabase`] that reads through a type-erased [`StateProvider`].
pub type StateCacheDb<'a> = CacheDB<StateProviderDatabase<StateProviderTraitObjWrapper<'a>>>;
/// Hack to get around 'higher-ranked lifetime error', see
/// <https://github.com/rust-lang/rust/issues/100013>
///
/// Newtype over a `&dyn StateProvider`; the trait impls below forward every method to the
/// wrapped provider unchanged.
#[expect(missing_debug_implementations)]
pub struct StateProviderTraitObjWrapper<'a>(pub &'a dyn StateProvider);
// Pure delegation: every state-root computation is forwarded to the wrapped provider.
impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'_> {
    fn state_root(
        &self,
        hashed_state: reth_trie::HashedPostState,
    ) -> reth_errors::ProviderResult<B256> {
        self.0.state_root(hashed_state)
    }
    fn state_root_from_nodes(
        &self,
        input: reth_trie::TrieInput,
    ) -> reth_errors::ProviderResult<B256> {
        self.0.state_root_from_nodes(input)
    }
    fn state_root_with_updates(
        &self,
        hashed_state: reth_trie::HashedPostState,
    ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> {
        self.0.state_root_with_updates(hashed_state)
    }
    fn state_root_from_nodes_with_updates(
        &self,
        input: reth_trie::TrieInput,
    ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> {
        self.0.state_root_from_nodes_with_updates(input)
    }
}
// Pure delegation: storage roots and storage proofs are forwarded to the wrapped provider.
impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> {
    fn storage_root(
        &self,
        address: Address,
        hashed_storage: HashedStorage,
    ) -> ProviderResult<B256> {
        self.0.storage_root(address, hashed_storage)
    }
    fn storage_proof(
        &self,
        address: Address,
        slot: B256,
        hashed_storage: HashedStorage,
    ) -> ProviderResult<reth_trie::StorageProof> {
        self.0.storage_proof(address, slot, hashed_storage)
    }
    fn storage_multiproof(
        &self,
        address: Address,
        slots: &[B256],
        hashed_storage: HashedStorage,
    ) -> ProviderResult<reth_trie::StorageMultiProof> {
        self.0.storage_multiproof(address, slots, hashed_storage)
    }
}
// Pure delegation: account proofs, multiproofs and witnesses go straight to the wrapped provider.
impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> {
    fn proof(
        &self,
        input: reth_trie::TrieInput,
        address: Address,
        slots: &[B256],
    ) -> reth_errors::ProviderResult<reth_trie::AccountProof> {
        self.0.proof(input, address, slots)
    }
    fn multiproof(
        &self,
        input: reth_trie::TrieInput,
        targets: MultiProofTargets,
    ) -> ProviderResult<reth_trie::MultiProof> {
        self.0.multiproof(input, targets)
    }
    fn witness(
        &self,
        input: reth_trie::TrieInput,
        target: reth_trie::HashedPostState,
    ) -> reth_errors::ProviderResult<Vec<alloy_primitives::Bytes>> {
        self.0.witness(input, target)
    }
}
// Pure delegation: account lookup is forwarded to the wrapped provider.
impl reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'_> {
    fn basic_account(
        &self,
        address: &Address,
    ) -> reth_errors::ProviderResult<Option<reth_primitives_traits::Account>> {
        self.0.basic_account(address)
    }
}
// Pure delegation: block-hash queries are forwarded to the wrapped provider.
impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> {
    fn block_hash(
        &self,
        block_number: alloy_primitives::BlockNumber,
    ) -> reth_errors::ProviderResult<Option<B256>> {
        self.0.block_hash(block_number)
    }
    fn convert_block_hash(
        &self,
        hash_or_number: alloy_rpc_types_eth::BlockHashOrNumber,
    ) -> reth_errors::ProviderResult<Option<B256>> {
        self.0.convert_block_hash(hash_or_number)
    }
    fn canonical_hashes_range(
        &self,
        start: alloy_primitives::BlockNumber,
        end: alloy_primitives::BlockNumber,
    ) -> reth_errors::ProviderResult<Vec<B256>> {
        self.0.canonical_hashes_range(start, end)
    }
}
// Pure delegation: hashed post-state construction is forwarded to the wrapped provider.
impl HashedPostStateProvider for StateProviderTraitObjWrapper<'_> {
    fn hashed_post_state(&self, bundle_state: &BundleState) -> reth_trie::HashedPostState {
        self.0.hashed_post_state(bundle_state)
    }
}
// Pure delegation: storage/code/balance/nonce reads are forwarded to the wrapped provider.
// Note the storage value type is `FlaggedStorage` (seismic value-plus-flag storage entry).
impl StateProvider for StateProviderTraitObjWrapper<'_> {
    fn storage(
        &self,
        account: Address,
        storage_key: alloy_primitives::StorageKey,
    ) -> reth_errors::ProviderResult<Option<FlaggedStorage>> {
        self.0.storage(account, storage_key)
    }
    fn account_code(
        &self,
        addr: &Address,
    ) -> reth_errors::ProviderResult<Option<reth_primitives_traits::Bytecode>> {
        self.0.account_code(addr)
    }
    fn account_balance(&self, addr: &Address) -> reth_errors::ProviderResult<Option<U256>> {
        self.0.account_balance(addr)
    }
    fn account_nonce(&self, addr: &Address) -> reth_errors::ProviderResult<Option<u64>> {
        self.0.account_nonce(addr)
    }
}
// Pure delegation: bytecode lookup by code hash is forwarded to the wrapped provider.
impl BytecodeReader for StateProviderTraitObjWrapper<'_> {
    fn bytecode_by_hash(
        &self,
        code_hash: &B256,
    ) -> reth_errors::ProviderResult<Option<reth_primitives_traits::Bytecode>> {
        self.0.bytecode_by_hash(code_hash)
    }
}
/// Hack to get around 'higher-ranked lifetime error', see
/// <https://github.com/rust-lang/rust/issues/100013>
///
/// Mutable-reference wrapper over a [`StateCacheDb`]; the impls below forward the revm
/// database traits to the wrapped cache db.
pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>);

// Lifetimes are elided here (clippy::needless_lifetimes): neither lifetime is referenced in
// the body, matching the style of the `DatabaseCommit` impl below.
impl core::fmt::Debug for StateCacheDbRefMutWrapper<'_, '_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("StateCacheDbRefMutWrapper").finish_non_exhaustive()
    }
}
// Pure delegation of the revm `Database` trait to the wrapped `StateCacheDb`.
// `'a` must stay named here because it appears in the associated `Error` type.
impl<'a> Database for StateCacheDbRefMutWrapper<'a, '_> {
    type Error = <StateCacheDb<'a> as Database>::Error;
    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        self.0.basic(address)
    }
    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.0.code_by_hash(code_hash)
    }
    fn storage(&mut self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
        self.0.storage(address, index)
    }
    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
        self.0.block_hash(number)
    }
}
// Pure delegation of the non-mutating `DatabaseRef` trait to the wrapped `StateCacheDb`.
// `'a` must stay named here because it appears in the associated `Error` type.
impl<'a> DatabaseRef for StateCacheDbRefMutWrapper<'a, '_> {
    type Error = <StateCacheDb<'a> as Database>::Error;
    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        self.0.basic_ref(address)
    }
    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.0.code_by_hash_ref(code_hash)
    }
    fn storage_ref(&self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
        self.0.storage_ref(address, index)
    }
    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
        self.0.block_hash_ref(number)
    }
}
// Pure delegation: state changes are committed into the wrapped `StateCacheDb`.
impl DatabaseCommit for StateCacheDbRefMutWrapper<'_, '_> {
    fn commit(&mut self, changes: HashMap<Address, revm::state::Account>) {
        self.0.commit(changes)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/cache/config.rs | crates/rpc/rpc-eth-types/src/cache/config.rs | //! Configuration for RPC cache.
use serde::{Deserialize, Serialize};
use reth_rpc_server_types::constants::cache::{
DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN,
DEFAULT_RECEIPT_CACHE_MAX_LEN,
};
/// Settings for the [`EthStateCache`](super::EthStateCache).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EthStateCacheConfig {
    /// Max number of blocks in cache.
    ///
    /// Default is 5000.
    pub max_blocks: u32,
    /// Max number of receipts in cache.
    ///
    /// Default is 2000.
    pub max_receipts: u32,
    /// Max number of headers in cache.
    ///
    /// Default is 1000.
    pub max_headers: u32,
    /// Max number of concurrent database requests.
    ///
    /// Default is 512.
    pub max_concurrent_db_requests: usize,
}
impl Default for EthStateCacheConfig {
    fn default() -> Self {
        // Defaults come from the shared server constants so CLI and RPC builders agree.
        Self {
            max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN,
            max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN,
            max_headers: DEFAULT_HEADER_CACHE_MAX_LEN,
            max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs | crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs | //! Metered cache, which also provides storage for senders in order to queue queries that result in
//! a cache miss.
use super::metrics::CacheMetrics;
use reth_primitives_traits::InMemorySize;
use schnellru::{ByLength, Limiter, LruMap};
use std::{
collections::{hash_map::Entry, HashMap},
fmt::{self, Debug, Formatter},
hash::Hash,
};
/// A multi-consumer LRU cache.
///
/// Besides the LRU storage itself it tracks, per key, the senders of in-flight requests so a
/// cache miss is fetched only once and fanned out to all waiting consumers.
pub struct MultiConsumerLruCache<K, V, L, S>
where
    K: Hash + Eq,
    L: Limiter<K, V>,
{
    /// The LRU cache.
    cache: LruMap<K, V, L>,
    /// All queued consumers.
    queued: HashMap<K, Vec<S>>,
    /// Cache metrics
    metrics: CacheMetrics,
    /// Tracked heap usage of the cached values (sum of `InMemorySize::size`).
    memory_usage: usize,
}
// Manual `Debug`: only summary statistics are printed, since `V`/`S` need not be `Debug`.
impl<K, V, L, S> Debug for MultiConsumerLruCache<K, V, L, S>
where
    K: Hash + Eq,
    L: Limiter<K, V>,
{
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("MultiConsumerLruCache")
            .field("cache_length", &self.cache.len())
            .field("cache_memory_usage", &self.cache.memory_usage())
            .field("queued_length", &self.queued.len())
            .field("memory_usage", &self.memory_usage)
            .finish()
    }
}
impl<K, V, L, S> MultiConsumerLruCache<K, V, L, S>
where
    K: Hash + Eq + Debug,
    L: Limiter<K, V>,
{
    /// Adds the sender to the queue for the given key.
    ///
    /// Returns true if this is the first queued sender for the key
    pub fn queue(&mut self, key: K, sender: S) -> bool {
        self.metrics.queued_consumers_count.increment(1.0);
        match self.queued.entry(key) {
            Entry::Occupied(mut entry) => {
                entry.get_mut().push(sender);
                false
            }
            Entry::Vacant(entry) => {
                entry.insert(vec![sender]);
                true
            }
        }
    }
    /// Remove consumers for a given key, this will also remove the key from the cache.
    pub fn remove(&mut self, key: &K) -> Option<Vec<S>>
    where
        V: InMemorySize,
    {
        // Keep the tracked heap usage in sync with the removed cache entry.
        self.cache
            .remove(key)
            .inspect(|value| self.memory_usage = self.memory_usage.saturating_sub(value.size()));
        self.queued
            .remove(key)
            .inspect(|removed| self.metrics.queued_consumers_count.decrement(removed.len() as f64))
    }
    /// Returns a reference to the value for a given key and promotes that element to be the most
    /// recently used.
    pub fn get(&mut self, key: &K) -> Option<&mut V> {
        let entry = self.cache.get(key);
        if entry.is_some() {
            self.metrics.hits_total.increment(1);
        } else {
            self.metrics.misses_total.increment(1);
        }
        entry
    }
    /// Inserts a new element into the map.
    ///
    /// Can fail if the element is rejected by the limiter or if we fail to grow an empty map.
    ///
    /// See [`Schnellru::insert`](LruMap::insert) for more info.
    pub fn insert<'a>(&mut self, key: L::KeyToInsert<'a>, value: V) -> bool
    where
        L::KeyToInsert<'a>: Hash + PartialEq<K>,
        V: InMemorySize,
    {
        let size = value.size();
        // Pre-evict a single oldest entry when at capacity so the tracked memory usage can be
        // decremented with the evicted value's size before the new one is accounted.
        if self.cache.limiter().is_over_the_limit(self.cache.len() + 1) {
            if let Some((_, evicted)) = self.cache.pop_oldest() {
                // update tracked memory with the evicted value
                self.memory_usage = self.memory_usage.saturating_sub(evicted.size());
            }
        }
        // NOTE(review): if this insert replaces an existing entry for `key`, the replaced
        // value's size is never subtracted from `memory_usage` — confirm callers don't
        // re-insert existing keys, otherwise the tracked usage drifts upward.
        if self.cache.insert(key, value) {
            self.memory_usage = self.memory_usage.saturating_add(size);
            true
        } else {
            false
        }
    }
    /// Shrinks the capacity of the queue with a lower limit.
    #[inline]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        self.queued.shrink_to(min_capacity);
    }
    /// Update metrics for the inner cache.
    #[inline]
    pub fn update_cached_metrics(&self) {
        self.metrics.cached_count.set(self.cache.len() as f64);
        self.metrics.memory_usage.set(self.memory_usage as f64);
    }
}
// Constructor is only provided for the entry-count-bounded (`ByLength`) limiter, which is how
// the eth state cache instantiates its block/receipt/header caches.
impl<K, V, S> MultiConsumerLruCache<K, V, ByLength, S>
where
    K: Hash + Eq,
{
    /// Creates a new empty map with a given `max_len` and metric label.
    pub fn new(max_len: u32, cache_id: &str) -> Self {
        Self {
            cache: LruMap::new(ByLength::new(max_len)),
            queued: Default::default(),
            metrics: CacheMetrics::new_with_labels(&[("cache", cache_id.to_string())]),
            memory_usage: 0,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/cache/mod.rs | crates/rpc/rpc-eth-types/src/cache/mod.rs | //! Async caching support for eth RPC
use super::{EthStateCacheConfig, MultiConsumerLruCache};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::B256;
use futures::{future::Either, stream::FuturesOrdered, Stream, StreamExt};
use reth_chain_state::CanonStateNotification;
use reth_errors::{ProviderError, ProviderResult};
use reth_execution_types::Chain;
use reth_primitives_traits::{Block, BlockBody, NodePrimitives, RecoveredBlock};
use reth_storage_api::{BlockReader, TransactionVariant};
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use schnellru::{ByLength, Limiter};
use std::{
future::Future,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::{
mpsc::{unbounded_channel, UnboundedSender},
oneshot, Semaphore,
};
use tokio_stream::wrappers::UnboundedReceiverStream;
pub mod config;
pub mod db;
pub mod metrics;
pub mod multi_consumer;
/// The type that can send the response with the transactions of a requested block
type BlockTransactionsResponseSender<T> = oneshot::Sender<ProviderResult<Option<Vec<T>>>>;
/// The type that can send the response to a requested [`RecoveredBlock`]
type BlockWithSendersResponseSender<B> =
    oneshot::Sender<ProviderResult<Option<Arc<RecoveredBlock<B>>>>>;
/// The type that can send the response to the requested receipts of a block.
type ReceiptsResponseSender<R> = oneshot::Sender<ProviderResult<Option<Arc<Vec<R>>>>>;
/// The type that can send a cache-only block lookup result (no DB fallback, hence no error).
type CachedBlockResponseSender<B> = oneshot::Sender<Option<Arc<RecoveredBlock<B>>>>;
/// The type that can send a cache-only (block, receipts) lookup result.
type CachedBlockAndReceiptsResponseSender<B, R> =
    oneshot::Sender<(Option<Arc<RecoveredBlock<B>>>, Option<Arc<Vec<R>>>)>;
/// The type that can send the response to a requested header
type HeaderResponseSender<H> = oneshot::Sender<ProviderResult<H>>;
/// The type that can send the response with a chain of cached blocks
type CachedParentBlocksResponseSender<B> = oneshot::Sender<Vec<Arc<RecoveredBlock<B>>>>;
/// Block cache keyed by hash; a queued consumer either wants the full recovered block (left)
/// or just its transactions (right).
type BlockLruCache<B, L> = MultiConsumerLruCache<
    B256,
    Arc<RecoveredBlock<B>>,
    L,
    Either<
        BlockWithSendersResponseSender<B>,
        BlockTransactionsResponseSender<<<B as Block>::Body as BlockBody>::Transaction>,
    >,
>;
/// Receipts cache keyed by block hash.
type ReceiptsLruCache<R, L> =
    MultiConsumerLruCache<B256, Arc<Vec<R>>, L, ReceiptsResponseSender<R>>;
/// Header cache keyed by block hash.
type HeaderLruCache<H, L> = MultiConsumerLruCache<B256, H, L, HeaderResponseSender<H>>;
/// Provides async access to cached eth data
///
/// This is the frontend for the async caching service which manages cached data on a different
/// task.
#[derive(Debug)]
pub struct EthStateCache<N: NodePrimitives> {
    /// Channel to the spawned [`EthStateCacheService`]; all requests are message-passed.
    to_service: UnboundedSender<CacheAction<N::Block, N::Receipt>>,
}
// Manual impl: a derived `Clone` would add an unneeded `N: Clone` bound; only the channel
// sender needs cloning.
impl<N: NodePrimitives> Clone for EthStateCache<N> {
    fn clone(&self) -> Self {
        Self { to_service: self.to_service.clone() }
    }
}
impl<N: NodePrimitives> EthStateCache<N> {
    /// Creates and returns both [`EthStateCache`] frontend and the memory bound service.
    fn create<Provider, Tasks>(
        provider: Provider,
        action_task_spawner: Tasks,
        max_blocks: u32,
        max_receipts: u32,
        max_headers: u32,
        max_concurrent_db_operations: usize,
    ) -> (Self, EthStateCacheService<Provider, Tasks>)
    where
        Provider: BlockReader<Block = N::Block, Receipt = N::Receipt>,
    {
        let (to_service, rx) = unbounded_channel();
        let service = EthStateCacheService {
            provider,
            full_block_cache: BlockLruCache::new(max_blocks, "blocks"),
            receipts_cache: ReceiptsLruCache::new(max_receipts, "receipts"),
            headers_cache: HeaderLruCache::new(max_headers, "headers"),
            // the service keeps a sender clone so spawned fetch tasks can post results back
            action_tx: to_service.clone(),
            action_rx: UnboundedReceiverStream::new(rx),
            action_task_spawner,
            rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)),
        };
        let cache = Self { to_service };
        (cache, service)
    }
    /// Creates a new async LRU backed cache service task and spawns it to a new task via
    /// [`tokio::spawn`].
    ///
    /// See also [`Self::spawn_with`]
    pub fn spawn<Provider>(provider: Provider, config: EthStateCacheConfig) -> Self
    where
        Provider: BlockReader<Block = N::Block, Receipt = N::Receipt> + Clone + Unpin + 'static,
    {
        Self::spawn_with(provider, config, TokioTaskExecutor::default())
    }
    /// Creates a new async LRU backed cache service task and spawns it to a new task via the given
    /// spawner.
    ///
    /// The cache is memory limited by the given max bytes values.
    pub fn spawn_with<Provider, Tasks>(
        provider: Provider,
        config: EthStateCacheConfig,
        executor: Tasks,
    ) -> Self
    where
        Provider: BlockReader<Block = N::Block, Receipt = N::Receipt> + Clone + Unpin + 'static,
        Tasks: TaskSpawner + Clone + 'static,
    {
        let EthStateCacheConfig {
            max_blocks,
            max_receipts,
            max_headers,
            max_concurrent_db_requests,
        } = config;
        let (this, service) = Self::create(
            provider,
            executor.clone(),
            max_blocks,
            max_receipts,
            max_headers,
            max_concurrent_db_requests,
        );
        executor.spawn_critical("eth state cache", Box::pin(service));
        this
    }
    /// Requests the [`RecoveredBlock`] for the block hash
    ///
    /// Returns `None` if the block does not exist.
    pub async fn get_recovered_block(
        &self,
        block_hash: B256,
    ) -> ProviderResult<Option<Arc<RecoveredBlock<N::Block>>>> {
        let (response_tx, rx) = oneshot::channel();
        // a send failure means the service is gone; `rx.await` below surfaces that as
        // `CacheServiceUnavailable`, so the send result can be ignored here
        let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx });
        rx.await.map_err(|_| CacheServiceUnavailable)?
    }
    /// Requests the receipts for the block hash
    ///
    /// Returns `None` if the block was not found.
    pub async fn get_receipts(
        &self,
        block_hash: B256,
    ) -> ProviderResult<Option<Arc<Vec<N::Receipt>>>> {
        let (response_tx, rx) = oneshot::channel();
        let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx })
        ;
        rx.await.map_err(|_| CacheServiceUnavailable)?
    }
    /// Fetches both receipts and block for the given block hash.
    pub async fn get_block_and_receipts(
        &self,
        block_hash: B256,
    ) -> ProviderResult<Option<(Arc<RecoveredBlock<N::Block>>, Arc<Vec<N::Receipt>>)>> {
        // issue both requests concurrently; the service answers each independently
        let block = self.get_recovered_block(block_hash);
        let receipts = self.get_receipts(block_hash);
        let (block, receipts) = futures::try_join!(block, receipts)?;
        Ok(block.zip(receipts))
    }
    /// Retrieves receipts and blocks from cache if block is in the cache, otherwise only receipts.
    pub async fn get_receipts_and_maybe_block(
        &self,
        block_hash: B256,
    ) -> ProviderResult<Option<(Arc<Vec<N::Receipt>>, Option<Arc<RecoveredBlock<N::Block>>>)>> {
        let (response_tx, rx) = oneshot::channel();
        let _ = self.to_service.send(CacheAction::GetCachedBlock { block_hash, response_tx });
        let receipts = self.get_receipts(block_hash);
        let (receipts, block) = futures::join!(receipts, rx);
        let block = block.map_err(|_| CacheServiceUnavailable)?;
        Ok(receipts?.map(|r| (r, block)))
    }
    /// Retrieves both block and receipts from cache if available.
    pub async fn maybe_cached_block_and_receipts(
        &self,
        block_hash: B256,
    ) -> ProviderResult<(Option<Arc<RecoveredBlock<N::Block>>>, Option<Arc<Vec<N::Receipt>>>)> {
        let (response_tx, rx) = oneshot::channel();
        let _ = self
            .to_service
            .send(CacheAction::GetCachedBlockAndReceipts { block_hash, response_tx });
        rx.await.map_err(|_| CacheServiceUnavailable.into())
    }
    /// Streams cached receipts and blocks for a list of block hashes, preserving input order.
    #[allow(clippy::type_complexity)]
    pub fn get_receipts_and_maybe_block_stream<'a>(
        &'a self,
        hashes: Vec<B256>,
    ) -> impl Stream<
        Item = ProviderResult<
            Option<(Arc<Vec<N::Receipt>>, Option<Arc<RecoveredBlock<N::Block>>>)>,
        >,
    > + 'a {
        // FuturesOrdered yields results in submission order even though fetches overlap
        let futures = hashes.into_iter().map(move |hash| self.get_receipts_and_maybe_block(hash));
        futures.collect::<FuturesOrdered<_>>()
    }
    /// Requests the header for the given hash.
    ///
    /// Returns an error if the header is not found.
    pub async fn get_header(&self, block_hash: B256) -> ProviderResult<N::BlockHeader> {
        let (response_tx, rx) = oneshot::channel();
        let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx });
        rx.await.map_err(|_| CacheServiceUnavailable)?
    }
    /// Retrieves a chain of connected blocks from the cache, starting from the given block hash
    /// and traversing down through parent hashes. Returns blocks in descending order (newest
    /// first).
    /// This is useful for efficiently retrieving a sequence of blocks that might already be in
    /// cache without making separate database requests.
    /// Returns `None` if no blocks are found in the cache, otherwise returns `Some(Vec<...>)`
    /// with at least one block.
    pub async fn get_cached_parent_blocks(
        &self,
        block_hash: B256,
        max_blocks: usize,
    ) -> Option<Vec<Arc<RecoveredBlock<N::Block>>>> {
        let (response_tx, rx) = oneshot::channel();
        let _ = self.to_service.send(CacheAction::GetCachedParentBlocks {
            block_hash,
            max_blocks,
            response_tx,
        });
        // a dropped service is treated the same as an empty result
        let blocks = rx.await.unwrap_or_default();
        if blocks.is_empty() {
            None
        } else {
            Some(blocks)
        }
    }
}
/// Thrown when the cache service task dropped.
#[derive(Debug, thiserror::Error)]
#[error("cache service task stopped")]
pub struct CacheServiceUnavailable;
// Allows `?` conversion into `ProviderError` at the cache frontend call sites.
impl From<CacheServiceUnavailable> for ProviderError {
    fn from(err: CacheServiceUnavailable) -> Self {
        Self::other(err)
    }
}
/// A task that manages caches for data required by the `eth` rpc implementation.
///
/// It provides a caching layer on top of the given
/// [`StateProvider`](reth_storage_api::StateProvider) and keeps data fetched via the provider in
/// memory in an LRU cache. If the requested data is missing in the cache it is fetched and inserted
/// into the cache afterwards. While fetching data from disk is sync, this service is async since
/// requests and data is shared via channels.
///
/// This type is an endless future that listens for incoming messages from the user facing
/// [`EthStateCache`] via a channel. If the requested data is not cached then it spawns a new task
/// that does the IO and sends the result back to it. This way the caching service only
/// handles messages and does LRU lookups and never blocking IO.
///
/// Caution: The channel for the data is _unbounded_ it is assumed that this is mainly used by the
/// `reth_rpc::EthApi` which is typically invoked by the RPC server, which already uses
/// permits to limit concurrent requests.
#[must_use = "Type does nothing unless spawned"]
pub(crate) struct EthStateCacheService<
    Provider,
    Tasks,
    LimitBlocks = ByLength,
    LimitReceipts = ByLength,
    LimitHeaders = ByLength,
> where
    Provider: BlockReader,
    LimitBlocks: Limiter<B256, Arc<RecoveredBlock<Provider::Block>>>,
    LimitReceipts: Limiter<B256, Arc<Vec<Provider::Receipt>>>,
    LimitHeaders: Limiter<B256, Provider::Header>,
{
    /// The type used to lookup data from disk
    provider: Provider,
    /// The LRU cache for full blocks grouped by their block hash.
    full_block_cache: BlockLruCache<Provider::Block, LimitBlocks>,
    /// The LRU cache for block receipts grouped by the block hash.
    receipts_cache: ReceiptsLruCache<Provider::Receipt, LimitReceipts>,
    /// The LRU cache for headers.
    ///
    /// Headers are cached because they are required to populate the environment for execution
    /// (evm).
    headers_cache: HeaderLruCache<Provider::Header, LimitHeaders>,
    /// Sender half of the action channel (cloned into spawned fetch tasks so they can post
    /// their results back to this service).
    action_tx: UnboundedSender<CacheAction<Provider::Block, Provider::Receipt>>,
    /// Receiver half of the action channel.
    action_rx: UnboundedReceiverStream<CacheAction<Provider::Block, Provider::Receipt>>,
    /// The type that's used to spawn tasks that do the actual work
    action_task_spawner: Tasks,
    /// Rate limiter for spawned fetch tasks.
    ///
    /// This restricts the max concurrent fetch tasks at the same time.
    rate_limiter: Arc<Semaphore>,
}
impl<Provider, Tasks> EthStateCacheService<Provider, Tasks>
where
    Provider: BlockReader + Clone + Unpin + 'static,
    Tasks: TaskSpawner + Clone + 'static,
{
    /// Handles a freshly fetched (or canonically committed) block: answers all queued
    /// consumers for the hash and caches the block on success.
    fn on_new_block(
        &mut self,
        block_hash: B256,
        res: ProviderResult<Option<Arc<RecoveredBlock<Provider::Block>>>>,
    ) {
        if let Some(queued) = self.full_block_cache.remove(&block_hash) {
            // send the response to queued senders
            for tx in queued {
                match tx {
                    Either::Left(block_with_senders) => {
                        let _ = block_with_senders.send(res.clone());
                    }
                    Either::Right(transaction_tx) => {
                        // this consumer only wants the block's transactions
                        let _ = transaction_tx.send(res.clone().map(|maybe_block| {
                            maybe_block.map(|block| block.body().transactions().to_vec())
                        }));
                    }
                }
            }
        }
        // cache good block
        if let Ok(Some(block)) = res {
            self.full_block_cache.insert(block_hash, block);
        }
    }
    /// Handles freshly fetched receipts: answers queued consumers and caches on success.
    fn on_new_receipts(
        &mut self,
        block_hash: B256,
        res: ProviderResult<Option<Arc<Vec<Provider::Receipt>>>>,
    ) {
        if let Some(queued) = self.receipts_cache.remove(&block_hash) {
            // send the response to queued senders
            for tx in queued {
                let _ = tx.send(res.clone());
            }
        }
        // cache good receipts
        if let Ok(Some(receipts)) = res {
            self.receipts_cache.insert(block_hash, receipts);
        }
    }
    /// Handles a reorged-out block: evicts it from the cache and answers any queued consumers,
    /// but (unlike [`Self::on_new_block`]) does NOT re-insert it into the cache.
    fn on_reorg_block(
        &mut self,
        block_hash: B256,
        res: ProviderResult<Option<RecoveredBlock<Provider::Block>>>,
    ) {
        let res = res.map(|b| b.map(Arc::new));
        if let Some(queued) = self.full_block_cache.remove(&block_hash) {
            // send the response to queued senders
            for tx in queued {
                match tx {
                    Either::Left(block_with_senders) => {
                        let _ = block_with_senders.send(res.clone());
                    }
                    Either::Right(transaction_tx) => {
                        let _ = transaction_tx.send(res.clone().map(|maybe_block| {
                            maybe_block.map(|block| block.body().transactions().to_vec())
                        }));
                    }
                }
            }
        }
    }
    /// Handles reorged-out receipts: evicts them and answers queued consumers without
    /// re-caching.
    fn on_reorg_receipts(
        &mut self,
        block_hash: B256,
        res: ProviderResult<Option<Arc<Vec<Provider::Receipt>>>>,
    ) {
        if let Some(queued) = self.receipts_cache.remove(&block_hash) {
            // send the response to queued senders
            for tx in queued {
                let _ = tx.send(res.clone());
            }
        }
    }
    /// Shrinks the queues but leaves some space for the next requests
    fn shrink_queues(&mut self) {
        let min_capacity = 2;
        self.full_block_cache.shrink_to(min_capacity);
        self.receipts_cache.shrink_to(min_capacity);
        self.headers_cache.shrink_to(min_capacity);
    }
    /// Pushes the current cache sizes/memory usage into the metrics recorders.
    fn update_cached_metrics(&self) {
        self.full_block_cache.update_cached_metrics();
        self.receipts_cache.update_cached_metrics();
        self.headers_cache.update_cached_metrics();
    }
}
impl<Provider, Tasks> Future for EthStateCacheService<Provider, Tasks>
where
Provider: BlockReader + Clone + Unpin + 'static,
Tasks: TaskSpawner + Clone + 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
loop {
let Poll::Ready(action) = this.action_rx.poll_next_unpin(cx) else {
// shrink queues if we don't have any work to do
this.shrink_queues();
return Poll::Pending;
};
match action {
None => {
unreachable!("can't close")
}
Some(action) => {
match action {
CacheAction::GetCachedBlock { block_hash, response_tx } => {
let _ =
response_tx.send(this.full_block_cache.get(&block_hash).cloned());
}
CacheAction::GetCachedBlockAndReceipts { block_hash, response_tx } => {
let block = this.full_block_cache.get(&block_hash).cloned();
let receipts = this.receipts_cache.get(&block_hash).cloned();
let _ = response_tx.send((block, receipts));
}
CacheAction::GetBlockWithSenders { block_hash, response_tx } => {
if let Some(block) = this.full_block_cache.get(&block_hash).cloned() {
let _ = response_tx.send(Ok(Some(block)));
continue
}
// block is not in the cache, request it if this is the first consumer
if this.full_block_cache.queue(block_hash, Either::Left(response_tx)) {
let provider = this.provider.clone();
let action_tx = this.action_tx.clone();
let rate_limiter = this.rate_limiter.clone();
let mut action_sender =
ActionSender::new(CacheKind::Block, block_hash, action_tx);
this.action_task_spawner.spawn_blocking(Box::pin(async move {
// Acquire permit
let _permit = rate_limiter.acquire().await;
// Only look in the database to prevent situations where we
// looking up the tree is blocking
let block_sender = provider
.sealed_block_with_senders(
BlockHashOrNumber::Hash(block_hash),
TransactionVariant::WithHash,
)
.map(|maybe_block| maybe_block.map(Arc::new));
action_sender.send_block(block_sender);
}));
}
}
CacheAction::GetReceipts { block_hash, response_tx } => {
// check if block is cached
if let Some(receipts) = this.receipts_cache.get(&block_hash).cloned() {
let _ = response_tx.send(Ok(Some(receipts)));
continue
}
// block is not in the cache, request it if this is the first consumer
if this.receipts_cache.queue(block_hash, response_tx) {
let provider = this.provider.clone();
let action_tx = this.action_tx.clone();
let rate_limiter = this.rate_limiter.clone();
let mut action_sender =
ActionSender::new(CacheKind::Receipt, block_hash, action_tx);
this.action_task_spawner.spawn_blocking(Box::pin(async move {
// Acquire permit
let _permit = rate_limiter.acquire().await;
let res = provider
.receipts_by_block(block_hash.into())
.map(|maybe_receipts| maybe_receipts.map(Arc::new));
action_sender.send_receipts(res);
}));
}
}
CacheAction::GetHeader { block_hash, response_tx } => {
// check if the header is cached
if let Some(header) = this.headers_cache.get(&block_hash).cloned() {
let _ = response_tx.send(Ok(header));
continue
}
// it's possible we have the entire block cached
if let Some(block) = this.full_block_cache.get(&block_hash) {
let _ = response_tx.send(Ok(block.clone_header()));
continue
}
// header is not in the cache, request it if this is the first
// consumer
if this.headers_cache.queue(block_hash, response_tx) {
let provider = this.provider.clone();
let action_tx = this.action_tx.clone();
let rate_limiter = this.rate_limiter.clone();
let mut action_sender =
ActionSender::new(CacheKind::Header, block_hash, action_tx);
this.action_task_spawner.spawn_blocking(Box::pin(async move {
// Acquire permit
let _permit = rate_limiter.acquire().await;
let header = provider.header(&block_hash).and_then(|header| {
header.ok_or_else(|| {
ProviderError::HeaderNotFound(block_hash.into())
})
});
action_sender.send_header(header);
}));
}
}
CacheAction::ReceiptsResult { block_hash, res } => {
this.on_new_receipts(block_hash, res);
}
CacheAction::BlockWithSendersResult { block_hash, res } => match res {
Ok(Some(block_with_senders)) => {
this.on_new_block(block_hash, Ok(Some(block_with_senders)));
}
Ok(None) => {
this.on_new_block(block_hash, Ok(None));
}
Err(e) => {
this.on_new_block(block_hash, Err(e));
}
},
CacheAction::HeaderResult { block_hash, res } => {
let res = *res;
if let Some(queued) = this.headers_cache.remove(&block_hash) {
// send the response to queued senders
for tx in queued {
let _ = tx.send(res.clone());
}
}
// cache good header
if let Ok(data) = res {
this.headers_cache.insert(block_hash, data);
}
}
CacheAction::CacheNewCanonicalChain { chain_change } => {
for block in chain_change.blocks {
this.on_new_block(block.hash(), Ok(Some(Arc::new(block))));
}
for block_receipts in chain_change.receipts {
this.on_new_receipts(
block_receipts.block_hash,
Ok(Some(Arc::new(block_receipts.receipts))),
);
}
}
CacheAction::RemoveReorgedChain { chain_change } => {
for block in chain_change.blocks {
this.on_reorg_block(block.hash(), Ok(Some(block)));
}
for block_receipts in chain_change.receipts {
this.on_reorg_receipts(
block_receipts.block_hash,
Ok(Some(Arc::new(block_receipts.receipts))),
);
}
}
CacheAction::GetCachedParentBlocks {
block_hash,
max_blocks,
response_tx,
} => {
let mut blocks = Vec::new();
let mut current_hash = block_hash;
// Start with the requested block
while blocks.len() < max_blocks {
if let Some(block) =
this.full_block_cache.get(¤t_hash).cloned()
{
// Get the parent hash for the next iteration
current_hash = block.header().parent_hash();
blocks.push(block);
} else {
// Break the loop if we can't find the current block
break;
}
}
let _ = response_tx.send(blocks);
}
};
this.update_cached_metrics();
}
}
}
}
}
/// All message variants sent through the channel
enum CacheAction<B: Block, R> {
    /// Request a recovered block by hash (fetched from the provider on a cache miss).
    GetBlockWithSenders {
        block_hash: B256,
        response_tx: BlockWithSendersResponseSender<B>,
    },
    /// Request a header by hash (served from the header or full-block cache, else fetched).
    GetHeader {
        block_hash: B256,
        response_tx: HeaderResponseSender<B::Header>,
    },
    /// Request a block's receipts by hash (fetched from the provider on a cache miss).
    GetReceipts {
        block_hash: B256,
        response_tx: ReceiptsResponseSender<R>,
    },
    /// Cache-only block lookup; never falls back to the database.
    GetCachedBlock {
        block_hash: B256,
        response_tx: CachedBlockResponseSender<B>,
    },
    /// Cache-only (block, receipts) lookup; never falls back to the database.
    GetCachedBlockAndReceipts {
        block_hash: B256,
        response_tx: CachedBlockAndReceiptsResponseSender<B, R>,
    },
    /// Result posted back by a spawned block fetch task.
    BlockWithSendersResult {
        block_hash: B256,
        res: ProviderResult<Option<Arc<RecoveredBlock<B>>>>,
    },
    /// Result posted back by a spawned receipts fetch task.
    ReceiptsResult {
        block_hash: B256,
        res: ProviderResult<Option<Arc<Vec<R>>>>,
    },
    /// Result posted back by a spawned header fetch task.
    HeaderResult {
        block_hash: B256,
        // boxed — presumably to keep this variant (and thus the enum) small; confirm
        res: Box<ProviderResult<B::Header>>,
    },
    /// Pre-populate the caches with blocks/receipts of a newly committed canonical chain.
    CacheNewCanonicalChain {
        chain_change: ChainChange<B, R>,
    },
    /// Evict blocks/receipts of a reorged-out chain segment from the caches.
    RemoveReorgedChain {
        chain_change: ChainChange<B, R>,
    },
    /// Walk cached blocks newest-first via parent hashes, up to `max_blocks`.
    GetCachedParentBlocks {
        block_hash: B256,
        max_blocks: usize,
        response_tx: CachedParentBlocksResponseSender<B>,
    },
}
struct BlockReceipts<R> {
block_hash: B256,
receipts: Vec<R>,
}
/// A change of the canonical chain
struct ChainChange<B: Block, R> {
blocks: Vec<RecoveredBlock<B>>,
receipts: Vec<BlockReceipts<R>>,
}
impl<B: Block, R: Clone> ChainChange<B, R> {
fn new<N>(chain: Arc<Chain<N>>) -> Self
where
N: NodePrimitives<Block = B, Receipt = R>,
{
let (blocks, receipts): (Vec<_>, Vec<_>) = chain
.blocks_and_receipts()
.map(|(block, receipts)| {
let block_receipts =
BlockReceipts { block_hash: block.hash(), receipts: receipts.clone() };
(block.clone(), block_receipts)
})
.unzip();
Self { blocks, receipts }
}
}
/// Identifier for the caches.
#[derive(Copy, Clone, Debug)]
enum CacheKind {
Block,
Receipt,
Header,
}
/// Drop aware sender struct that ensures a response is always emitted even if the db task panics
/// before a result could be sent.
///
/// This type wraps a sender and in case the sender is still present on drop emit an error response.
#[derive(Debug)]
struct ActionSender<B: Block, R: Send + Sync> {
kind: CacheKind,
blockhash: B256,
tx: Option<UnboundedSender<CacheAction<B, R>>>,
}
impl<R: Send + Sync, B: Block> ActionSender<B, R> {
const fn new(kind: CacheKind, blockhash: B256, tx: UnboundedSender<CacheAction<B, R>>) -> Self {
Self { kind, blockhash, tx: Some(tx) }
}
fn send_block(&mut self, block_sender: Result<Option<Arc<RecoveredBlock<B>>>, ProviderError>) {
if let Some(tx) = self.tx.take() {
let _ = tx.send(CacheAction::BlockWithSendersResult {
block_hash: self.blockhash,
res: block_sender,
});
}
}
fn send_receipts(&mut self, receipts: Result<Option<Arc<Vec<R>>>, ProviderError>) {
if let Some(tx) = self.tx.take() {
let _ =
tx.send(CacheAction::ReceiptsResult { block_hash: self.blockhash, res: receipts });
}
}
fn send_header(&mut self, header: Result<<B as Block>::Header, ProviderError>) {
if let Some(tx) = self.tx.take() {
let _ = tx.send(CacheAction::HeaderResult {
block_hash: self.blockhash,
res: Box::new(header),
});
}
}
}
impl<R: Send + Sync, B: Block> Drop for ActionSender<B, R> {
fn drop(&mut self) {
if let Some(tx) = self.tx.take() {
let msg = match self.kind {
CacheKind::Block => CacheAction::BlockWithSendersResult {
block_hash: self.blockhash,
res: Err(CacheServiceUnavailable.into()),
},
CacheKind::Receipt => CacheAction::ReceiptsResult {
block_hash: self.blockhash,
res: Err(CacheServiceUnavailable.into()),
},
CacheKind::Header => CacheAction::HeaderResult {
block_hash: self.blockhash,
res: Box::new(Err(CacheServiceUnavailable.into())),
},
};
let _ = tx.send(msg);
}
}
}
/// Awaits for new chain events and directly inserts them into the cache so they're available
/// immediately before they need to be fetched from disk.
///
/// Reorged blocks are removed from the cache.
pub async fn cache_new_blocks_task<St, N: NodePrimitives>(
eth_state_cache: EthStateCache<N>,
mut events: St,
) where
St: Stream<Item = CanonStateNotification<N>> + Unpin + 'static,
{
while let Some(event) = events.next().await {
if let Some(reverted) = event.reverted() {
let chain_change = ChainChange::new(reverted);
let _ =
eth_state_cache.to_service.send(CacheAction::RemoveReorgedChain { chain_change });
}
let chain_change = ChainChange::new(event.committed());
let _ =
eth_state_cache.to_service.send(CacheAction::CacheNewCanonicalChain { chain_change });
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/cache/metrics.rs | crates/rpc/rpc-eth-types/src/cache/metrics.rs | //! Tracks state of RPC cache.
use metrics::Counter;
use reth_metrics::{metrics::Gauge, Metrics};
#[derive(Metrics)]
#[metrics(scope = "rpc.eth_cache")]
pub(crate) struct CacheMetrics {
/// The number of entities in the cache.
pub(crate) cached_count: Gauge,
/// The number of queued consumers.
pub(crate) queued_consumers_count: Gauge,
/// The number of cache hits.
pub(crate) hits_total: Counter,
/// The number of cache misses.
pub(crate) misses_total: Counter,
/// The memory usage of the cache.
pub(crate) memory_usage: Gauge,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/error/api.rs | crates/rpc/rpc-eth-types/src/error/api.rs | //! Helper traits to wrap generic l1 errors, in network specific error type configured in
//! `reth_rpc_eth_api::EthApiTypes`.
use crate::EthApiError;
use reth_errors::ProviderError;
use reth_evm::{ConfigureEvm, EvmErrorFor, HaltReasonFor};
use revm::context_interface::result::HaltReason;
use seismic_revm::SeismicHaltReason;
use super::RpcInvalidTransactionError;
/// Helper trait to wrap core [`EthApiError`].
pub trait FromEthApiError: From<EthApiError> {
/// Converts from error via [`EthApiError`].
fn from_eth_err<E>(err: E) -> Self
where
EthApiError: From<E>;
}
impl<T> FromEthApiError for T
where
T: From<EthApiError>,
{
fn from_eth_err<E>(err: E) -> Self
where
EthApiError: From<E>,
{
T::from(EthApiError::from(err))
}
}
/// Helper trait to wrap core [`EthApiError`].
pub trait IntoEthApiError: Into<EthApiError> {
/// Converts into error via [`EthApiError`].
fn into_eth_err<E>(self) -> E
where
E: FromEthApiError;
}
impl<T> IntoEthApiError for T
where
EthApiError: From<T>,
{
fn into_eth_err<E>(self) -> E
where
E: FromEthApiError,
{
E::from_eth_err(self)
}
}
/// Helper trait to access wrapped core error.
pub trait AsEthApiError {
/// Returns reference to [`EthApiError`], if this an error variant inherited from core
/// functionality.
fn as_err(&self) -> Option<&EthApiError>;
/// Returns `true` if error is
/// [`RpcInvalidTransactionError::GasTooHigh`].
fn is_gas_too_high(&self) -> bool {
if let Some(err) = self.as_err() {
return err.is_gas_too_high()
}
false
}
/// Returns `true` if error is
/// [`RpcInvalidTransactionError::GasTooLow`].
fn is_gas_too_low(&self) -> bool {
if let Some(err) = self.as_err() {
return err.is_gas_too_low()
}
false
}
}
impl AsEthApiError for EthApiError {
fn as_err(&self) -> Option<&EthApiError> {
Some(self)
}
}
/// Helper trait to convert from revm errors.
pub trait FromEvmError<Evm: ConfigureEvm>:
From<EvmErrorFor<Evm, ProviderError>> + FromEvmHalt<HaltReasonFor<Evm>>
{
/// Converts from EVM error to this type.
fn from_evm_err(err: EvmErrorFor<Evm, ProviderError>) -> Self {
err.into()
}
}
impl<T, Evm> FromEvmError<Evm> for T
where
T: From<EvmErrorFor<Evm, ProviderError>> + FromEvmHalt<HaltReasonFor<Evm>>,
Evm: ConfigureEvm,
{
}
/// Helper trait to convert from revm errors.
pub trait FromEvmHalt<Halt> {
/// Converts from EVM halt to this type.
fn from_evm_halt(halt: Halt, gas_limit: u64) -> Self;
}
impl FromEvmHalt<HaltReason> for EthApiError {
fn from_evm_halt(halt: HaltReason, gas_limit: u64) -> Self {
RpcInvalidTransactionError::halt(halt, gas_limit).into()
}
}
impl FromEvmHalt<SeismicHaltReason> for EthApiError {
fn from_evm_halt(halt: SeismicHaltReason, gas_limit: u64) -> Self {
match halt {
SeismicHaltReason::Base(reason) => EthApiError::from_evm_halt(reason, gas_limit),
SeismicHaltReason::InvalidPrivateStorageAccess => {
EthApiError::EvmCustom("Invalid Private Storage Access".to_string())
}
SeismicHaltReason::InvalidPublicStorageAccess => {
EthApiError::EvmCustom("Invalid Public Storage Access".to_string())
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-types/src/error/mod.rs | crates/rpc/rpc-eth-types/src/error/mod.rs | //! Implementation specific Errors for the `eth_` namespace.
pub mod api;
use crate::error::api::FromEvmHalt;
use alloy_eips::BlockId;
use alloy_evm::{call::CallError, overrides::StateOverrideError};
use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError};
use alloy_sol_types::{ContractError, RevertReason};
use alloy_transport::{RpcError, TransportErrorKind};
pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError};
use core::time::Duration;
use reth_errors::{BlockExecutionError, BlockValidationError, RethError};
use reth_primitives_traits::transaction::{error::InvalidTransactionError, signed::RecoveryError};
use reth_rpc_convert::{CallFeesError, EthTxEnvError, TransactionConversionError};
use reth_rpc_server_types::result::{
block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code,
};
use reth_transaction_pool::error::{
Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError,
PoolError, PoolErrorKind, PoolTransactionError,
};
use revm::context_interface::result::{
EVMError, ExecutionResult, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError,
};
use revm_inspectors::tracing::MuxError;
use std::convert::Infallible;
use tokio::sync::oneshot::error::RecvError;
/// A trait to convert an error to an RPC error.
pub trait ToRpcError: core::error::Error + Send + Sync + 'static {
/// Converts the error to a JSON-RPC error object.
fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static>;
}
impl ToRpcError for jsonrpsee_types::ErrorObject<'static> {
fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static> {
self.clone()
}
}
impl ToRpcError for RpcError<TransportErrorKind> {
fn to_rpc_error(&self) -> jsonrpsee_types::ErrorObject<'static> {
match self {
Self::ErrorResp(payload) => jsonrpsee_types::error::ErrorObject::owned(
payload.code as i32,
payload.message.clone(),
payload.data.clone(),
),
err => internal_rpc_err(err.to_string()),
}
}
}
/// Result alias
pub type EthResult<T> = Result<T, EthApiError>;
/// Errors that can occur when interacting with the `eth_` namespace
#[derive(Debug, thiserror::Error)]
pub enum EthApiError {
/// When a raw transaction is empty
#[error("empty transaction data")]
EmptyRawTransactionData,
/// When decoding a signed transaction fails
#[error("failed to decode signed transaction")]
FailedToDecodeSignedTransaction,
/// When the transaction signature is invalid
#[error("invalid transaction signature")]
InvalidTransactionSignature,
/// Errors related to the transaction pool
#[error(transparent)]
PoolError(RpcPoolError),
/// Header not found for block hash/number/tag
#[error("header not found")]
HeaderNotFound(BlockId),
/// Header range not found for start block hash/number/tag to end block hash/number/tag
#[error("header range not found, start block {0:?}, end block {1:?}")]
HeaderRangeNotFound(BlockId, BlockId),
/// Thrown when historical data is not available because it has been pruned
///
/// This error is intended for use as a standard response when historical data is
/// requested that has been pruned according to the node's data retention policy.
///
/// See also <https://eips.ethereum.org/EIPS/eip-4444>
#[error("pruned history unavailable")]
PrunedHistoryUnavailable,
/// Receipts not found for block hash/number/tag
#[error("receipts not found")]
ReceiptsNotFound(BlockId),
/// Thrown when an unknown block or transaction index is encountered
#[error("unknown block or tx index")]
UnknownBlockOrTxIndex,
/// When an invalid block range is provided
#[error("invalid block range")]
InvalidBlockRange,
/// Thrown when the target block for proof computation exceeds the maximum configured window.
#[error("distance to target block exceeds maximum proof window")]
ExceedsMaxProofWindow,
/// An internal error where prevrandao is not set in the evm's environment
#[error("prevrandao not in the EVM's environment after merge")]
PrevrandaoNotSet,
/// `excess_blob_gas` is not set for Cancun and above
#[error("excess blob gas missing in the EVM's environment after Cancun")]
ExcessBlobGasNotSet,
/// Thrown when a call or transaction request (`eth_call`, `eth_estimateGas`,
/// `eth_sendTransaction`) contains conflicting fields (legacy, EIP-1559)
#[error("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")]
ConflictingFeeFieldsInRequest,
/// Errors related to invalid transactions
#[error(transparent)]
InvalidTransaction(#[from] RpcInvalidTransactionError),
/// Thrown when constructing an RPC block from primitive block data fails
#[error(transparent)]
InvalidBlockData(#[from] BlockError),
/// Thrown when an `AccountOverride` contains conflicting `state` and `stateDiff` fields
#[error("account {0:?} has both 'state' and 'stateDiff'")]
BothStateAndStateDiffInOverride(Address),
/// Other internal error
#[error(transparent)]
Internal(RethError),
/// Error related to signing
#[error(transparent)]
Signing(#[from] SignError),
/// Thrown when a requested transaction is not found
#[error("transaction not found")]
TransactionNotFound,
/// Some feature is unsupported
#[error("unsupported")]
Unsupported(&'static str),
/// General purpose error for invalid params
#[error("{0}")]
InvalidParams(String),
/// When the tracer config does not match the tracer
#[error("invalid tracer config")]
InvalidTracerConfig,
/// When the percentile array is invalid
#[error("invalid reward percentiles")]
InvalidRewardPercentiles,
/// Error thrown when a spawned blocking task failed to deliver an anticipated response.
///
/// This only happens if the blocking task panics and is aborted before it can return a
/// response back to the request handler.
#[error("internal blocking task error")]
InternalBlockingTaskError,
/// Error thrown when a spawned blocking task failed to deliver an anticipated response
#[error("internal eth error")]
InternalEthError,
/// Error thrown when a (tracing) call exceeds the configured timeout
#[error("execution aborted (timeout = {0:?})")]
ExecutionTimedOut(Duration),
/// Internal Error thrown by the javascript tracer
#[error("{0}")]
InternalJsTracerError(String),
#[error(transparent)]
/// Call Input error when both `data` and `input` fields are set and not equal.
TransactionInputError(#[from] TransactionInputError),
/// Evm generic purpose error.
#[error("Revm error: {0}")]
EvmCustom(String),
/// Bytecode override is invalid.
///
/// This can happen if bytecode provided in an
/// [`AccountOverride`](alloy_rpc_types_eth::state::AccountOverride) is malformed, e.g. invalid
/// 7702 bytecode.
#[error("Invalid bytecode: {0}")]
InvalidBytecode(String),
/// Error encountered when converting a transaction type
#[error("Transaction conversion error")]
TransactionConversionError,
/// Error thrown when tracing with a muxTracer fails
#[error(transparent)]
MuxTracerError(#[from] MuxError),
/// Error thrown when waiting for transaction confirmation times out
#[error(
"Transaction {hash} was added to the mempool but wasn't confirmed within {duration:?}."
)]
TransactionConfirmationTimeout {
/// Hash of the transaction that timed out
hash: B256,
/// Duration that was waited before timing out
duration: Duration,
},
/// Error thrown when batch tx response channel fails
#[error(transparent)]
BatchTxRecvError(#[from] RecvError),
/// Error thrown when batch tx send channel fails
#[error("Batch transaction sender channel closed")]
BatchTxSendError,
/// Any other error
#[error("{0}")]
Other(Box<dyn ToRpcError>),
}
impl EthApiError {
/// crates a new [`EthApiError::Other`] variant.
pub fn other<E: ToRpcError>(err: E) -> Self {
Self::Other(Box::new(err))
}
/// Returns `true` if error is [`RpcInvalidTransactionError::GasTooHigh`]
pub const fn is_gas_too_high(&self) -> bool {
matches!(
self,
Self::InvalidTransaction(
RpcInvalidTransactionError::GasTooHigh |
RpcInvalidTransactionError::GasLimitTooHigh
)
)
}
/// Returns `true` if error is [`RpcInvalidTransactionError::GasTooLow`]
pub const fn is_gas_too_low(&self) -> bool {
matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooLow))
}
/// Returns the [`RpcInvalidTransactionError`] if this is a [`EthApiError::InvalidTransaction`]
pub const fn as_invalid_transaction(&self) -> Option<&RpcInvalidTransactionError> {
match self {
Self::InvalidTransaction(e) => Some(e),
_ => None,
}
}
/// Converts the given [`StateOverrideError`] into a new [`EthApiError`] instance.
pub fn from_state_overrides_err<E>(err: StateOverrideError<E>) -> Self
where
E: Into<Self>,
{
err.into()
}
/// Converts the given [`CallError`] into a new [`EthApiError`] instance.
pub fn from_call_err<E>(err: CallError<E>) -> Self
where
E: Into<Self>,
{
err.into()
}
/// Converts this error into the rpc error object.
pub fn into_rpc_err(self) -> jsonrpsee_types::error::ErrorObject<'static> {
self.into()
}
}
impl From<EthApiError> for jsonrpsee_types::error::ErrorObject<'static> {
fn from(error: EthApiError) -> Self {
match error {
EthApiError::FailedToDecodeSignedTransaction |
EthApiError::InvalidTransactionSignature |
EthApiError::EmptyRawTransactionData |
EthApiError::InvalidBlockRange |
EthApiError::ExceedsMaxProofWindow |
EthApiError::ConflictingFeeFieldsInRequest |
EthApiError::Signing(_) |
EthApiError::BothStateAndStateDiffInOverride(_) |
EthApiError::InvalidTracerConfig |
EthApiError::TransactionConversionError |
EthApiError::InvalidRewardPercentiles |
EthApiError::InvalidBytecode(_) => invalid_params_rpc_err(error.to_string()),
EthApiError::InvalidTransaction(err) => err.into(),
EthApiError::PoolError(err) => err.into(),
EthApiError::PrevrandaoNotSet |
EthApiError::ExcessBlobGasNotSet |
EthApiError::InvalidBlockData(_) |
EthApiError::Internal(_) |
EthApiError::EvmCustom(_) => internal_rpc_err(error.to_string()),
EthApiError::UnknownBlockOrTxIndex | EthApiError::TransactionNotFound => {
rpc_error_with_code(EthRpcErrorCode::ResourceNotFound.code(), error.to_string())
}
// TODO(onbjerg): We rewrite the error message here because op-node does string matching
// on the error message.
//
// Until https://github.com/ethereum-optimism/optimism/pull/11759 is released, this must be kept around.
EthApiError::HeaderNotFound(id) => rpc_error_with_code(
EthRpcErrorCode::ResourceNotFound.code(),
format!("block not found: {}", block_id_to_str(id)),
),
EthApiError::ReceiptsNotFound(id) => rpc_error_with_code(
EthRpcErrorCode::ResourceNotFound.code(),
format!("{error}: {}", block_id_to_str(id)),
),
EthApiError::HeaderRangeNotFound(start_id, end_id) => rpc_error_with_code(
EthRpcErrorCode::ResourceNotFound.code(),
format!(
"{error}: start block: {}, end block: {}",
block_id_to_str(start_id),
block_id_to_str(end_id),
),
),
err @ EthApiError::TransactionConfirmationTimeout { .. } => rpc_error_with_code(
EthRpcErrorCode::TransactionConfirmationTimeout.code(),
err.to_string(),
),
EthApiError::Unsupported(msg) => internal_rpc_err(msg),
EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg),
EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg),
err @ EthApiError::ExecutionTimedOut(_) => rpc_error_with_code(
jsonrpsee_types::error::CALL_EXECUTION_FAILED_CODE,
err.to_string(),
),
err @ (EthApiError::InternalBlockingTaskError | EthApiError::InternalEthError) => {
internal_rpc_err(err.to_string())
}
err @ EthApiError::TransactionInputError(_) => invalid_params_rpc_err(err.to_string()),
EthApiError::PrunedHistoryUnavailable => rpc_error_with_code(4444, error.to_string()),
EthApiError::Other(err) => err.to_rpc_error(),
EthApiError::MuxTracerError(msg) => internal_rpc_err(msg.to_string()),
EthApiError::BatchTxRecvError(err) => internal_rpc_err(err.to_string()),
EthApiError::BatchTxSendError => {
internal_rpc_err("Batch transaction sender channel closed".to_string())
}
}
}
}
impl From<TransactionConversionError> for EthApiError {
fn from(_: TransactionConversionError) -> Self {
Self::TransactionConversionError
}
}
impl<E> From<CallError<E>> for EthApiError
where
E: Into<Self>,
{
fn from(value: CallError<E>) -> Self {
match value {
CallError::Database(err) => err.into(),
CallError::InsufficientFunds(insufficient_funds_error) => {
Self::InvalidTransaction(RpcInvalidTransactionError::InsufficientFunds {
cost: insufficient_funds_error.cost,
balance: insufficient_funds_error.balance,
})
}
}
}
}
impl<E> From<StateOverrideError<E>> for EthApiError
where
E: Into<Self>,
{
fn from(value: StateOverrideError<E>) -> Self {
match value {
StateOverrideError::InvalidBytecode(bytecode_decode_error) => {
Self::InvalidBytecode(bytecode_decode_error.to_string())
}
StateOverrideError::BothStateAndStateDiff(address) => {
Self::BothStateAndStateDiffInOverride(address)
}
StateOverrideError::Database(err) => err.into(),
}
}
}
impl From<EthTxEnvError> for EthApiError {
fn from(value: EthTxEnvError) -> Self {
match value {
EthTxEnvError::CallFees(CallFeesError::BlobTransactionMissingBlobHashes) => {
Self::InvalidTransaction(
RpcInvalidTransactionError::BlobTransactionMissingBlobHashes,
)
}
EthTxEnvError::CallFees(CallFeesError::FeeCapTooLow) => {
Self::InvalidTransaction(RpcInvalidTransactionError::FeeCapTooLow)
}
EthTxEnvError::CallFees(CallFeesError::ConflictingFeeFieldsInRequest) => {
Self::ConflictingFeeFieldsInRequest
}
EthTxEnvError::CallFees(CallFeesError::TipAboveFeeCap) => {
Self::InvalidTransaction(RpcInvalidTransactionError::TipAboveFeeCap)
}
EthTxEnvError::CallFees(CallFeesError::TipVeryHigh) => {
Self::InvalidTransaction(RpcInvalidTransactionError::TipVeryHigh)
}
EthTxEnvError::Input(err) => Self::TransactionInputError(err),
}
}
}
#[cfg(feature = "js-tracer")]
impl From<revm_inspectors::tracing::js::JsInspectorError> for EthApiError {
fn from(error: revm_inspectors::tracing::js::JsInspectorError) -> Self {
match error {
err @ revm_inspectors::tracing::js::JsInspectorError::JsError(_) => {
Self::InternalJsTracerError(err.to_string())
}
err => Self::InvalidParams(err.to_string()),
}
}
}
impl From<RethError> for EthApiError {
fn from(error: RethError) -> Self {
match error {
RethError::Provider(err) => err.into(),
err => Self::Internal(err),
}
}
}
impl From<BlockExecutionError> for EthApiError {
fn from(error: BlockExecutionError) -> Self {
match error {
BlockExecutionError::Validation(validation_error) => match validation_error {
BlockValidationError::InvalidTx { error, .. } => {
if let Some(invalid_tx) = error.as_invalid_tx_err() {
Self::InvalidTransaction(RpcInvalidTransactionError::from(
invalid_tx.clone(),
))
} else {
Self::InvalidTransaction(RpcInvalidTransactionError::other(
rpc_error_with_code(
EthRpcErrorCode::TransactionRejected.code(),
error.to_string(),
),
))
}
}
_ => Self::Internal(RethError::Execution(BlockExecutionError::Validation(
validation_error,
))),
},
BlockExecutionError::Internal(internal_error) => {
Self::Internal(RethError::Execution(BlockExecutionError::Internal(internal_error)))
}
}
}
}
impl From<reth_errors::ProviderError> for EthApiError {
fn from(error: reth_errors::ProviderError) -> Self {
use reth_errors::ProviderError;
match error {
ProviderError::HeaderNotFound(hash) => Self::HeaderNotFound(hash.into()),
ProviderError::BlockHashNotFound(hash) | ProviderError::UnknownBlockHash(hash) => {
Self::HeaderNotFound(hash.into())
}
ProviderError::BestBlockNotFound => Self::HeaderNotFound(BlockId::latest()),
ProviderError::BlockNumberForTransactionIndexNotFound => Self::UnknownBlockOrTxIndex,
ProviderError::TotalDifficultyNotFound(num) => Self::HeaderNotFound(num.into()),
ProviderError::FinalizedBlockNotFound => Self::HeaderNotFound(BlockId::finalized()),
ProviderError::SafeBlockNotFound => Self::HeaderNotFound(BlockId::safe()),
err => Self::Internal(err.into()),
}
}
}
impl From<InvalidHeader> for EthApiError {
fn from(value: InvalidHeader) -> Self {
match value {
InvalidHeader::ExcessBlobGasNotSet => Self::ExcessBlobGasNotSet,
InvalidHeader::PrevrandaoNotSet => Self::PrevrandaoNotSet,
}
}
}
impl<T> From<EVMError<T, InvalidTransaction>> for EthApiError
where
T: Into<Self>,
{
fn from(err: EVMError<T, InvalidTransaction>) -> Self {
match err {
EVMError::Transaction(invalid_tx) => match invalid_tx {
InvalidTransaction::NonceTooLow { tx, state } => {
Self::InvalidTransaction(RpcInvalidTransactionError::NonceTooLow { tx, state })
}
_ => RpcInvalidTransactionError::from(invalid_tx).into(),
},
EVMError::Header(err) => err.into(),
EVMError::Database(err) => err.into(),
EVMError::Custom(err) => Self::EvmCustom(err),
}
}
}
impl From<RecoveryError> for EthApiError {
fn from(_: RecoveryError) -> Self {
Self::InvalidTransactionSignature
}
}
impl From<Infallible> for EthApiError {
fn from(_: Infallible) -> Self {
unreachable!()
}
}
/// An error due to invalid transaction.
///
/// The only reason this exists is to maintain compatibility with other clients de-facto standard
/// error messages.
///
/// These error variants can be thrown when the transaction is checked prior to execution.
///
/// These variants also cover all errors that can be thrown by revm.
///
/// ## Nomenclature
///
/// This type is explicitly modeled after geth's error variants and uses
/// `fee cap` for `max_fee_per_gas`
/// `tip` for `max_priority_fee_per_gas`
#[derive(thiserror::Error, Debug)]
pub enum RpcInvalidTransactionError {
/// returned if the nonce of a transaction is lower than the one present in the local chain.
#[error("nonce too low: next nonce {state}, tx nonce {tx}")]
NonceTooLow {
/// The nonce of the transaction.
tx: u64,
/// The current state of the nonce in the local chain.
state: u64,
},
/// returned if the nonce of a transaction is higher than the next one expected based on the
/// local chain.
#[error("nonce too high")]
NonceTooHigh,
/// Returned if the nonce of a transaction is too high
/// Incrementing the nonce would lead to invalid state (overflow)
#[error("nonce has max value")]
NonceMaxValue,
/// thrown if the transaction sender doesn't have enough funds for a transfer
#[error("insufficient funds for transfer")]
InsufficientFundsForTransfer,
/// thrown if creation transaction provides the init code bigger than init code size limit.
#[error("max initcode size exceeded")]
MaxInitCodeSizeExceeded,
/// Represents the inability to cover max fee + value (account balance too low).
#[error("insufficient funds for gas * price + value: have {balance} want {cost}")]
InsufficientFunds {
/// Transaction cost.
cost: U256,
/// Current balance of transaction sender.
balance: U256,
},
/// This is similar to [`Self::InsufficientFunds`] but with a different error message and
/// exists for compatibility reasons.
///
/// This error is used in `eth_estimateCall` when the highest available gas limit, capped with
/// the allowance of the caller is too low: [`Self::GasTooLow`].
#[error("gas required exceeds allowance ({gas_limit})")]
GasRequiredExceedsAllowance {
/// The gas limit the transaction was executed with.
gas_limit: u64,
},
/// Thrown when calculating gas usage
#[error("gas uint64 overflow")]
GasUintOverflow,
/// Thrown if the transaction is specified to use less gas than required to start the
/// invocation.
#[error("intrinsic gas too low")]
GasTooLow,
/// Thrown if the transaction gas exceeds the limit
#[error("intrinsic gas too high")]
GasTooHigh,
/// Thrown if the transaction gas limit exceeds the maximum
#[error("gas limit too high")]
GasLimitTooHigh,
/// Thrown if a transaction is not supported in the current network configuration.
#[error("transaction type not supported")]
TxTypeNotSupported,
/// Thrown to ensure no one is able to specify a transaction with a tip higher than the total
/// fee cap.
#[error("max priority fee per gas higher than max fee per gas")]
TipAboveFeeCap,
/// A sanity error to avoid huge numbers specified in the tip field.
#[error("max priority fee per gas higher than 2^256-1")]
TipVeryHigh,
/// A sanity error to avoid huge numbers specified in the fee cap field.
#[error("max fee per gas higher than 2^256-1")]
FeeCapVeryHigh,
/// Thrown post London if the transaction's fee is less than the base fee of the block
#[error("max fee per gas less than block base fee")]
FeeCapTooLow,
/// Thrown if the sender of a transaction is a contract.
#[error("sender is not an EOA")]
SenderNoEOA,
/// Gas limit was exceeded during execution.
/// Contains the gas limit.
#[error("out of gas: gas required exceeds: {0}")]
BasicOutOfGas(u64),
/// Gas limit was exceeded during memory expansion.
/// Contains the gas limit.
#[error("out of gas: gas exhausted during memory expansion: {0}")]
MemoryOutOfGas(u64),
/// Gas limit was exceeded during precompile execution.
/// Contains the gas limit.
#[error("out of gas: gas exhausted during precompiled contract execution: {0}")]
PrecompileOutOfGas(u64),
/// An operand to an opcode was invalid or out of range.
/// Contains the gas limit.
#[error("out of gas: invalid operand to an opcode: {0}")]
InvalidOperandOutOfGas(u64),
/// Thrown if executing a transaction failed during estimate/call
#[error(transparent)]
Revert(RevertError),
/// Unspecific EVM halt error.
#[error("EVM error: {0:?}")]
EvmHalt(HaltReason),
/// Invalid chain id set for the transaction.
#[error("invalid chain ID")]
InvalidChainId,
/// The transaction is before Spurious Dragon and has a chain ID
#[error("transactions before Spurious Dragon should not have a chain ID")]
OldLegacyChainId,
/// The transitions is before Berlin and has access list
#[error("transactions before Berlin should not have access list")]
AccessListNotSupported,
/// `max_fee_per_blob_gas` is not supported for blocks before the Cancun hardfork.
#[error("max_fee_per_blob_gas is not supported for blocks before the Cancun hardfork")]
MaxFeePerBlobGasNotSupported,
/// `blob_hashes`/`blob_versioned_hashes` is not supported for blocks before the Cancun
/// hardfork.
#[error("blob_versioned_hashes is not supported for blocks before the Cancun hardfork")]
BlobVersionedHashesNotSupported,
/// Block `blob_base_fee` is greater than tx-specified `max_fee_per_blob_gas` after Cancun.
#[error("max fee per blob gas less than block blob gas fee")]
BlobFeeCapTooLow,
/// Blob transaction has a versioned hash with an invalid blob
#[error("blob hash version mismatch")]
BlobHashVersionMismatch,
/// Blob transaction has no versioned hashes
#[error("blob transaction missing blob hashes")]
BlobTransactionMissingBlobHashes,
/// Blob transaction has too many blobs
#[error("blob transaction exceeds max blobs per block; got {have}")]
TooManyBlobs {
/// The number of blobs in the transaction.
have: usize,
},
/// Blob transaction is a create transaction
#[error("blob transaction is a create transaction")]
BlobTransactionIsCreate,
/// EIP-7702 is not enabled.
#[error("EIP-7702 authorization list not supported")]
AuthorizationListNotSupported,
/// EIP-7702 transaction has invalid fields set.
#[error("EIP-7702 authorization list has invalid fields")]
AuthorizationListInvalidFields,
/// Transaction priority fee is below the minimum required priority fee.
#[error("transaction priority fee below minimum required priority fee {minimum_priority_fee}")]
PriorityFeeBelowMinimum {
/// Minimum required priority fee.
minimum_priority_fee: u128,
},
/// Failed to decrypt calldata of seismic tx
#[error("Failed to decrypt seismic tx")]
FailedToDecryptSeismicTx,
/// Any other error
#[error("{0}")]
Other(Box<dyn ToRpcError>),
}
impl RpcInvalidTransactionError {
    /// Creates a new [`RpcInvalidTransactionError::Other`] variant from any [`ToRpcError`].
    pub fn other<E: ToRpcError>(err: E) -> Self {
        Self::Other(Box::new(err))
    }

    /// Returns the JSON-RPC error code for this error.
    ///
    /// Reverts map to the execution-error code, malformed-input style errors map to the
    /// invalid-input code, and everything else is reported as a rejected transaction.
    pub const fn error_code(&self) -> i32 {
        match self {
            Self::Revert(_) => EthRpcErrorCode::ExecutionError.code(),
            Self::InvalidChainId |
            Self::GasTooLow |
            Self::GasTooHigh |
            Self::GasRequiredExceedsAllowance { .. } |
            Self::NonceTooLow { .. } |
            Self::NonceTooHigh { .. } |
            Self::FeeCapTooLow |
            Self::FeeCapVeryHigh => EthRpcErrorCode::InvalidInput.code(),
            _ => EthRpcErrorCode::TransactionRejected.code(),
        }
    }

    /// Converts an EVM [`HaltReason`] into the matching RPC error.
    ///
    /// `gas_limit` is the configured gas limit of the transaction and is attached to
    /// out-of-gas variants.
    pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self {
        match reason {
            HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit),
            HaltReason::NonceOverflow => Self::NonceMaxValue,
            other => Self::EvmHalt(other),
        }
    }

    /// Converts an [`OutOfGasError`] into the matching RPC error, attaching the
    /// transaction's configured `gas_limit`.
    pub const fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self {
        match reason {
            OutOfGasError::Memory | OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit),
            OutOfGasError::Basic | OutOfGasError::ReentrancySentry => {
                Self::BasicOutOfGas(gas_limit)
            }
            OutOfGasError::Precompile => Self::PrecompileOutOfGas(gas_limit),
            OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit),
        }
    }

    /// Converts this error into the rpc error object.
    pub fn into_rpc_err(self) -> jsonrpsee_types::error::ErrorObject<'static> {
        self.into()
    }
}
impl From<RpcInvalidTransactionError> for jsonrpsee_types::error::ErrorObject<'static> {
    fn from(err: RpcInvalidTransactionError) -> Self {
        match err {
            RpcInvalidTransactionError::Other(other) => other.to_rpc_error(),
            RpcInvalidTransactionError::Revert(revert) => {
                // Attach the revert output bytes (if any) as error data.
                let output = revert.output.as_ref().map(|out| out.as_ref());
                rpc_err(revert.error_code(), revert.to_string(), output)
            }
            other => rpc_err(other.error_code(), other.to_string(), None),
        }
    }
}
impl From<InvalidTransaction> for RpcInvalidTransactionError {
fn from(err: InvalidTransaction) -> Self {
match err {
InvalidTransaction::InvalidChainId | InvalidTransaction::MissingChainId => {
Self::InvalidChainId
}
InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap,
InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow,
InvalidTransaction::CallerGasLimitMoreThanBlock |
InvalidTransaction::TxGasLimitGreaterThanCap { .. } => {
// tx.gas > block.gas_limit
Self::GasTooHigh
}
InvalidTransaction::CallGasCostMoreThanGasLimit { .. } => {
// tx.gas < cost
Self::GasTooLow
}
InvalidTransaction::GasFloorMoreThanGasLimit { .. } => {
// Post prague EIP-7623 tx floor calldata gas cost > tx.gas_limit
// where floor gas is the minimum amount of gas that will be spent
// In other words, the tx's gas limit is lower that the minimum gas requirements of
// the tx's calldata
Self::GasTooLow
}
InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA,
InvalidTransaction::LackOfFundForMaxFee { fee, balance } => {
Self::InsufficientFunds { cost: *fee, balance: *balance }
}
InvalidTransaction::OverflowPaymentInTransaction => Self::GasUintOverflow,
InvalidTransaction::NonceOverflowInTransaction => Self::NonceMaxValue,
InvalidTransaction::CreateInitCodeSizeLimit => Self::MaxInitCodeSizeExceeded,
InvalidTransaction::NonceTooHigh { .. } => Self::NonceTooHigh,
InvalidTransaction::NonceTooLow { tx, state } => Self::NonceTooLow { tx, state },
InvalidTransaction::AccessListNotSupported => Self::AccessListNotSupported,
InvalidTransaction::MaxFeePerBlobGasNotSupported => Self::MaxFeePerBlobGasNotSupported,
InvalidTransaction::BlobVersionedHashesNotSupported => {
Self::BlobVersionedHashesNotSupported
}
InvalidTransaction::BlobGasPriceGreaterThanMax { .. } => Self::BlobFeeCapTooLow,
InvalidTransaction::EmptyBlobs => Self::BlobTransactionMissingBlobHashes,
InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch,
InvalidTransaction::TooManyBlobs { have, .. } => Self::TooManyBlobs { have },
InvalidTransaction::BlobCreateTransaction => Self::BlobTransactionIsCreate,
InvalidTransaction::AuthorizationListNotSupported => {
Self::AuthorizationListNotSupported
}
InvalidTransaction::AuthorizationListInvalidFields |
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-layer/src/auth_client_layer.rs | crates/rpc/rpc-layer/src/auth_client_layer.rs | use crate::{Claims, JwtSecret};
use http::{header::AUTHORIZATION, HeaderValue};
use std::{
task::{Context, Poll},
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tower::{Layer, Service};
/// A layer that adds a new JWT token to every request using `AuthClientService`.
#[derive(Debug)]
pub struct AuthClientLayer {
secret: JwtSecret,
}
impl AuthClientLayer {
/// Create a new `AuthClientLayer` with the given `secret`.
pub const fn new(secret: JwtSecret) -> Self {
Self { secret }
}
}
impl<S> Layer<S> for AuthClientLayer {
type Service = AuthClientService<S>;
fn layer(&self, inner: S) -> Self::Service {
AuthClientService::new(self.secret, inner)
}
}
/// Automatically authenticates every client request with the given `secret`.
#[derive(Debug, Clone)]
pub struct AuthClientService<S> {
    /// Secret used to mint a fresh bearer token per request.
    secret: JwtSecret,
    /// The wrapped HTTP service.
    inner: S,
}

impl<S> AuthClientService<S> {
    const fn new(secret: JwtSecret, inner: S) -> Self {
        Self { secret, inner }
    }
}

impl<S, B> Service<http::Request<B>> for AuthClientService<S>
where
    S: Service<http::Request<B>>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, mut request: http::Request<B>) -> Self::Future {
        // Mint a fresh bearer token for this request and attach it, then forward.
        let bearer = secret_to_bearer_header(&self.secret);
        request.headers_mut().insert(AUTHORIZATION, bearer);
        self.inner.call(request)
    }
}
/// Helper function to convert a secret into a Bearer auth header value with claims according to
/// <https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md#jwt-claims>.
/// The token is valid for 60 seconds.
pub fn secret_to_bearer_header(secret: &JwtSecret) -> HeaderValue {
    // NOTE(review): `iat` is deliberately set 60 seconds in the future; presumably this
    // relies on the server's allowed clock drift so the token stays accepted for ~60s —
    // confirm against the JWT validation logic before changing.
    let issued_at =
        (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + Duration::from_secs(60))
            .as_secs();
    let claims = Claims { iat: issued_at, exp: None };
    let token = secret.encode(&claims).unwrap();
    format!("Bearer {token}").parse().unwrap()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-layer/src/lib.rs | crates/rpc/rpc-layer/src/lib.rs | //! Layer implementations used in RPC
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use http::HeaderMap;
use jsonrpsee_http_client::HttpResponse;
mod auth_client_layer;
mod auth_layer;
mod compression_layer;
mod jwt_validator;
pub use auth_layer::{AuthService, ResponseFuture};
pub use compression_layer::CompressionLayer;
// Export alloy JWT types
pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret};
pub use auth_client_layer::{secret_to_bearer_header, AuthClientLayer, AuthClientService};
pub use auth_layer::AuthLayer;
pub use jwt_validator::JwtAuthValidator;
/// General purpose trait to validate Http Authorization headers. It's supposed to be integrated as
/// a validator trait into an [`AuthLayer`].
pub trait AuthValidator {
    /// This function is invoked by the [`AuthLayer`] to perform validation on Http headers.
    /// The result conveys validation errors in the form of an Http response.
    ///
    /// Returns `Ok(())` when the headers are acceptable; otherwise the returned
    /// [`HttpResponse`] is sent back to the client as-is.
    #[expect(clippy::result_large_err)]
    fn validate(&self, headers: &HeaderMap) -> Result<(), HttpResponse>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-layer/src/compression_layer.rs | crates/rpc/rpc-layer/src/compression_layer.rs | use jsonrpsee_http_client::{HttpBody, HttpRequest, HttpResponse};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower::{Layer, Service};
use tower_http::compression::{Compression, CompressionLayer as TowerCompressionLayer};
/// This layer is a wrapper around [`tower_http::compression::CompressionLayer`] that integrates
/// with jsonrpsee's HTTP types. It automatically compresses responses based on the client's
/// Accept-Encoding header.
#[expect(missing_debug_implementations)]
#[derive(Clone)]
pub struct CompressionLayer {
    /// The underlying tower-http compression layer doing the actual work.
    inner_layer: TowerCompressionLayer,
}

impl CompressionLayer {
    /// Creates a new compression layer with zstd, gzip, brotli and deflate enabled.
    pub fn new() -> Self {
        let inner_layer =
            TowerCompressionLayer::new().gzip(true).br(true).deflate(true).zstd(true);
        Self { inner_layer }
    }
}

impl Default for CompressionLayer {
    /// Creates a new compression layer with default settings.
    /// See [`CompressionLayer::new`] for details.
    fn default() -> Self {
        Self::new()
    }
}

impl<S> Layer<S> for CompressionLayer {
    type Service = CompressionService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        let compression = self.inner_layer.layer(inner);
        CompressionService { compression }
    }
}
/// Service that performs response compression.
///
/// Created by [`CompressionLayer`].
#[expect(missing_debug_implementations)]
#[derive(Clone)]
pub struct CompressionService<S> {
    /// The tower-http compression service wrapping the inner service.
    compression: Compression<S>,
}

impl<S> Service<HttpRequest> for CompressionService<S>
where
    S: Service<HttpRequest, Response = HttpResponse>,
    S::Future: Send + 'static,
{
    type Response = HttpResponse;
    type Error = S::Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.compression.poll_ready(cx)
    }

    fn call(&mut self, req: HttpRequest) -> Self::Future {
        let response_future = self.compression.call(req);
        Box::pin(async move {
            let response = response_future.await?;
            // Re-wrap the (possibly compressed) body in jsonrpsee's `HttpBody` type.
            let (parts, compressed_body) = response.into_parts();
            Ok(Self::Response::from_parts(parts, HttpBody::new(compressed_body)))
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use http::header::{ACCEPT_ENCODING, CONTENT_ENCODING};
    use http_body_util::BodyExt;
    use jsonrpsee_http_client::{HttpRequest, HttpResponse};
    use std::{convert::Infallible, future::ready};

    // Payload repeated enough times that compression yields a measurable size reduction.
    const TEST_DATA: &str = "compress test data ";
    const REPEAT_COUNT: usize = 1000;

    /// Mock inner service that always replies with the repeated test payload.
    #[derive(Clone)]
    struct MockRequestService;

    impl Service<HttpRequest> for MockRequestService {
        type Response = HttpResponse;
        type Error = Infallible;
        type Future = std::future::Ready<Result<Self::Response, Self::Error>>;

        fn poll_ready(
            &mut self,
            _: &mut std::task::Context<'_>,
        ) -> std::task::Poll<Result<(), Self::Error>> {
            std::task::Poll::Ready(Ok(()))
        }

        fn call(&mut self, _: HttpRequest) -> Self::Future {
            let body = HttpBody::from(TEST_DATA.repeat(REPEAT_COUNT));
            let response = HttpResponse::builder().body(body).unwrap();
            ready(Ok(response))
        }
    }

    /// Builds the compression service under test around the mock inner service.
    fn setup_compression_service(
    ) -> impl Service<HttpRequest, Response = HttpResponse, Error = Infallible> {
        CompressionLayer::new().layer(MockRequestService)
    }

    async fn get_response_size(response: HttpResponse) -> usize {
        // Get the total size of the response body
        response.into_body().collect().await.unwrap().to_bytes().len()
    }

    #[tokio::test]
    async fn test_gzip_compression() {
        let mut service = setup_compression_service();
        // Client advertises gzip support via Accept-Encoding.
        let request =
            HttpRequest::builder().header(ACCEPT_ENCODING, "gzip").body(HttpBody::empty()).unwrap();
        let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len();

        // Make the request
        let response = service.call(request).await.unwrap();

        // Verify the response has gzip content-encoding
        assert_eq!(
            response.headers().get(CONTENT_ENCODING).unwrap(),
            "gzip",
            "Response should be gzip encoded"
        );

        // Verify the response body is actually compressed (should be smaller than original)
        let compressed_size = get_response_size(response).await;
        assert!(
            compressed_size < uncompressed_len,
            "Compressed size ({compressed_size}) should be smaller than original size ({uncompressed_len})"
        );
    }

    #[tokio::test]
    async fn test_no_compression_when_not_requested() {
        // Create a service with compression
        let mut service = setup_compression_service();
        // No Accept-Encoding header: the layer must pass the body through untouched.
        let request = HttpRequest::builder().body(HttpBody::empty()).unwrap();
        let response = service.call(request).await.unwrap();

        assert!(
            response.headers().get(CONTENT_ENCODING).is_none(),
            "Response should not be compressed when not requested"
        );

        let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len();
        // Verify the response body matches the original size
        let response_size = get_response_size(response).await;
        assert!(
            response_size == uncompressed_len,
            "Response size ({response_size}) should equal original size ({uncompressed_len})"
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-layer/src/auth_layer.rs | crates/rpc/rpc-layer/src/auth_layer.rs | use super::AuthValidator;
use jsonrpsee_http_client::{HttpRequest, HttpResponse};
use pin_project::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower::{Layer, Service};
/// This is an Http middleware layer that acts as an
/// interceptor for `Authorization` headers. Incoming requests are dispatched to
/// an inner [`AuthValidator`]. Invalid requests are blocked and the validator's error response is
/// returned. Valid requests are instead dispatched to the next layer along the chain.
///
/// # How to integrate
/// ```rust
/// async fn build_layered_rpc_server() {
///     use jsonrpsee::server::ServerBuilder;
///     use reth_rpc_layer::{AuthLayer, JwtAuthValidator, JwtSecret};
///     use std::net::SocketAddr;
///
///     const AUTH_PORT: u32 = 8551;
///     const AUTH_ADDR: &str = "0.0.0.0";
///     const AUTH_SECRET: &str =
///         "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430";
///
///     let addr = format!("{AUTH_ADDR}:{AUTH_PORT}");
///     let secret = JwtSecret::from_hex(AUTH_SECRET).unwrap();
///     let validator = JwtAuthValidator::new(secret);
///     let layer = AuthLayer::new(validator);
///     let middleware = tower::ServiceBuilder::default().layer(layer);
///
///     let _server = ServerBuilder::default()
///         .set_http_middleware(middleware)
///         .build(addr.parse::<SocketAddr>().unwrap())
///         .await
///         .unwrap();
/// }
/// ```
#[expect(missing_debug_implementations)]
pub struct AuthLayer<V> {
    /// Validator applied to every incoming request's headers.
    validator: V,
}

impl<V> AuthLayer<V> {
    /// Creates an instance of [`AuthLayer`].
    /// `validator` is a generic trait able to validate requests (see [`AuthValidator`]).
    pub const fn new(validator: V) -> Self {
        Self { validator }
    }
}

impl<S, V> Layer<S> for AuthLayer<V>
where
    V: Clone,
{
    type Service = AuthService<S, V>;

    fn layer(&self, inner: S) -> Self::Service {
        // Each layered service gets its own clone of the validator.
        AuthService { validator: self.validator.clone(), inner }
    }
}
/// This type is the actual implementation of the middleware. It follows the [`Service`]
/// specification to correctly proxy Http requests to its inner service after headers validation.
#[derive(Clone, Debug)]
pub struct AuthService<S, V> {
    /// Performs auth validation logics
    validator: V,
    /// Recipient of authorized Http requests
    inner: S,
}

impl<S, V> Service<HttpRequest> for AuthService<S, V>
where
    S: Service<HttpRequest, Response = HttpResponse>,
    V: AuthValidator,
    Self: Clone,
{
    type Response = HttpResponse;
    type Error = S::Error;
    type Future = ResponseFuture<S::Future>;

    /// If we get polled it means that we dispatched an authorized Http request to the inner layer.
    /// So we just poll the inner layer ourselves.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    /// This is the entrypoint of the service. We receive an Http request and check the validity of
    /// the authorization header.
    ///
    /// Returns a future that wraps either:
    /// - The inner service future for authorized requests
    /// - An error Http response in case of authorization errors
    fn call(&mut self, req: HttpRequest) -> Self::Future {
        // Short-circuit with the validator's prepared error response on auth failure.
        if let Err(res) = self.validator.validate(req.headers()) {
            return ResponseFuture::invalid_auth(res)
        }
        ResponseFuture::future(self.inner.call(req))
    }
}
/// A future representing the response of an RPC request
#[pin_project]
#[expect(missing_debug_implementations)]
pub struct ResponseFuture<F> {
    /// The kind of response future, error or pending
    #[pin]
    kind: Kind<F>,
}

impl<F> ResponseFuture<F> {
    /// Wraps the inner service's future for an authorized request.
    const fn future(future: F) -> Self {
        Self { kind: Kind::Future { future } }
    }

    /// Creates an immediately-ready future yielding the given auth-error response.
    const fn invalid_auth(err_res: HttpResponse) -> Self {
        Self { kind: Kind::Error { response: Some(err_res) } }
    }
}

/// Either the inner service future or a ready error response.
#[pin_project(project = KindProj)]
enum Kind<F> {
    /// Request passed validation; drive the inner service's future.
    Future {
        #[pin]
        future: F,
    },
    /// Request failed validation; `response` is taken exactly once when polled.
    Error {
        response: Option<HttpResponse>,
    },
}
impl<F, E> Future for ResponseFuture<F>
where
F: Future<Output = Result<HttpResponse, E>>,
{
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.project().kind.project() {
KindProj::Future { future } => future.poll(cx),
KindProj::Error { response } => {
let response = response.take().unwrap();
Poll::Ready(Ok(response))
}
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::JwtAuthValidator;
    use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret};
    use jsonrpsee::{
        server::{RandomStringIdProvider, ServerBuilder, ServerConfig, ServerHandle},
        RpcModule,
    };
    use reqwest::{header, StatusCode};
    use std::{
        net::SocketAddr,
        time::{SystemTime, UNIX_EPOCH},
    };

    const AUTH_PORT: u32 = 8551;
    const AUTH_ADDR: &str = "0.0.0.0";
    const SECRET: &str = "f79ae8046bc11c9927afe911db7143c51a806c4a537cc08e0d37140b0192f430";

    #[tokio::test]
    async fn test_jwt_layer() {
        // We group all tests into one to avoid individual #[tokio::test]
        // to concurrently spawn a server on the same port.
        valid_jwt().await;
        missing_jwt_error().await;
        wrong_jwt_signature_error().await;
        invalid_issuance_timestamp_error().await;
        jwt_decode_error().await
    }

    // A token signed with the server's secret and a current `iat` is accepted.
    async fn valid_jwt() {
        let claims = Claims { iat: to_u64(SystemTime::now()), exp: Some(10000000000) };
        let secret = JwtSecret::from_hex(SECRET).unwrap(); // Same secret as the server
        let jwt = secret.encode(&claims).unwrap();
        let (status, _) = send_request(Some(jwt)).await;
        assert_eq!(status, StatusCode::OK);
    }

    // Requests without any bearer token are rejected with 401.
    async fn missing_jwt_error() {
        let (status, body) = send_request(None).await;
        let expected = JwtError::MissingOrInvalidAuthorizationHeader;
        assert_eq!(status, StatusCode::UNAUTHORIZED);
        assert_eq!(body, expected.to_string());
    }

    async fn wrong_jwt_signature_error() {
        // This secret is different from the server. This will generate a
        // different signature
        let secret = JwtSecret::random();
        let claims = Claims { iat: to_u64(SystemTime::now()), exp: Some(10000000000) };
        let jwt = secret.encode(&claims).unwrap();
        let (status, body) = send_request(Some(jwt)).await;
        let expected = JwtError::InvalidSignature;
        assert_eq!(status, StatusCode::UNAUTHORIZED);
        assert_eq!(body, expected.to_string());
    }

    // Tokens issued too far in the future are rejected.
    async fn invalid_issuance_timestamp_error() {
        let secret = JwtSecret::from_hex(SECRET).unwrap(); // Same secret as the server
        let iat = to_u64(SystemTime::now()) + 1000;
        let claims = Claims { iat, exp: Some(10000000000) };
        let jwt = secret.encode(&claims).unwrap();
        let (status, body) = send_request(Some(jwt)).await;
        let expected = JwtError::InvalidIssuanceTimestamp;
        assert_eq!(status, StatusCode::UNAUTHORIZED);
        assert_eq!(body, expected.to_string());
    }

    // Garbage that is not even a well-formed JWT fails during decoding.
    async fn jwt_decode_error() {
        let jwt = "this jwt has serious encoding problems".to_string();
        let (status, body) = send_request(Some(jwt)).await;
        assert_eq!(status, StatusCode::UNAUTHORIZED);
        assert_eq!(body, "JWT decoding error: InvalidToken".to_string());
    }

    // Spins up a fresh server, performs one RPC call with the given bearer token,
    // tears the server down, and returns the response status and body.
    async fn send_request(jwt: Option<String>) -> (StatusCode, String) {
        let server = spawn_server().await;
        let client =
            reqwest::Client::builder().timeout(std::time::Duration::from_secs(1)).build().unwrap();

        let body = r#"{"jsonrpc": "2.0", "method": "greet_melkor", "params": [], "id": 1}"#;
        let response = client
            .post(format!("http://{AUTH_ADDR}:{AUTH_PORT}"))
            .bearer_auth(jwt.unwrap_or_default())
            .body(body)
            .header(header::CONTENT_TYPE, "application/json")
            .send()
            .await
            .unwrap();
        let status = response.status();
        let body = response.text().await.unwrap();
        server.stop().unwrap();
        server.stopped().await;
        (status, body)
    }

    /// Spawn a new RPC server equipped with a `JwtLayer` auth middleware.
    async fn spawn_server() -> ServerHandle {
        let secret = JwtSecret::from_hex(SECRET).unwrap();
        let addr = format!("{AUTH_ADDR}:{AUTH_PORT}");
        let validator = JwtAuthValidator::new(secret);
        let layer = AuthLayer::new(validator);
        let middleware = tower::ServiceBuilder::default().layer(layer);

        // Create a layered server
        let server = ServerBuilder::default()
            .set_config(
                ServerConfig::builder().set_id_provider(RandomStringIdProvider::new(16)).build(),
            )
            .set_http_middleware(middleware)
            .build(addr.parse::<SocketAddr>().unwrap())
            .await
            .unwrap();

        // Create a mock rpc module
        let mut module = RpcModule::new(());
        module.register_method("greet_melkor", |_, _, _| "You are the dark lord").unwrap();

        server.start(module)
    }

    // Seconds since the Unix epoch for the given time.
    fn to_u64(time: SystemTime) -> u64 {
        time.duration_since(UNIX_EPOCH).unwrap().as_secs()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-layer/src/jwt_validator.rs | crates/rpc/rpc-layer/src/jwt_validator.rs | use crate::{AuthValidator, JwtError, JwtSecret};
use http::{header, HeaderMap, Response, StatusCode};
use jsonrpsee_http_client::{HttpBody, HttpResponse};
use tracing::error;
/// Implements JWT validation logics and integrates
/// to an Http [`AuthLayer`][crate::AuthLayer]
/// by implementing the [`AuthValidator`] trait.
#[derive(Debug, Clone)]
pub struct JwtAuthValidator {
    /// Shared secret used to verify incoming JWTs.
    secret: JwtSecret,
}

impl JwtAuthValidator {
    /// Creates a new instance of [`JwtAuthValidator`].
    /// Validation logics are implemented by the `secret`
    /// argument (see [`JwtSecret`]).
    pub const fn new(secret: JwtSecret) -> Self {
        Self { secret }
    }
}
impl AuthValidator for JwtAuthValidator {
    /// Extracts the bearer token from the headers and validates it against the configured
    /// secret, logging and returning a prepared 401 response on any failure.
    fn validate(&self, headers: &HeaderMap) -> Result<(), HttpResponse> {
        let Some(jwt) = get_bearer(headers) else {
            let e = JwtError::MissingOrInvalidAuthorizationHeader;
            error!(target: "engine::jwt-validator", "Invalid JWT: {e}");
            return Err(err_response(e))
        };
        if let Err(e) = self.secret.validate(&jwt) {
            error!(target: "engine::jwt-validator", "Invalid JWT: {e}");
            return Err(err_response(e))
        }
        Ok(())
    }
}
/// Extracts a bearer token from the `Authorization` Http header, if one is present.
///
/// Note: this matches the `"Bearer "` marker anywhere in the header value (not only as a
/// prefix) and returns everything after it, mirroring the original lenient behavior.
fn get_bearer(headers: &HeaderMap) -> Option<String> {
    const PREFIX: &str = "Bearer ";
    let auth = headers.get(header::AUTHORIZATION)?.to_str().ok()?;
    let token_start = auth.find(PREFIX)? + PREFIX.len();
    Some(auth[token_start..].to_string())
}
/// Builds the `401 Unauthorized` response carrying the error message as its body.
fn err_response(err: JwtError) -> HttpResponse {
    // We build a response from an error message.
    // We don't cope with headers or other structured fields.
    // Then we are safe to "expect" on the result.
    let builder = Response::builder().status(StatusCode::UNAUTHORIZED);
    builder.body(HttpBody::new(err.to_string())).expect("This should never happen")
}
#[cfg(test)]
mod tests {
    use crate::jwt_validator::get_bearer;
    use http::{header, HeaderMap};

    /// Builds a header map containing a single `Authorization` header with `value`.
    fn headers_with_auth(value: &str) -> HeaderMap {
        let mut headers = HeaderMap::new();
        headers.insert(header::AUTHORIZATION, value.parse().unwrap());
        headers
    }

    #[test]
    fn auth_header_available() {
        let headers = headers_with_auth("Bearer foo");
        assert_eq!(get_bearer(&headers).unwrap(), "foo");
    }

    #[test]
    fn auth_header_not_available() {
        // No Authorization header at all.
        assert!(get_bearer(&HeaderMap::new()).is_none());
    }

    #[test]
    fn auth_header_malformed() {
        // "Bearer " marker is absent, so no token is extracted.
        let headers = headers_with_auth("Bea___rer foo");
        assert!(get_bearer(&headers).is_none());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-server-types/src/lib.rs | crates/rpc/rpc-server-types/src/lib.rs | //! Reth RPC server types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
/// Common RPC constants.
pub mod constants;
pub mod result;
mod module;
pub use module::{RethRpcModule, RpcModuleSelection};
pub use result::ToRpcResult;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-server-types/src/module.rs | crates/rpc/rpc-server-types/src/module.rs | use std::{collections::HashSet, fmt, str::FromStr};
use serde::{Deserialize, Serialize, Serializer};
use strum::{AsRefStr, EnumIter, IntoStaticStr, ParseError, VariantArray, VariantNames};
/// Describes the modules that should be installed.
///
/// # Example
///
/// Create a [`RpcModuleSelection`] from a selection.
///
/// ```
/// use reth_rpc_server_types::{RethRpcModule, RpcModuleSelection};
/// let config: RpcModuleSelection = vec![RethRpcModule::Eth].into();
/// ```
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub enum RpcModuleSelection {
    /// Use _all_ available modules.
    All,
    /// The default modules `eth`, `net`, `web3`
    /// (see [`RpcModuleSelection::STANDARD_MODULES`]).
    #[default]
    Standard,
    /// Only use the configured modules.
    Selection(HashSet<RethRpcModule>),
}
// === impl RpcModuleSelection ===
impl RpcModuleSelection {
    /// The standard modules to instantiate by default `eth`, `net`, `web3`
    pub const STANDARD_MODULES: [RethRpcModule; 3] =
        [RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3];

    /// Returns a selection of [`RethRpcModule`] with all [`RethRpcModule::all_variants`].
    pub fn all_modules() -> HashSet<RethRpcModule> {
        RethRpcModule::modules().into_iter().collect()
    }

    /// Returns the [`RpcModuleSelection::STANDARD_MODULES`] as a selection.
    pub fn standard_modules() -> HashSet<RethRpcModule> {
        HashSet::from(Self::STANDARD_MODULES)
    }

    /// All modules that are available by default on IPC.
    ///
    /// By default all modules are available on IPC.
    pub fn default_ipc_modules() -> HashSet<RethRpcModule> {
        Self::all_modules()
    }

    /// Creates a new _unique_ [`RpcModuleSelection::Selection`] from the given items.
    ///
    /// # Note
    ///
    /// This will dedupe the selection and remove duplicates while preserving the order.
    ///
    /// # Example
    ///
    /// Create a selection from the [`RethRpcModule`] string identifiers
    ///
    /// ```
    /// use reth_rpc_server_types::{RethRpcModule, RpcModuleSelection};
    /// let selection = vec!["eth", "admin"];
    /// let config = RpcModuleSelection::try_from_selection(selection).unwrap();
    /// assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]));
    /// ```
    ///
    /// Create a unique selection from the [`RethRpcModule`] string identifiers
    ///
    /// ```
    /// use reth_rpc_server_types::{RethRpcModule, RpcModuleSelection};
    /// let selection = vec!["eth", "admin", "eth", "admin"];
    /// let config = RpcModuleSelection::try_from_selection(selection).unwrap();
    /// assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]));
    /// ```
    pub fn try_from_selection<I, T>(selection: I) -> Result<Self, T::Error>
    where
        I: IntoIterator<Item = T>,
        T: TryInto<RethRpcModule>,
    {
        // Collecting into `Result<Self, _>` short-circuits on the first failed conversion.
        selection.into_iter().map(TryInto::try_into).collect()
    }

    /// Returns the number of modules in the selection
    pub fn len(&self) -> usize {
        match self {
            Self::All => RethRpcModule::variant_count(),
            Self::Standard => Self::STANDARD_MODULES.len(),
            Self::Selection(s) => s.len(),
        }
    }

    /// Returns true if no selection is configured
    pub fn is_empty(&self) -> bool {
        // `All` and `Standard` are never empty by construction.
        match self {
            Self::Selection(sel) => sel.is_empty(),
            _ => false,
        }
    }

    /// Returns true if all modules are selected
    pub const fn is_all(&self) -> bool {
        matches!(self, Self::All)
    }

    /// Returns an iterator over all configured [`RethRpcModule`]
    pub fn iter_selection(&self) -> Box<dyn Iterator<Item = RethRpcModule> + '_> {
        match self {
            Self::All => Box::new(RethRpcModule::modules().into_iter()),
            Self::Standard => Box::new(Self::STANDARD_MODULES.iter().copied()),
            Self::Selection(s) => Box::new(s.iter().copied()),
        }
    }

    /// Clones the set of configured [`RethRpcModule`].
    pub fn to_selection(&self) -> HashSet<RethRpcModule> {
        match self {
            Self::All => Self::all_modules(),
            Self::Standard => Self::standard_modules(),
            Self::Selection(s) => s.clone(),
        }
    }

    /// Converts the selection into a [`HashSet`].
    pub fn into_selection(self) -> HashSet<RethRpcModule> {
        match self {
            Self::All => Self::all_modules(),
            Self::Standard => Self::standard_modules(),
            Self::Selection(s) => s,
        }
    }

    /// Returns true if both selections are identical.
    ///
    /// A disabled side (`None`) is considered identical only to an empty selection.
    pub fn are_identical(http: Option<&Self>, ws: Option<&Self>) -> bool {
        match (http, ws) {
            // Shortcut for common case to avoid iterating later
            (Some(Self::All), Some(other)) | (Some(other), Some(Self::All)) => {
                other.len() == RethRpcModule::variant_count()
            }

            // If either side is disabled, then the other must be empty
            (Some(some), None) | (None, Some(some)) => some.is_empty(),

            (Some(http), Some(ws)) => http.to_selection() == ws.to_selection(),
            (None, None) => true,
        }
    }

    /// Returns true if the selection contains the given module.
    pub fn contains(&self, module: &RethRpcModule) -> bool {
        match self {
            Self::All => true,
            Self::Standard => Self::STANDARD_MODULES.contains(module),
            Self::Selection(s) => s.contains(module),
        }
    }

    /// Adds a module to the selection.
    ///
    /// If the selection is `All`, this is a no-op.
    /// Otherwise, converts to a `Selection` and adds the module.
    pub fn push(&mut self, module: RethRpcModule) {
        // Note: a `Standard` selection is materialized into an explicit `Selection` here.
        if !self.is_all() {
            let mut modules = self.to_selection();
            modules.insert(module);
            *self = Self::Selection(modules);
        }
    }

    /// Returns a new selection with the given module added.
    ///
    /// If the selection is `All`, returns `All`.
    /// Otherwise, converts to a `Selection` and adds the module.
    pub fn append(self, module: RethRpcModule) -> Self {
        if self.is_all() {
            Self::All
        } else {
            let mut modules = self.into_selection();
            modules.insert(module);
            Self::Selection(modules)
        }
    }

    /// Extends the selection with modules from an iterator.
    ///
    /// If the selection is `All`, this is a no-op.
    /// Otherwise, converts to a `Selection` and adds the modules.
    pub fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = RethRpcModule>,
    {
        if !self.is_all() {
            let mut modules = self.to_selection();
            modules.extend(iter);
            *self = Self::Selection(modules);
        }
    }

    /// Returns a new selection with modules from an iterator added.
    ///
    /// If the selection is `All`, returns `All`.
    /// Otherwise, converts to a `Selection` and adds the modules.
    pub fn extended<I>(self, iter: I) -> Self
    where
        I: IntoIterator<Item = RethRpcModule>,
    {
        if self.is_all() {
            Self::All
        } else {
            let mut modules = self.into_selection();
            modules.extend(iter);
            Self::Selection(modules)
        }
    }
}
impl From<&HashSet<RethRpcModule>> for RpcModuleSelection {
    /// Builds an explicit selection by copying the modules out of the borrowed set.
    fn from(set: &HashSet<RethRpcModule>) -> Self {
        Self::Selection(set.iter().copied().collect())
    }
}
impl From<HashSet<RethRpcModule>> for RpcModuleSelection {
    /// Wraps an owned set of modules as an explicit selection.
    fn from(modules: HashSet<RethRpcModule>) -> Self {
        Self::Selection(modules)
    }
}
impl From<&[RethRpcModule]> for RpcModuleSelection {
fn from(s: &[RethRpcModule]) -> Self {
Self::Selection(s.iter().copied().collect())
}
}
impl From<Vec<RethRpcModule>> for RpcModuleSelection {
fn from(s: Vec<RethRpcModule>) -> Self {
Self::Selection(s.into_iter().collect())
}
}
impl<const N: usize> From<[RethRpcModule; N]> for RpcModuleSelection {
fn from(s: [RethRpcModule; N]) -> Self {
Self::Selection(s.iter().copied().collect())
}
}
impl<'a> FromIterator<&'a RethRpcModule> for RpcModuleSelection {
    /// Copies the borrowed modules and collects them into an explicit selection.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = &'a RethRpcModule>,
    {
        Self::Selection(iter.into_iter().copied().collect())
    }
}
impl FromIterator<RethRpcModule> for RpcModuleSelection {
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = RethRpcModule>,
{
Self::Selection(iter.into_iter().collect())
}
}
impl FromStr for RpcModuleSelection {
    type Err = ParseError;
    /// Parses a comma-separated module list.
    ///
    /// The empty string and the keyword `none` yield an empty selection, while `all` yields
    /// [`RpcModuleSelection::All`]. Keywords are matched case-insensitively and only against the
    /// first list entry; anything else is parsed module-by-module via
    /// [`Self::try_from_selection`].
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Ok(Self::Selection(Default::default()))
        }
        let mut parts = s.split(',').map(str::trim).peekable();
        let head = parts.peek().copied().ok_or(ParseError::VariantNotFound)?;
        // Lowercase so that "ALL", "All", "aLl" etc. are all recognized.
        match head.to_lowercase().as_str() {
            "all" => Ok(Self::All),
            "none" => Ok(Self::Selection(Default::default())),
            _ => Self::try_from_selection(parts),
        }
    }
}
impl fmt::Display for RpcModuleSelection {
    /// Formats the selection as a bracketed, comma-separated list, e.g. `[eth, net]`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let entries: Vec<String> = self.iter_selection().map(|m| m.to_string()).collect();
        write!(f, "[{}]", entries.join(", "))
    }
}
/// Represents RPC modules that are supported by reth
///
/// The `strum` attribute below renders variants in kebab-case for string conversions, while
/// `serde` deserializes them from snake_case strings.
#[derive(
    Debug,
    Clone,
    Copy,
    Eq,
    PartialEq,
    Hash,
    AsRefStr,
    IntoStaticStr,
    VariantNames,
    VariantArray,
    EnumIter,
    Deserialize,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "kebab-case")]
pub enum RethRpcModule {
    /// `admin_` module
    Admin,
    /// `debug_` module
    Debug,
    /// `eth_` module
    Eth,
    /// `net_` module
    Net,
    /// `trace_` module
    Trace,
    /// `txpool_` module
    Txpool,
    /// `web3_` module
    Web3,
    /// `rpc_` module
    Rpc,
    /// `reth_` module
    Reth,
    /// `ots_` module
    Ots,
    /// `flashbots_` module
    Flashbots,
    /// `miner_` module
    Miner,
    /// `mev_` module
    Mev,
}
// === impl RethRpcModule ===
impl RethRpcModule {
    /// Returns how many variants the enum has.
    pub const fn variant_count() -> usize {
        Self::all_variants().len()
    }
    /// Returns the string names of all variants.
    pub const fn all_variant_names() -> &'static [&'static str] {
        <Self as VariantNames>::VARIANTS
    }
    /// Returns a static slice containing every variant.
    pub const fn all_variants() -> &'static [Self] {
        <Self as VariantArray>::VARIANTS
    }
    /// Returns an iterator over all variants of the enum.
    pub fn modules() -> impl IntoIterator<Item = Self> {
        use strum::IntoEnumIterator;
        Self::iter()
    }
    /// Returns the static string representation of the module.
    #[inline]
    pub fn as_str(&self) -> &'static str {
        self.into()
    }
}
impl FromStr for RethRpcModule {
    type Err = ParseError;
    /// Parses the lowercase module name; unknown names yield [`ParseError::VariantNotFound`].
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let module = match s {
            "admin" => Self::Admin,
            "debug" => Self::Debug,
            "eth" => Self::Eth,
            "flashbots" => Self::Flashbots,
            "mev" => Self::Mev,
            "miner" => Self::Miner,
            "net" => Self::Net,
            "ots" => Self::Ots,
            "reth" => Self::Reth,
            "rpc" => Self::Rpc,
            "trace" => Self::Trace,
            "txpool" => Self::Txpool,
            "web3" => Self::Web3,
            _ => return Err(ParseError::VariantNotFound),
        };
        Ok(module)
    }
}
impl TryFrom<&str> for RethRpcModule {
type Error = ParseError;
fn try_from(s: &str) -> Result<Self, <Self as TryFrom<&str>>::Error> {
FromStr::from_str(s)
}
}
impl fmt::Display for RethRpcModule {
    /// Writes the module name, honoring any width/alignment formatting flags.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad(self.as_str())
    }
}
impl Serialize for RethRpcModule {
    /// Serializes the module as its string representation.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.as_ref())
    }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_all_modules() {
    // `all_modules` must cover every enum variant.
    let all_modules = RpcModuleSelection::all_modules();
    assert_eq!(all_modules.len(), RethRpcModule::variant_count());
}
#[test]
fn test_standard_modules() {
    // The standard selection is exactly eth + net + web3.
    let standard_modules = RpcModuleSelection::standard_modules();
    let expected_modules: HashSet<RethRpcModule> =
        HashSet::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]);
    assert_eq!(standard_modules, expected_modules);
}
#[test]
fn test_default_ipc_modules() {
    // IPC defaults to exposing every module.
    let default_ipc_modules = RpcModuleSelection::default_ipc_modules();
    assert_eq!(default_ipc_modules, RpcModuleSelection::all_modules());
}
#[test]
fn test_try_from_selection_success() {
    // Parsing a list of valid module names yields the matching selection.
    let selection = vec!["eth", "admin"];
    let config = RpcModuleSelection::try_from_selection(selection).unwrap();
    assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]));
}
#[test]
fn test_rpc_module_selection_len() {
    let all_modules = RpcModuleSelection::All;
    let standard = RpcModuleSelection::Standard;
    let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]);
    // `All` counts every variant, `Standard` its fixed three modules.
    assert_eq!(all_modules.len(), RethRpcModule::variant_count());
    assert_eq!(standard.len(), 3);
    assert_eq!(selection.len(), 2);
}
#[test]
fn test_rpc_module_selection_is_empty() {
    // Only an explicit empty `Selection` is considered empty.
    let empty_selection = RpcModuleSelection::from(HashSet::new());
    assert!(empty_selection.is_empty());
    let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]);
    assert!(!non_empty_selection.is_empty());
}
#[test]
fn test_rpc_module_selection_iter_selection() {
    let all_modules = RpcModuleSelection::All;
    let standard = RpcModuleSelection::Standard;
    let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]);
    // The iterator yields exactly the modules each variant represents.
    assert_eq!(all_modules.iter_selection().count(), RethRpcModule::variant_count());
    assert_eq!(standard.iter_selection().count(), 3);
    assert_eq!(selection.iter_selection().count(), 2);
}
#[test]
fn test_rpc_module_selection_to_selection() {
    let all_modules = RpcModuleSelection::All;
    let standard = RpcModuleSelection::Standard;
    let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]);
    // `to_selection` materializes each variant into an owned set.
    assert_eq!(all_modules.to_selection(), RpcModuleSelection::all_modules());
    assert_eq!(standard.to_selection(), RpcModuleSelection::standard_modules());
    assert_eq!(
        selection.to_selection(),
        HashSet::from([RethRpcModule::Eth, RethRpcModule::Admin])
    );
}
#[test]
fn test_rpc_module_selection_are_identical() {
    // Exercises every meaningful pairing of `are_identical` inputs.
    //
    // Test scenario: both selections are `All`
    //
    // Since both selections include all possible RPC modules, they should be considered
    // identical.
    let all_modules = RpcModuleSelection::All;
    assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&all_modules)));
    // Test scenario: both `http` and `ws` are `None`
    //
    // When both arguments are `None`, the function should return `true` because no modules are
    // selected.
    assert!(RpcModuleSelection::are_identical(None, None));
    // Test scenario: both selections contain identical sets of specific modules
    //
    // In this case, both selections contain the same modules (`Eth` and `Admin`),
    // so they should be considered identical.
    let selection1 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]);
    let selection2 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]);
    assert!(RpcModuleSelection::are_identical(Some(&selection1), Some(&selection2)));
    // Test scenario: one selection is `All`, the other is `Standard`
    //
    // `All` includes all possible modules, while `Standard` includes a specific set of modules.
    // Since `Standard` does not cover all modules, these two selections should not be
    // considered identical.
    let standard = RpcModuleSelection::Standard;
    assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&standard)));
    // Test scenario: one is `None`, the other is an empty selection
    //
    // When one selection is `None` and the other is an empty selection (no modules),
    // they should be considered identical because neither selects any modules.
    let empty_selection = RpcModuleSelection::Selection(HashSet::new());
    assert!(RpcModuleSelection::are_identical(None, Some(&empty_selection)));
    assert!(RpcModuleSelection::are_identical(Some(&empty_selection), None));
    // Test scenario: one is `None`, the other is a non-empty selection
    //
    // If one selection is `None` and the other contains modules, they should not be considered
    // identical because `None` represents no selection, while the other explicitly
    // selects modules.
    let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]);
    assert!(!RpcModuleSelection::are_identical(None, Some(&non_empty_selection)));
    assert!(!RpcModuleSelection::are_identical(Some(&non_empty_selection), None));
    // Test scenario: `All` vs. non-full selection
    //
    // If one selection is `All` (which includes all modules) and the other contains only a
    // subset of modules, they should not be considered identical.
    let partial_selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]);
    assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&partial_selection)));
    // Test scenario: full selection vs `All`
    //
    // If the other selection explicitly selects all available modules, it should be identical
    // to `All`.
    let full_selection =
        RpcModuleSelection::from(RethRpcModule::modules().into_iter().collect::<HashSet<_>>());
    assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&full_selection)));
    // Test scenario: different non-empty selections
    //
    // If the two selections contain different sets of modules, they should not be considered
    // identical.
    let selection3 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]);
    let selection4 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Web3]);
    assert!(!RpcModuleSelection::are_identical(Some(&selection3), Some(&selection4)));
    // Test scenario: `Standard` vs an equivalent selection
    //
    // The `Standard` selection includes a predefined set of modules. If we explicitly create
    // a selection with the same set of modules, they should be considered identical.
    let matching_standard =
        RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]);
    assert!(RpcModuleSelection::are_identical(Some(&standard), Some(&matching_standard)));
    // Test scenario: `Standard` vs non-matching selection
    //
    // If the selection does not match the modules included in `Standard`, they should not be
    // considered identical.
    let non_matching_standard =
        RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]);
    assert!(!RpcModuleSelection::are_identical(Some(&standard), Some(&non_matching_standard)));
}
#[test]
fn test_rpc_module_selection_append() {
    // Test append on Standard selection: the standard modules are retained.
    let selection = RpcModuleSelection::Standard;
    let new_selection = selection.append(RethRpcModule::Admin);
    assert!(new_selection.contains(&RethRpcModule::Eth));
    assert!(new_selection.contains(&RethRpcModule::Net));
    assert!(new_selection.contains(&RethRpcModule::Web3));
    assert!(new_selection.contains(&RethRpcModule::Admin));
    // Test append on empty Selection
    let selection = RpcModuleSelection::Selection(HashSet::new());
    let new_selection = selection.append(RethRpcModule::Eth);
    assert!(new_selection.contains(&RethRpcModule::Eth));
    assert_eq!(new_selection.len(), 1);
    // Test append on All (should return All)
    let selection = RpcModuleSelection::All;
    let new_selection = selection.append(RethRpcModule::Eth);
    assert_eq!(new_selection, RpcModuleSelection::All);
}
#[test]
fn test_rpc_module_selection_extend() {
    // Test extend on Standard selection: the standard modules are retained.
    let mut selection = RpcModuleSelection::Standard;
    selection.extend(vec![RethRpcModule::Admin, RethRpcModule::Debug]);
    assert!(selection.contains(&RethRpcModule::Eth));
    assert!(selection.contains(&RethRpcModule::Net));
    assert!(selection.contains(&RethRpcModule::Web3));
    assert!(selection.contains(&RethRpcModule::Admin));
    assert!(selection.contains(&RethRpcModule::Debug));
    // Test extend on empty Selection
    let mut selection = RpcModuleSelection::Selection(HashSet::new());
    selection.extend(vec![RethRpcModule::Eth, RethRpcModule::Admin]);
    assert!(selection.contains(&RethRpcModule::Eth));
    assert!(selection.contains(&RethRpcModule::Admin));
    assert_eq!(selection.len(), 2);
    // Test extend on All (should be no-op)
    let mut selection = RpcModuleSelection::All;
    selection.extend(vec![RethRpcModule::Eth, RethRpcModule::Admin]);
    assert_eq!(selection, RpcModuleSelection::All);
}
#[test]
fn test_rpc_module_selection_from_str() {
    // Test empty string returns default (empty) selection
    let result = RpcModuleSelection::from_str("");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default()));
    // Test "all" (case insensitive) returns All variant
    let result = RpcModuleSelection::from_str("all");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::All);
    let result = RpcModuleSelection::from_str("All");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::All);
    let result = RpcModuleSelection::from_str("ALL");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::All);
    // Test "none" (case insensitive) returns empty selection
    let result = RpcModuleSelection::from_str("none");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default()));
    let result = RpcModuleSelection::from_str("None");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default()));
    let result = RpcModuleSelection::from_str("NONE");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default()));
    // Test valid selections: "eth,admin"
    let result = RpcModuleSelection::from_str("eth,admin");
    assert!(result.is_ok());
    let expected_selection =
        RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]);
    assert_eq!(result.unwrap(), expected_selection);
    // Test valid selection with extra spaces: " eth , admin " (entries are trimmed)
    let result = RpcModuleSelection::from_str(" eth , admin ");
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), expected_selection);
    // Test invalid selection should return error
    let result = RpcModuleSelection::from_str("invalid,unknown");
    assert!(result.is_err());
    assert_eq!(result.unwrap_err(), ParseError::VariantNotFound);
    // Test single valid selection: "eth"
    let result = RpcModuleSelection::from_str("eth");
    assert!(result.is_ok());
    let expected_selection = RpcModuleSelection::from([RethRpcModule::Eth]);
    assert_eq!(result.unwrap(), expected_selection);
    // Test single invalid selection: "unknown"
    let result = RpcModuleSelection::from_str("unknown");
    assert!(result.is_err());
    assert_eq!(result.unwrap_err(), ParseError::VariantNotFound);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-server-types/src/result.rs | crates/rpc/rpc-server-types/src/result.rs | //! Additional helpers for converting errors.
use std::fmt;
use alloy_eips::BlockId;
use alloy_rpc_types_engine::PayloadError;
use jsonrpsee_core::RpcResult;
use reth_errors::ConsensusError;
/// Helper trait to easily convert various `Result` types into [`RpcResult`]
pub trait ToRpcResult<Ok, Err>: Sized {
    /// Converts result to [`RpcResult`] by converting error variant to
    /// [`jsonrpsee_types::error::ErrorObject`] with the internal error code and the error's
    /// `Display` output as the message.
    fn to_rpc_result(self) -> RpcResult<Ok>
    where
        Err: fmt::Display,
    {
        self.map_internal_err(|err| err.to_string())
    }
    /// Converts this type into an [`RpcResult`], mapping the error variant to an error code,
    /// message and optional binary data.
    fn map_rpc_err<'a, F, M>(self, op: F) -> RpcResult<Ok>
    where
        F: FnOnce(Err) -> (i32, M, Option<&'a [u8]>),
        M: Into<String>;
    /// Converts this type into an [`RpcResult`] with the
    /// [`jsonrpsee_types::error::INTERNAL_ERROR_CODE`] and the given message.
    fn map_internal_err<F, M>(self, op: F) -> RpcResult<Ok>
    where
        F: FnOnce(Err) -> M,
        M: Into<String>;
    /// Converts this type into an [`RpcResult`] with the
    /// [`jsonrpsee_types::error::INTERNAL_ERROR_CODE`] and given message and data.
    fn map_internal_err_with_data<'a, F, M>(self, op: F) -> RpcResult<Ok>
    where
        F: FnOnce(Err) -> (M, &'a [u8]),
        M: Into<String>;
    /// Adds a message to the error variant and returns an internal Error.
    ///
    /// This is shorthand for `Self::map_internal_err(|err| format!("{msg}: {err}"))`.
    fn with_message(self, msg: &str) -> RpcResult<Ok>;
}
/// A macro that implements the `ToRpcResult` for a specific error type
///
/// The generated impl maps the `Err` variant into a jsonrpsee error object via the free functions
/// in this module ([`rpc_err`], [`internal_rpc_err`], [`internal_rpc_err_with_data`]).
#[macro_export]
macro_rules! impl_to_rpc_result {
    ($err:ty) => {
        impl<Ok> ToRpcResult<Ok, $err> for Result<Ok, $err> {
            #[inline]
            fn map_rpc_err<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult<Ok>
            where
                F: FnOnce($err) -> (i32, M, Option<&'a [u8]>),
                M: Into<String>,
            {
                match self {
                    Ok(t) => Ok(t),
                    Err(err) => {
                        let (code, msg, data) = op(err);
                        Err($crate::result::rpc_err(code, msg, data))
                    }
                }
            }
            // Fix: the previous impl declared a spurious `'a` lifetime parameter here that the
            // trait declaration does not have and the signature never uses; it has been removed.
            #[inline]
            fn map_internal_err<F, M>(self, op: F) -> jsonrpsee_core::RpcResult<Ok>
            where
                F: FnOnce($err) -> M,
                M: Into<String>,
            {
                self.map_err(|err| $crate::result::internal_rpc_err(op(err)))
            }
            #[inline]
            fn map_internal_err_with_data<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult<Ok>
            where
                F: FnOnce($err) -> (M, &'a [u8]),
                M: Into<String>,
            {
                match self {
                    Ok(t) => Ok(t),
                    Err(err) => {
                        let (msg, data) = op(err);
                        Err($crate::result::internal_rpc_err_with_data(msg, data))
                    }
                }
            }
            #[inline]
            fn with_message(self, msg: &str) -> jsonrpsee_core::RpcResult<Ok> {
                match self {
                    Ok(t) => Ok(t),
                    Err(err) => {
                        let msg = format!("{msg}: {err}");
                        Err($crate::result::internal_rpc_err(msg))
                    }
                }
            }
        }
    };
}
// Provide `ToRpcResult` for the error types commonly surfaced by reth's payload, consensus,
// provider and networking layers.
impl_to_rpc_result!(PayloadError);
impl_to_rpc_result!(ConsensusError);
impl_to_rpc_result!(reth_errors::RethError);
impl_to_rpc_result!(reth_errors::ProviderError);
impl_to_rpc_result!(reth_network_api::NetworkError);
/// Constructs an invalid params JSON-RPC error.
pub fn invalid_params_rpc_err(
msg: impl Into<String>,
) -> jsonrpsee_types::error::ErrorObject<'static> {
rpc_err(jsonrpsee_types::error::INVALID_PARAMS_CODE, msg, None)
}
/// Constructs an internal JSON-RPC error.
pub fn internal_rpc_err(msg: impl Into<String>) -> jsonrpsee_types::error::ErrorObject<'static> {
rpc_err(jsonrpsee_types::error::INTERNAL_ERROR_CODE, msg, None)
}
/// Constructs an internal JSON-RPC error with data
pub fn internal_rpc_err_with_data(
msg: impl Into<String>,
data: &[u8],
) -> jsonrpsee_types::error::ErrorObject<'static> {
rpc_err(jsonrpsee_types::error::INTERNAL_ERROR_CODE, msg, Some(data))
}
/// Constructs a JSON-RPC error from an arbitrary code and message, without extra data.
pub fn rpc_error_with_code(
    code: i32,
    msg: impl Into<String>,
) -> jsonrpsee_types::error::ErrorObject<'static> {
    rpc_err(code, msg, None)
}
/// Constructs a JSON-RPC error, consisting of `code`, `message` and optional `data`.
pub fn rpc_err(
code: i32,
msg: impl Into<String>,
data: Option<&[u8]>,
) -> jsonrpsee_types::error::ErrorObject<'static> {
jsonrpsee_types::error::ErrorObject::owned(
code,
msg.into(),
data.map(|data| {
jsonrpsee_core::to_json_raw_value(&alloy_primitives::hex::encode_prefixed(data))
.expect("serializing String can't fail")
}),
)
}
/// Formats a [`BlockId`] into an error message.
pub fn block_id_to_str(id: BlockId) -> String {
    match id {
        BlockId::Number(num) => num.to_string(),
        // Distinguish hashes that were requested with the canonical flag set.
        BlockId::Hash(h) if h.require_canonical == Some(true) => {
            format!("canonical hash {}", h.block_hash)
        }
        BlockId::Hash(h) => format!("hash {}", h.block_hash),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_errors::{RethError, RethResult};
    // Compile-time check that the given result type implements `ToRpcResult`.
    const fn assert_rpc_result<T, E, TRR: ToRpcResult<T, E>>() {}
    #[test]
    fn can_convert_rpc() {
        assert_rpc_result::<(), RethError, RethResult<()>>();
        // `map_internal_err` must pass the `Ok` value through untouched.
        let res = RethResult::Ok(100);
        let rpc_res = res.map_internal_err(|_| "This is a message");
        let val = rpc_res.unwrap();
        assert_eq!(val, 100);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-server-types/src/constants.rs | crates/rpc/rpc-server-types/src/constants.rs | use std::cmp::max;
/// The default port for the http server
pub const DEFAULT_HTTP_RPC_PORT: u16 = 8545;
/// The default port for the ws server
pub const DEFAULT_WS_RPC_PORT: u16 = 8546;
/// The default port for the auth server.
///
/// NOTE(review): presumably the authenticated engine-API endpoint — confirm against the auth
/// server setup.
pub const DEFAULT_AUTH_PORT: u16 = 8551;
/// The default maximum block range allowed to filter
pub const DEFAULT_MAX_BLOCKS_PER_FILTER: u64 = 100_000;
/// The default maximum number of logs in a single response.
pub const DEFAULT_MAX_LOGS_PER_RESPONSE: usize = 20_000;
/// The default maximum number of blocks for `trace_filter` requests.
pub const DEFAULT_MAX_TRACE_FILTER_BLOCKS: u64 = 100;
/// The default maximum number of tracing requests allowed to run concurrently.
///
/// Tracing is mostly CPU bound, so the limit is kept below the number of available cores in
/// order to minimize the impact on the rest of the system.
pub fn default_max_tracing_requests() -> usize {
    // Keep a couple of cores free for the rest of the system.
    const RESERVED: usize = 2;
    match std::thread::available_parallelism() {
        Ok(cpus) => max(cpus.get().saturating_sub(RESERVED), RESERVED),
        // Fall back to a fixed limit if the core count cannot be queried.
        Err(_) => 25,
    }
}
/// The default number of getproof calls we are allowing to run concurrently.
pub const DEFAULT_PROOF_PERMITS: usize = 25;
/// The default IPC endpoint
#[cfg(windows)]
pub const DEFAULT_IPC_ENDPOINT: &str = r"\\.\pipe\reth.ipc";
/// The default IPC endpoint
#[cfg(not(windows))]
pub const DEFAULT_IPC_ENDPOINT: &str = "/tmp/reth.ipc";
/// The `engine_api` IPC endpoint
#[cfg(windows)]
pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = r"\\.\pipe\reth_engine_api.ipc";
/// The `engine_api` IPC endpoint
#[cfg(not(windows))]
pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = "/tmp/reth_engine_api.ipc";
/// The default limit for blocks count in `eth_simulateV1`.
pub const DEFAULT_MAX_SIMULATE_BLOCKS: u64 = 256;
/// The default eth historical proof window.
pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0;
/// The default eth tx fee cap is 1 ETH (expressed in wei).
pub const DEFAULT_TX_FEE_CAP_WEI: u128 = 1_000_000_000_000_000_000u128;
/// Maximum eth historical proof window. Equivalent to roughly 6 months of data on a 12
/// second block time, and a month on a 2 second block time.
pub const MAX_ETH_PROOF_WINDOW: u64 = 28 * 24 * 60 * 60 / 2;
/// GPO (gas price oracle) specific constants
pub mod gas_oracle {
    use alloy_primitives::U256;
    /// The number of transactions sampled in a block
    pub const SAMPLE_NUMBER: usize = 3_usize;
    /// The default maximum number of blocks to use for the gas price oracle.
    pub const MAX_HEADER_HISTORY: u64 = 1024;
    /// The default maximum number of allowed reward percentiles
    pub const MAX_REWARD_PERCENTILE_COUNT: u64 = 100;
    /// Number of recent blocks to check for gas price
    pub const DEFAULT_GAS_PRICE_BLOCKS: u32 = 20;
    /// The percentile of gas prices to use for the estimate
    pub const DEFAULT_GAS_PRICE_PERCENTILE: u32 = 60;
    /// Maximum transaction priority fee (or gas price before London Fork) to be recommended by the
    /// gas price oracle: 500 gwei (500_000_000_000 wei).
    pub const DEFAULT_MAX_GAS_PRICE: U256 = U256::from_limbs([500_000_000_000u64, 0, 0, 0]);
    /// The default minimum gas price (2 wei), under which the sample will be ignored
    pub const DEFAULT_IGNORE_GAS_PRICE: U256 = U256::from_limbs([2u64, 0, 0, 0]);
    /// The default gas limit for `eth_call` and adjacent calls.
    ///
    /// This is different from the default to regular 30M block gas limit
    /// `ETHEREUM_BLOCK_GAS_LIMIT_30M` to allow for more complex calls.
    pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000;
    /// Allowed error ratio for gas estimation
    /// Taken from Geth's implementation in order to pass the hive tests
    /// <https://github.com/ethereum/go-ethereum/blob/a5a4fa7032bb248f5a7c40f4e8df2b131c4186a4/internal/ethapi/api.go#L56>
    pub const ESTIMATE_GAS_ERROR_RATIO: f64 = 0.015;
    /// Gas required at the beginning of a call.
    pub const CALL_STIPEND_GAS: u64 = 2_300;
}
/// Cache specific constants
pub mod cache {
    // Cache sizes are expressed in number of cached items, not bytes.
    /// Default cache size for the block cache: 5000 blocks.
    pub const DEFAULT_BLOCK_CACHE_MAX_LEN: u32 = 5000;
    /// Default cache size for the receipts cache: 2000 receipts.
    pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000;
    /// Default cache size for the header cache: 1000 headers.
    pub const DEFAULT_HEADER_CACHE_MAX_LEN: u32 = 1000;
    /// Default number of concurrent database requests.
    pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/node.rs | crates/rpc/rpc-eth-api/src/node.rs | //! Helper trait for interfacing with [`FullNodeComponents`].
use reth_chain_state::CanonStateSubscriptions;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks, Hardforks};
use reth_evm::ConfigureEvm;
use reth_network_api::NetworkInfo;
use reth_node_api::{FullNodeComponents, NodePrimitives, PrimitivesTy};
use reth_primitives_traits::{BlockTy, HeaderTy, ReceiptTy, TxTy};
use reth_rpc_eth_types::EthStateCache;
use reth_storage_api::{
BlockReader, BlockReaderIdExt, StageCheckpointReader, StateProviderFactory,
};
use reth_transaction_pool::{PoolTransaction, TransactionPool};
/// Helper trait that provides the same interface as [`FullNodeComponents`] but without requiring
/// implementation of trait bounds.
///
/// This trait is structurally equivalent to [`FullNodeComponents`], exposing the same associated
/// types and methods. However, it doesn't enforce the trait bounds required by
/// [`FullNodeComponents`]. This makes it useful for RPC types that need access to node components
/// where the full trait bounds of the components are not necessary.
///
/// Every type that is a [`FullNodeComponents`] also implements this trait.
pub trait RpcNodeCore: Clone + Send + Sync + Unpin + 'static {
    /// Blockchain data primitives.
    type Primitives: NodePrimitives;
    /// The provider type used to interact with the node.
    ///
    /// The bounds tie the provider's block, receipt, header and transaction types to
    /// [`Self::Primitives`] so that all components agree on the same primitive types.
    type Provider: BlockReaderIdExt<
            Block = BlockTy<Self::Primitives>,
            Receipt = ReceiptTy<Self::Primitives>,
            Header = HeaderTy<Self::Primitives>,
            Transaction = TxTy<Self::Primitives>,
        > + ChainSpecProvider<
            ChainSpec: EthChainSpec<Header = HeaderTy<Self::Primitives>>
                           + Hardforks
                           + EthereumHardforks,
        > + StateProviderFactory
        + CanonStateSubscriptions<Primitives = Self::Primitives>
        + StageCheckpointReader
        + Send
        + Sync
        + Clone
        + Unpin
        + 'static;
    /// The transaction pool of the node.
    type Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Self::Primitives>>>;
    /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine.
    type Evm: ConfigureEvm<Primitives = Self::Primitives> + Send + Sync + 'static;
    /// Network API.
    type Network: NetworkInfo + Clone;
    /// Returns the transaction pool of the node.
    fn pool(&self) -> &Self::Pool;
    /// Returns the node's evm config.
    fn evm_config(&self) -> &Self::Evm;
    /// Returns the handle to the network
    fn network(&self) -> &Self::Network;
    /// Returns the provider of the node.
    fn provider(&self) -> &Self::Provider;
}
// Blanket impl: every `FullNodeComponents` with a suitable chain spec is an `RpcNodeCore`;
// each accessor simply forwards to the corresponding `FullNodeComponents` method.
impl<T> RpcNodeCore for T
where
    T: FullNodeComponents<Provider: ChainSpecProvider<ChainSpec: Hardforks + EthereumHardforks>>,
{
    type Primitives = PrimitivesTy<T::Types>;
    type Provider = T::Provider;
    type Pool = T::Pool;
    type Evm = T::Evm;
    type Network = T::Network;
    #[inline]
    fn pool(&self) -> &Self::Pool {
        FullNodeComponents::pool(self)
    }
    #[inline]
    fn evm_config(&self) -> &Self::Evm {
        FullNodeComponents::evm_config(self)
    }
    #[inline]
    fn network(&self) -> &Self::Network {
        FullNodeComponents::network(self)
    }
    #[inline]
    fn provider(&self) -> &Self::Provider {
        FullNodeComponents::provider(self)
    }
}
/// Additional components, aside from the core node components, needed to run the `eth_`
/// namespace API server.
pub trait RpcNodeCoreExt: RpcNodeCore<Provider: BlockReader> {
    /// Returns handle to RPC cache service.
    fn cache(&self) -> &EthStateCache<Self::Primitives>;
}
/// An adapter that allows to construct [`RpcNodeCore`] from components.
#[derive(Debug, Clone)]
pub struct RpcNodeCoreAdapter<Provider, Pool, Network, Evm> {
    /// Blockchain data provider.
    provider: Provider,
    /// Transaction pool handle.
    pool: Pool,
    /// Network handle.
    network: Network,
    /// EVM configuration.
    evm_config: Evm,
}
impl<Provider, Pool, Network, Evm> RpcNodeCoreAdapter<Provider, Pool, Network, Evm> {
    /// Creates a new `RpcNodeCoreAdapter` instance from its four components.
    pub const fn new(provider: Provider, pool: Pool, network: Network, evm_config: Evm) -> Self {
        Self { provider, pool, network, evm_config }
    }
}
// The adapter satisfies `RpcNodeCore` whenever its components satisfy the corresponding
// associated-type bounds; each accessor returns a reference to the stored component.
impl<Provider, Pool, Network, Evm> RpcNodeCore for RpcNodeCoreAdapter<Provider, Pool, Network, Evm>
where
    Provider: BlockReaderIdExt<
            Block = BlockTy<Evm::Primitives>,
            Receipt = ReceiptTy<Evm::Primitives>,
            Header = HeaderTy<Evm::Primitives>,
            Transaction = TxTy<Evm::Primitives>,
        > + ChainSpecProvider<
            ChainSpec: EthChainSpec<Header = HeaderTy<Evm::Primitives>>
                           + Hardforks
                           + EthereumHardforks,
        > + StateProviderFactory
        + CanonStateSubscriptions<Primitives = Evm::Primitives>
        + StageCheckpointReader
        + Send
        + Sync
        + Unpin
        + Clone
        + 'static,
    Evm: ConfigureEvm + Clone + 'static,
    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Evm::Primitives>>>
        + Unpin
        + 'static,
    Network: NetworkInfo + Clone + Unpin + 'static,
{
    type Primitives = Evm::Primitives;
    type Provider = Provider;
    type Pool = Pool;
    type Evm = Evm;
    type Network = Network;
    fn pool(&self) -> &Self::Pool {
        &self.pool
    }
    fn evm_config(&self) -> &Self::Evm {
        &self.evm_config
    }
    fn network(&self) -> &Self::Network {
        &self.network
    }
    fn provider(&self) -> &Self::Provider {
        &self.provider
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/lib.rs | crates/rpc/rpc-eth-api/src/lib.rs | //! Reth RPC `eth_` API implementation
//!
//! ## Feature Flags
//!
//! - `client`: Enables JSON-RPC client support.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod bundle;
pub mod core;
pub mod ext;
pub mod filter;
pub mod helpers;
pub mod node;
pub mod pubsub;
pub mod types;
// Server traits and commonly used types re-exported at the crate root.
pub use bundle::{EthBundleApiServer, EthCallBundleApiServer};
pub use core::{EthApiServer, FullEthApiServer};
pub use ext::L2EthApiExtServer;
pub use filter::{EngineEthFilter, EthFilterApiServer, QueryLimits};
pub use node::{RpcNodeCore, RpcNodeCoreExt};
pub use pubsub::EthPubSubApiServer;
pub use reth_rpc_convert::*;
pub use reth_rpc_eth_types::error::{
    AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError,
};
pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction};
// Client re-exports are only available with the `client` feature enabled.
#[cfg(feature = "client")]
pub use bundle::{EthBundleApiClient, EthCallBundleApiClient};
#[cfg(feature = "client")]
pub use core::EthApiClient;
#[cfg(feature = "client")]
pub use ext::L2EthApiExtClient;
#[cfg(feature = "client")]
pub use filter::EthFilterApiClient;
// NOTE(review): the `as _` import presumably keeps the `reth-trie-common` dependency counted as
// used (see the `unused_crate_dependencies` lint enabled above) — confirm before removing.
use reth_trie_common as _;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/bundle.rs | crates/rpc/rpc-eth-api/src/bundle.rs | //! Additional `eth_` RPC API for bundles.
//!
//! See also <https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint>
use alloy_primitives::{Bytes, B256};
use alloy_rpc_types_mev::{
EthBundleHash, EthCallBundle, EthCallBundleResponse, EthCancelBundle,
EthCancelPrivateTransaction, EthSendBundle, EthSendPrivateTransaction,
};
use jsonrpsee::proc_macros::rpc;
/// A subset of the [`EthBundleApi`] API interface that only supports `eth_callBundle`.
///
/// Useful for nodes that want to expose bundle *simulation* without accepting bundle
/// *submission* (`eth_sendBundle` etc.).
// The `client` feature toggles whether jsonrpsee also generates a client implementation;
// the server trait and the `eth` namespace are emitted in both configurations.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EthCallBundleApi {
    /// `eth_callBundle` can be used to simulate a bundle against a specific block number,
    /// including simulating a bundle at the top of the next block.
    ///
    /// This only simulates execution and returns the results; nothing is submitted on-chain.
    #[method(name = "callBundle")]
    async fn call_bundle(
        &self,
        request: EthCallBundle,
    ) -> jsonrpsee::core::RpcResult<EthCallBundleResponse>;
}
/// The __full__ Eth bundle rpc interface.
///
/// Covers bundle submission, simulation and cancellation as well as the private
/// transaction endpoints.
///
/// See also <https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint>
// The `client` feature toggles whether jsonrpsee also generates a client implementation;
// the server trait and the `eth` namespace are emitted in both configurations.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EthBundleApi {
    /// `eth_sendBundle` can be used to send your bundles to the builder.
    ///
    /// Returns the hash identifying the submitted bundle.
    #[method(name = "sendBundle")]
    async fn send_bundle(&self, bundle: EthSendBundle)
        -> jsonrpsee::core::RpcResult<EthBundleHash>;

    /// `eth_callBundle` can be used to simulate a bundle against a specific block number,
    /// including simulating a bundle at the top of the next block.
    ///
    /// This only simulates execution and returns the results; nothing is submitted on-chain.
    #[method(name = "callBundle")]
    async fn call_bundle(
        &self,
        request: EthCallBundle,
    ) -> jsonrpsee::core::RpcResult<EthCallBundleResponse>;

    /// `eth_cancelBundle` is used to prevent a submitted bundle from being included on-chain. See [bundle cancellations](https://docs.flashbots.net/flashbots-auction/advanced/bundle-cancellations) for more information.
    #[method(name = "cancelBundle")]
    async fn cancel_bundle(&self, request: EthCancelBundle) -> jsonrpsee::core::RpcResult<()>;

    /// `eth_sendPrivateTransaction` is used to send a single transaction to Flashbots. Flashbots will attempt to build a block including the transaction for the next 25 blocks. See [Private Transactions](https://docs.flashbots.net/flashbots-protect/additional-documentation/eth-sendPrivateTransaction) for more info.
    ///
    /// Returns the hash of the submitted transaction.
    #[method(name = "sendPrivateTransaction")]
    async fn send_private_transaction(
        &self,
        request: EthSendPrivateTransaction,
    ) -> jsonrpsee::core::RpcResult<B256>;

    /// The `eth_sendPrivateRawTransaction` method can be used to send private transactions to
    /// the RPC endpoint. Private transactions are protected from frontrunning and kept
    /// private until included in a block. A request to this endpoint needs to follow
    /// the standard `eth_sendRawTransaction`
    #[method(name = "sendPrivateRawTransaction")]
    async fn send_private_raw_transaction(&self, bytes: Bytes) -> jsonrpsee::core::RpcResult<B256>;

    /// The `eth_cancelPrivateTransaction` method stops private transactions from being
    /// submitted for future blocks.
    ///
    /// A transaction can only be cancelled if the request is signed by the same key as the
    /// `eth_sendPrivateTransaction` call submitting the transaction in the first place.
    ///
    /// Returns `true` on success (whether the cancellation took effect is determined by the
    /// implementation).
    #[method(name = "cancelPrivateTransaction")]
    async fn cancel_private_transaction(
        &self,
        request: EthCancelPrivateTransaction,
    ) -> jsonrpsee::core::RpcResult<bool>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.