repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/validator.rs | crates/optimism/payload/src/validator.rs | //! Validates execution payload wrt Optimism consensus rules
use alloc::sync::Arc;
use alloy_consensus::Block;
use alloy_rpc_types_engine::PayloadError;
use derive_more::{Constructor, Deref};
use op_alloy_rpc_types_engine::{OpExecutionData, OpPayloadError};
use reth_optimism_forks::OpHardforks;
use reth_payload_validator::{cancun, prague, shanghai};
use reth_primitives_traits::{Block as _, SealedBlock, SignedTransaction};
/// Execution payload validator.
///
/// Holds the chain spec behind an [`Arc`], so cloning the validator is cheap; the
/// `Deref` derive forwards straight to the inner chain spec.
#[derive(Clone, Debug, Deref, Constructor)]
pub struct OpExecutionPayloadValidator<ChainSpec> {
    /// Chain spec to validate against.
    #[deref]
    inner: Arc<ChainSpec>,
}
impl<ChainSpec> OpExecutionPayloadValidator<ChainSpec>
where
    ChainSpec: OpHardforks,
{
    /// Returns a reference to the chain spec this validator checks payloads against.
    pub fn chain_spec(&self) -> &ChainSpec {
        &self.inner
    }
    /// Ensures that the given payload does not violate any consensus rules that concern the block's
    /// layout.
    ///
    /// Convenience wrapper around the free [`ensure_well_formed_payload`] function, supplying this
    /// validator's chain spec.
    ///
    /// See also [`ensure_well_formed_payload`].
    pub fn ensure_well_formed_payload<T: SignedTransaction>(
        &self,
        payload: OpExecutionData,
    ) -> Result<SealedBlock<Block<T>>, OpPayloadError> {
        ensure_well_formed_payload(self.chain_spec(), payload)
    }
}
/// Ensures that the given payload does not violate any consensus rules that concern the block's
/// layout, like:
/// - missing or invalid base fee
/// - invalid extra data
/// - invalid transactions
/// - incorrect hash
/// - block contains blob transactions or blob versioned hashes
/// - block contains l1 withdrawals
///
/// The checks are done in the order that conforms with the engine-API specification.
///
/// This is intended to be invoked after receiving the payload from the consensus layer (CL)
/// over the engine API.
///
/// The additional fields, starting with
/// [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields), are not part of
/// the payload itself, but are extra fields introduced with the `engine_newPayloadV3` RPC call.
/// See also <https://specs.optimism.io/protocol/exec-engine.html#engine_newpayloadv3>
///
/// If the Cancun fields are provided, this also validates that the versioned hashes in the block
/// are empty, as well as those passed in the sidecar.
///
/// Validation according to specs <https://specs.optimism.io/protocol/exec-engine.html#engine-api>.
pub fn ensure_well_formed_payload<ChainSpec, T>(
    chain_spec: ChainSpec,
    payload: OpExecutionData,
) -> Result<SealedBlock<Block<T>>, OpPayloadError>
where
    ChainSpec: OpHardforks,
    T: SignedTransaction,
{
    let OpExecutionData { payload, sidecar } = payload;
    // Hash the payload claims for itself; compared against the recomputed hash below.
    let expected_hash = payload.block_hash();
    // First parse the block
    let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow();
    // Ensure the hash included in the payload matches the block hash
    if expected_hash != sealed_block.hash() {
        return Err(PayloadError::BlockHash {
            execution: sealed_block.hash(),
            consensus: expected_hash,
        })?
    }
    // Fork-dependent field checks, in engine-API order: Shanghai, then Cancun/Ecotone
    // (header + sidecar blob fields), then Prague/Isthmus.
    shanghai::ensure_well_formed_fields(
        sealed_block.body(),
        chain_spec.is_shanghai_active_at_timestamp(sealed_block.timestamp),
    )?;
    cancun::ensure_well_formed_header_and_sidecar_fields(
        &sealed_block,
        sidecar.ecotone(),
        chain_spec.is_cancun_active_at_timestamp(sealed_block.timestamp),
    )?;
    prague::ensure_well_formed_fields(
        sealed_block.body(),
        sidecar.isthmus(),
        chain_spec.is_prague_active_at_timestamp(sealed_block.timestamp),
    )?;
    Ok(sealed_block)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/traits.rs | crates/optimism/payload/src/traits.rs | use alloy_consensus::BlockBody;
use reth_optimism_primitives::{transaction::OpTransaction, DepositReceipt};
use reth_payload_primitives::PayloadBuilderAttributes;
use reth_primitives_traits::{FullBlockHeader, NodePrimitives, SignedTransaction, WithEncoded};
use crate::OpPayloadBuilderAttributes;
/// Helper trait to encapsulate common bounds on [`NodePrimitives`] for OP payload builder.
pub trait OpPayloadPrimitives:
    NodePrimitives<
        Receipt: DepositReceipt,
        SignedTx = Self::_TX,
        BlockBody = BlockBody<Self::_TX, Self::_Header>,
        BlockHeader = Self::_Header,
    >
{
    /// Helper AT to bound [`NodePrimitives::Block`] type without causing bound cycle.
    ///
    /// Implementation detail mirroring [`NodePrimitives::SignedTx`]; prefer that in user code.
    type _TX: SignedTransaction + OpTransaction;
    /// Helper AT to bound [`NodePrimitives::Block`] type without causing bound cycle.
    ///
    /// Implementation detail mirroring [`NodePrimitives::BlockHeader`]; prefer that in user code.
    type _Header: FullBlockHeader;
}
// Blanket impl: any `NodePrimitives` whose signed transaction is an OP transaction and whose
// body/header associated types line up automatically satisfies `OpPayloadPrimitives`.
impl<Tx, T, Header> OpPayloadPrimitives for T
where
    Tx: SignedTransaction + OpTransaction,
    T: NodePrimitives<
        SignedTx = Tx,
        Receipt: DepositReceipt,
        BlockBody = BlockBody<Tx, Header>,
        BlockHeader = Header,
    >,
    Header: FullBlockHeader,
{
    type _TX = Tx;
    type _Header = Header;
}
/// Attributes for the OP payload builder.
pub trait OpAttributes: PayloadBuilderAttributes {
    /// Primitive transaction type.
    type Transaction: SignedTransaction;
    /// Whether to use the transaction pool for the payload.
    fn no_tx_pool(&self) -> bool;
    /// Sequencer transactions to include in the payload, each paired with its encoded bytes.
    fn sequencer_transactions(&self) -> &[WithEncoded<Self::Transaction>];
}
impl<T: SignedTransaction> OpAttributes for OpPayloadBuilderAttributes<T> {
    type Transaction = T;
    // Forwards to the `no_tx_pool` flag stored on the attributes.
    fn no_tx_pool(&self) -> bool {
        self.no_tx_pool
    }
    // Forwards to the sequencer transactions stored on the attributes.
    fn sequencer_transactions(&self) -> &[WithEncoded<Self::Transaction>] {
        &self.transactions
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/config.rs | crates/transaction-pool/src/config.rs | use crate::{
maintain::MAX_QUEUED_TRANSACTION_LIFETIME,
pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE},
PoolSize, TransactionOrigin,
};
use alloy_consensus::constants::EIP4844_TX_TYPE_ID;
use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE};
use alloy_primitives::Address;
use std::{collections::HashSet, ops::Mul, time::Duration};
/// Guarantees max transactions for one sender, compatible with geth/erigon
pub const TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16;
/// The default maximum allowed number of transactions in the given subpool.
pub const TXPOOL_SUBPOOL_MAX_TXS_DEFAULT: usize = 10_000;
/// The default maximum allowed size of the given subpool, in megabytes.
pub const TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT: usize = 20;
/// The default number of additional validation tasks.
pub const DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS: usize = 1;
/// Default price bump (in %) for the transaction pool underpriced check.
pub const DEFAULT_PRICE_BUMP: u128 = 10;
/// Replace blob price bump (in %) for the transaction pool underpriced check.
///
/// This enforces that a blob transaction requires a 100% price bump to be replaced
pub const REPLACE_BLOB_PRICE_BUMP: u128 = 100;
/// Default maximum number of new pending transactions to buffer for broadcasting.
pub const MAX_NEW_PENDING_TXS_NOTIFICATIONS: usize = 200;
/// Default maximum allowed in-flight delegated transactions per account.
pub const DEFAULT_MAX_INFLIGHT_DELEGATED_SLOTS: usize = 1;
/// Configuration options for the Transaction pool.
#[derive(Debug, Clone)]
pub struct PoolConfig {
    /// Max number of transactions in the pending sub-pool
    pub pending_limit: SubPoolLimit,
    /// Max number of transactions in the basefee sub-pool
    pub basefee_limit: SubPoolLimit,
    /// Max number of transactions in the queued sub-pool
    pub queued_limit: SubPoolLimit,
    /// Max number of transactions in the blob sub-pool
    pub blob_limit: SubPoolLimit,
    /// Blob cache size (`None` leaves the choice to the blobstore implementation)
    pub blob_cache_size: Option<u32>,
    /// Max number of executable transaction slots guaranteed per account
    pub max_account_slots: usize,
    /// Price bump (in %) for the transaction pool underpriced check.
    pub price_bumps: PriceBumpConfig,
    /// Minimum base fee required by the protocol.
    pub minimal_protocol_basefee: u64,
    /// Minimum priority fee required for transaction acceptance into the pool.
    pub minimum_priority_fee: Option<u128>,
    /// The max gas limit for transactions in the pool
    pub gas_limit: u64,
    /// How to handle locally received transactions:
    /// [`TransactionOrigin::Local`](TransactionOrigin).
    pub local_transactions_config: LocalTransactionConfig,
    /// Bound on number of pending transactions from `reth_network::TransactionsManager` to buffer.
    pub pending_tx_listener_buffer_size: usize,
    /// Bound on number of new transactions from `reth_network::TransactionsManager` to buffer.
    pub new_tx_listener_buffer_size: usize,
    /// How many new pending transactions to buffer and send to in-progress iterators.
    pub max_new_pending_txs_notifications: usize,
    /// Maximum lifetime for transactions in the pool
    pub max_queued_lifetime: Duration,
    /// The maximum allowed inflight transactions a delegated sender can have.
    ///
    /// This restricts how many executable transactions a delegated sender can stack.
    pub max_inflight_delegated_slot_limit: usize,
}
impl PoolConfig {
    /// Disables the protocol base-fee check by setting the enforced minimum to zero.
    ///
    /// Ethereum's EIP-1559 base fee can never drop below [`MIN_PROTOCOL_BASE_FEE`], so the pool
    /// enforces that floor by default; this lifts the restriction entirely.
    pub const fn with_disabled_protocol_base_fee(self) -> Self {
        self.with_protocol_base_fee(0)
    }
    /// Sets the minimal protocol base fee the pool enforces.
    ///
    /// Ethereum's EIP-1559 base fee can't drop below [`MIN_PROTOCOL_BASE_FEE`], hence that is the
    /// default floor.
    pub const fn with_protocol_base_fee(mut self, protocol_base_fee: u64) -> Self {
        self.minimal_protocol_basefee = protocol_base_fee;
        self
    }
    /// Sets how many in-flight transaction slots a delegated sender may occupy.
    pub const fn with_max_inflight_delegated_slots(
        mut self,
        max_inflight_delegation_limit: usize,
    ) -> Self {
        self.max_inflight_delegated_slot_limit = max_inflight_delegation_limit;
        self
    }
    /// Returns `true` if any sub-pool is over its configured count or byte-size limit.
    #[inline]
    pub const fn is_exceeded(&self, pool_size: PoolSize) -> bool {
        self.pending_limit.is_exceeded(pool_size.pending, pool_size.pending_size) ||
            self.basefee_limit.is_exceeded(pool_size.basefee, pool_size.basefee_size) ||
            self.queued_limit.is_exceeded(pool_size.queued, pool_size.queued_size) ||
            self.blob_limit.is_exceeded(pool_size.blob, pool_size.blob_size)
    }
}
impl Default for PoolConfig {
    /// Default configuration: geth-compatible per-sender slots, default sub-pool limits,
    /// protocol-minimum base fee, and the 30M Ethereum block gas limit.
    fn default() -> Self {
        Self {
            pending_limit: Default::default(),
            basefee_limit: Default::default(),
            queued_limit: Default::default(),
            blob_limit: Default::default(),
            blob_cache_size: None,
            max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
            price_bumps: Default::default(),
            minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE,
            minimum_priority_fee: None,
            gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
            local_transactions_config: Default::default(),
            pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE,
            new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE,
            max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS,
            max_queued_lifetime: MAX_QUEUED_TRANSACTION_LIFETIME,
            max_inflight_delegated_slot_limit: DEFAULT_MAX_INFLIGHT_DELEGATED_SLOTS,
        }
    }
}
/// Size limits for a sub-pool.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SubPoolLimit {
    /// Upper bound on the number of transactions the sub-pool may hold.
    pub max_txs: usize,
    /// Upper bound on the combined size (in bytes) of transactions in the sub-pool.
    pub max_size: usize,
}
impl SubPoolLimit {
    /// Creates a new instance with the given limits.
    pub const fn new(max_txs: usize, max_size: usize) -> Self {
        Self { max_txs, max_size }
    }
    /// Returns a limit that can never be exceeded.
    pub const fn max() -> Self {
        Self { max_txs: usize::MAX, max_size: usize::MAX }
    }
    /// Returns `true` once either the transaction count or the combined byte size goes
    /// past its configured bound.
    #[inline]
    pub const fn is_exceeded(&self, txs: usize, size: usize) -> bool {
        txs > self.max_txs || size > self.max_size
    }
}
impl Mul<usize> for SubPoolLimit {
    type Output = Self;

    /// Scales both the count and the byte-size limit by `rhs`.
    fn mul(self, rhs: usize) -> Self::Output {
        Self { max_txs: self.max_txs * rhs, max_size: self.max_size * rhs }
    }
}
impl Default for SubPoolLimit {
    fn default() -> Self {
        // whichever limit is hit first: 10_000 transactions or 20 MiB of combined size
        Self {
            max_txs: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
            max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT * 1024 * 1024,
        }
    }
}
/// Price bump config (in %) for the transaction pool underpriced check.
///
/// A replacement transaction must beat the replaced one's fees by at least this
/// percentage to be accepted.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct PriceBumpConfig {
    /// Default price bump (in %) for the transaction pool underpriced check.
    pub default_price_bump: u128,
    /// Replace blob price bump (in %) for the transaction pool underpriced check.
    pub replace_blob_tx_price_bump: u128,
}
impl PriceBumpConfig {
    /// Returns the price bump (in %) required to replace a transaction of the given type.
    ///
    /// Blob (EIP-4844) transactions use the stricter replacement bump; every other
    /// transaction type uses the default bump.
    #[inline]
    pub const fn price_bump(&self, tx_type: u8) -> u128 {
        if tx_type == EIP4844_TX_TYPE_ID {
            self.replace_blob_tx_price_bump
        } else {
            self.default_price_bump
        }
    }
}
impl Default for PriceBumpConfig {
    /// Defaults: 10% bump for regular transactions, 100% for blob transactions.
    fn default() -> Self {
        Self {
            default_price_bump: DEFAULT_PRICE_BUMP,
            replace_blob_tx_price_bump: REPLACE_BLOB_PRICE_BUMP,
        }
    }
}
/// Configuration options for the locally received transactions:
/// [`TransactionOrigin::Local`](TransactionOrigin)
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct LocalTransactionConfig {
    /// Apply no exemptions to the locally received transactions.
    ///
    /// This includes:
    /// - available slots are limited to the configured `max_account_slots` of [`PoolConfig`]
    /// - no price exemptions
    /// - no eviction exemptions
    pub no_exemptions: bool,
    /// Addresses that will be considered as local; the exemption rules above apply to them
    /// as well.
    pub local_addresses: HashSet<Address>,
    /// Flag indicating whether local transactions should be propagated.
    pub propagate_local_transactions: bool,
}
impl Default for LocalTransactionConfig {
    /// Defaults: exemptions enabled, no explicit local addresses, propagation on.
    fn default() -> Self {
        Self {
            no_exemptions: false,
            local_addresses: HashSet::default(),
            propagate_local_transactions: true,
        }
    }
}
impl LocalTransactionConfig {
    /// Returns `true` when locally received transactions get no special treatment, i.e. the
    /// configured pool limits apply to them like to any other transaction.
    #[inline]
    pub const fn no_local_exemptions(&self) -> bool {
        self.no_exemptions
    }
    /// Returns `true` if the given address was explicitly configured as local.
    #[inline]
    pub fn contains_local_address(&self, address: &Address) -> bool {
        self.local_addresses.contains(address)
    }
    /// Decides whether the given transaction counts as local.
    ///
    /// Always `false` when local exemptions are disabled; otherwise a transaction is local
    /// when its origin is local or its sender is a configured local address.
    #[inline]
    pub fn is_local(&self, origin: TransactionOrigin, sender: &Address) -> bool {
        !self.no_local_exemptions() &&
            (origin.is_local() || self.contains_local_address(sender))
    }
    /// Sets whether transactions received locally by this client (e.g. via this node's
    /// `eth_sendTransaction` RPC endpoint) are propagated to peers.
    ///
    /// When disabled, only transactions received from network peers (via p2p) are marked as
    /// propagated in the local transaction pool and returned on a `GetPooledTransactions`
    /// p2p request.
    pub const fn set_propagate_local_transactions(mut self, propagate_local_txs: bool) -> Self {
        self.propagate_local_transactions = propagate_local_txs;
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_pool_size_sanity() {
        // An empty pool must not trip any limit.
        let pool_size = PoolSize {
            pending: 0,
            pending_size: 0,
            basefee: 0,
            basefee_size: 0,
            queued: 0,
            queued_size: 0,
            blob: 0,
            blob_size: 0,
            ..Default::default()
        };
        // the current size is zero so this should not exceed any limits
        let config = PoolConfig::default();
        assert!(!config.is_exceeded(pool_size));
        // set them to be above the limits
        let pool_size = PoolSize {
            pending: config.pending_limit.max_txs + 1,
            pending_size: config.pending_limit.max_size + 1,
            basefee: config.basefee_limit.max_txs + 1,
            basefee_size: config.basefee_limit.max_size + 1,
            queued: config.queued_limit.max_txs + 1,
            queued_size: config.queued_limit.max_size + 1,
            blob: config.blob_limit.max_txs + 1,
            blob_size: config.blob_limit.max_size + 1,
            ..Default::default()
        };
        // now this should be above the limits
        assert!(config.is_exceeded(pool_size));
    }
    #[test]
    fn test_default_config() {
        // Defaults: exemptions enabled, no configured local addresses, propagation on.
        let config = LocalTransactionConfig::default();
        assert!(!config.no_exemptions);
        assert!(config.local_addresses.is_empty());
        assert!(config.propagate_local_transactions);
    }
    #[test]
    fn test_no_local_exemptions() {
        let config = LocalTransactionConfig { no_exemptions: true, ..Default::default() };
        assert!(config.no_local_exemptions());
    }
    #[test]
    fn test_contains_local_address() {
        let address = Address::new([1; 20]);
        let mut local_addresses = HashSet::default();
        local_addresses.insert(address);
        let config = LocalTransactionConfig { local_addresses, ..Default::default() };
        // Should contain the inserted address
        assert!(config.contains_local_address(&address));
        // Should not contain another random address
        assert!(!config.contains_local_address(&Address::new([2; 20])));
    }
    #[test]
    fn test_is_local_with_no_exemptions() {
        let address = Address::new([1; 20]);
        let config = LocalTransactionConfig {
            no_exemptions: true,
            local_addresses: HashSet::default(),
            ..Default::default()
        };
        // Should return false as no exemptions is set to true
        assert!(!config.is_local(TransactionOrigin::Local, &address));
    }
    #[test]
    fn test_is_local_without_no_exemptions() {
        let address = Address::new([1; 20]);
        let mut local_addresses = HashSet::default();
        local_addresses.insert(address);
        let config =
            LocalTransactionConfig { no_exemptions: false, local_addresses, ..Default::default() };
        // Should return true as the transaction origin is local
        assert!(config.is_local(TransactionOrigin::Local, &Address::new([2; 20])));
        assert!(config.is_local(TransactionOrigin::Local, &address));
        // Should return true as the address is in the local_addresses set
        assert!(config.is_local(TransactionOrigin::External, &address));
        // Should return false as the address is not in the local_addresses set
        assert!(!config.is_local(TransactionOrigin::External, &Address::new([2; 20])));
    }
    #[test]
    fn test_set_propagate_local_transactions() {
        let config = LocalTransactionConfig::default();
        assert!(config.propagate_local_transactions);
        // Builder-style setter consumes and returns the config.
        let new_config = config.set_propagate_local_transactions(false);
        assert!(!new_config.propagate_local_transactions);
    }
    #[test]
    fn scale_pool_limit() {
        // `Mul` scales both the count and the size limit.
        let limit = SubPoolLimit::default();
        let double = limit * 2;
        assert_eq!(
            double,
            SubPoolLimit { max_txs: limit.max_txs * 2, max_size: limit.max_size * 2 }
        )
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/batcher.rs | crates/transaction-pool/src/batcher.rs | //! Transaction batching for `Pool` insertion for high-throughput scenarios
//!
//! This module provides transaction batching logic to reduce lock contention when processing
//! many concurrent transaction pool insertions.
use crate::{
error::PoolError, AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool,
};
use pin_project::pin_project;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::{mpsc, oneshot};
/// A single batch transaction request.
///
/// All transactions processed through the batcher are considered local
/// transactions (`TransactionOrigin::Local`) when inserted into the pool.
#[derive(Debug)]
pub struct BatchTxRequest<T: PoolTransaction> {
    /// Tx to be inserted into the pool
    pool_tx: T,
    /// Channel to send result back to caller
    response_tx: oneshot::Sender<Result<AddedTransactionOutcome, PoolError>>,
}
impl<T> BatchTxRequest<T>
where
    T: PoolTransaction,
{
    /// Create a new batch transaction request.
    ///
    /// The outcome of the pool insertion is delivered to the caller on `response_tx`.
    pub const fn new(
        pool_tx: T,
        response_tx: oneshot::Sender<Result<AddedTransactionOutcome, PoolError>>,
    ) -> Self {
        Self { pool_tx, response_tx }
    }
}
/// Transaction batch processor that handles batch processing.
///
/// Implements [`Future`]: drive it (typically via `tokio::spawn`) so it drains queued
/// requests and inserts them into the pool in batches of at most `max_batch_size`.
#[pin_project]
#[derive(Debug)]
pub struct BatchTxProcessor<Pool: TransactionPool> {
    /// Pool that receives the batched transactions.
    pool: Pool,
    /// Upper bound on how many requests are drained and inserted per batch.
    max_batch_size: usize,
    /// Scratch buffer the receiver drains into; reused across polls.
    buf: Vec<BatchTxRequest<Pool::Transaction>>,
    /// Incoming requests from the sender half handed out by [`BatchTxProcessor::new`].
    #[pin]
    request_rx: mpsc::UnboundedReceiver<BatchTxRequest<Pool::Transaction>>,
}
impl<Pool> BatchTxProcessor<Pool>
where
    Pool: TransactionPool + 'static,
{
    /// Create a new `BatchTxProcessor`.
    ///
    /// Returns the processor future and the sender used to submit [`BatchTxRequest`]s to it.
    pub fn new(
        pool: Pool,
        max_batch_size: usize,
    ) -> (Self, mpsc::UnboundedSender<BatchTxRequest<Pool::Transaction>>) {
        let (request_tx, request_rx) = mpsc::unbounded_channel();
        // `buf` grows on demand while draining; capacity 1 keeps the idle footprint minimal.
        let processor = Self { pool, max_batch_size, buf: Vec::with_capacity(1), request_rx };
        (processor, request_tx)
    }
    /// Insert a single request into the pool and report the outcome to the caller.
    ///
    /// A closed response channel is ignored: the caller gave up on the result.
    async fn process_request(pool: &Pool, req: BatchTxRequest<Pool::Transaction>) {
        let BatchTxRequest { pool_tx, response_tx } = req;
        let pool_result = pool.add_transaction(TransactionOrigin::Local, pool_tx).await;
        let _ = response_tx.send(pool_result);
    }
    /// Process a batch of transaction requests; all are inserted as local transactions.
    async fn process_batch(pool: &Pool, mut batch: Vec<BatchTxRequest<Pool::Transaction>>) {
        // Fast path: a single request skips the unzip/zip bookkeeping below.
        if batch.len() == 1 {
            Self::process_request(pool, batch.remove(0)).await;
            return
        }
        let (pool_transactions, response_tx): (Vec<_>, Vec<_>) =
            batch.into_iter().map(|req| (req.pool_tx, req.response_tx)).unzip();
        // NOTE: the zip below relies on `add_transactions` returning results in input order.
        let pool_results = pool.add_transactions(TransactionOrigin::Local, pool_transactions).await;
        for (response_tx, pool_result) in response_tx.into_iter().zip(pool_results) {
            let _ = response_tx.send(pool_result);
        }
    }
}
impl<Pool> Future for BatchTxProcessor<Pool>
where
    Pool: TransactionPool + 'static,
{
    type Output = ();
    /// Drives the batcher: repeatedly drains up to `max_batch_size` queued requests and
    /// spawns a task that inserts the batch into the pool.
    ///
    /// Resolves once the request channel is closed and fully drained.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        loop {
            // Wait until requests are available, collecting up to `max_batch_size` at once.
            let received =
                ready!(this.request_rx.poll_recv_many(cx, this.buf, *this.max_batch_size));
            // `poll_recv_many` resolves with 0 only when the channel is closed and empty:
            // all senders are gone, so the processor is done. (Returning `Poll::Pending`
            // here, as before, would hang the task forever since no waker is registered.)
            if received == 0 {
                return Poll::Ready(());
            }
            // Hand the batch off to a separate task so `poll` stays cheap and batches run
            // concurrently with further draining.
            let batch = std::mem::take(this.buf);
            let pool = this.pool.clone();
            tokio::spawn(async move {
                Self::process_batch(&pool, batch).await;
            });
            this.buf.reserve(1);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{testing_pool, MockTransaction};
    use futures::stream::{FuturesUnordered, StreamExt};
    use std::time::Duration;
    use tokio::time::timeout;
    #[tokio::test]
    async fn test_process_batch() {
        // Every request in a batch must receive its own insertion outcome.
        let pool = testing_pool();
        let mut batch_requests = Vec::new();
        let mut responses = Vec::new();
        for i in 0..100 {
            let tx = MockTransaction::legacy().with_nonce(i).with_gas_price(100);
            let (response_tx, response_rx) = tokio::sync::oneshot::channel();
            batch_requests.push(BatchTxRequest::new(tx, response_tx));
            responses.push(response_rx);
        }
        BatchTxProcessor::process_batch(&pool, batch_requests).await;
        for response_rx in responses {
            let result = timeout(Duration::from_millis(5), response_rx)
                .await
                .expect("Timeout waiting for response")
                .expect("Response channel was closed unexpectedly");
            assert!(result.is_ok());
        }
    }
    #[tokio::test]
    async fn test_batch_processor() {
        let pool = testing_pool();
        let (processor, request_tx) = BatchTxProcessor::new(pool.clone(), 1000);
        // Spawn the processor
        let handle = tokio::spawn(processor);
        let mut responses = Vec::new();
        for i in 0..50 {
            let tx = MockTransaction::legacy().with_nonce(i).with_gas_price(100);
            let (response_tx, response_rx) = tokio::sync::oneshot::channel();
            request_tx.send(BatchTxRequest::new(tx, response_tx)).expect("Could not send batch tx");
            responses.push(response_rx);
        }
        // Give the spawned processor time to drain the channel.
        tokio::time::sleep(Duration::from_millis(10)).await;
        for rx in responses {
            let result = timeout(Duration::from_millis(10), rx)
                .await
                .expect("Timeout waiting for response")
                .expect("Response channel was closed unexpectedly");
            assert!(result.is_ok());
        }
        drop(request_tx);
        handle.abort();
    }
    #[tokio::test]
    async fn test_add_transaction() {
        let pool = testing_pool();
        let (processor, request_tx) = BatchTxProcessor::new(pool.clone(), 1000);
        // Spawn the processor
        let handle = tokio::spawn(processor);
        let mut results = Vec::new();
        for i in 0..10 {
            let tx = MockTransaction::legacy().with_nonce(i).with_gas_price(100);
            let (response_tx, response_rx) = tokio::sync::oneshot::channel();
            let request = BatchTxRequest::new(tx, response_tx);
            request_tx.send(request).expect("Could not send batch tx");
            results.push(response_rx);
        }
        for res in results {
            let result = timeout(Duration::from_millis(10), res)
                .await
                .expect("Timeout waiting for transaction result");
            assert!(result.is_ok());
        }
        handle.abort();
    }
    #[tokio::test]
    async fn test_max_batch_size() {
        // Exactly `max_batch_size` concurrent submissions must all complete.
        let pool = testing_pool();
        let max_batch_size = 10;
        let (processor, request_tx) = BatchTxProcessor::new(pool.clone(), max_batch_size);
        // Spawn batch processor with threshold
        let handle = tokio::spawn(processor);
        let mut futures = FuturesUnordered::new();
        for i in 0..max_batch_size {
            let tx = MockTransaction::legacy().with_nonce(i as u64).with_gas_price(100);
            let (response_tx, response_rx) = tokio::sync::oneshot::channel();
            let request = BatchTxRequest::new(tx, response_tx);
            let request_tx_clone = request_tx.clone();
            let tx_fut = async move {
                request_tx_clone.send(request).expect("Could not send batch tx");
                response_rx.await.expect("Could not receive batch response")
            };
            futures.push(tx_fut);
        }
        while let Some(result) = timeout(Duration::from_millis(5), futures.next())
            .await
            .expect("Timeout waiting for transaction result")
        {
            assert!(result.is_ok());
        }
        handle.abort();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/lib.rs | crates/transaction-pool/src/lib.rs | //! Reth's transaction pool implementation.
//!
//! This crate provides a generic transaction pool implementation.
//!
//! ## Functionality
//!
//! The transaction pool is responsible for
//!
//! - recording incoming transactions
//! - providing existing transactions
//! - ordering and providing the best transactions for block production
//! - monitoring memory footprint and enforce pool size limits
//! - storing blob data for transactions in a separate blobstore on insertion
//!
//! ## Transaction Flow: From Network/RPC to Pool
//!
//! Transactions enter the pool through two main paths:
//!
//! ### 1. Network Path (P2P)
//!
//! ```text
//! Network Peer
//! β
//! Transactions or NewPooledTransactionHashes message
//! β
//! TransactionsManager (crates/net/network/src/transactions/mod.rs)
//! β
//! βββ For Transactions message:
//! β βββ Validates message format
//! β βββ Checks if transaction already known
//! β βββ Marks peer as having seen the transaction
//! β βββ Queues for import
//! β
//! βββ For NewPooledTransactionHashes message:
//! βββ Filters out already known transactions
//! βββ Queues unknown hashes for fetching
//! βββ Sends GetPooledTransactions request
//! βββ Receives PooledTransactions response
//! βββ Queues fetched transactions for import
//! β
//! pool.add_external_transactions() [Origin: External]
//! β
//! Transaction Validation & Pool Addition
//! ```
//!
//! ### 2. RPC Path (Local submission)
//!
//! ```text
//! eth_sendRawTransaction RPC call
//! βββ Decodes raw bytes
//! βββ Recovers sender
//! β
//! pool.add_transaction() [Origin: Local]
//! β
//! Transaction Validation & Pool Addition
//! ```
//!
//! ### Transaction Origins
//!
//! - **Local**: Transactions submitted via RPC (trusted, may have different fee requirements)
//! - **External**: Transactions from network peers (untrusted, subject to stricter validation)
//! - **Private**: Local transactions that should not be propagated to the network
//!
//! ## Validation Process
//!
//! ### Stateless Checks
//!
//! Ethereum transactions undergo several stateless checks:
//!
//! - **Transaction Type**: Fork-dependent support (Legacy always, EIP-2930/1559/4844/7702 need
//! activation)
//! - **Size**: Input data ≤ 128KB (default)
//! - **Gas**: Limit ≤ block gas limit
//! - **Fees**: Priority fee ≤ max fee; local tx fee cap; external minimum priority fee
//! - **Chain ID**: Must match current chain
//! - **Intrinsic Gas**: Sufficient for data and access lists
//! - **Blobs** (EIP-4844): Valid count, KZG proofs
//!
//! ### Stateful Checks
//!
//! 1. **Sender**: No bytecode (unless EIP-7702 delegated in Prague)
//! 2. **Nonce**: ≥ account nonce
//! 3. **Balance**: Covers value + (`gas_limit` × `max_fee_per_gas`)
//!
//! ### Common Errors
//!
//! - [`NonceNotConsistent`](reth_primitives_traits::transaction::error::InvalidTransactionError::NonceNotConsistent): Nonce too low
//! - [`InsufficientFunds`](reth_primitives_traits::transaction::error::InvalidTransactionError::InsufficientFunds): Insufficient balance
//! - [`ExceedsGasLimit`](crate::error::InvalidPoolTransactionError::ExceedsGasLimit): Gas limit too
//! high
//! - [`SignerAccountHasBytecode`](reth_primitives_traits::transaction::error::InvalidTransactionError::SignerAccountHasBytecode): EOA has code
//! - [`Underpriced`](crate::error::InvalidPoolTransactionError::Underpriced): Fee too low
//! - [`ReplacementUnderpriced`](crate::error::PoolErrorKind::ReplacementUnderpriced): Replacement
//! transaction fee too low
//! - Blob errors:
//! - [`MissingEip4844BlobSidecar`](crate::error::Eip4844PoolTransactionError::MissingEip4844BlobSidecar): Missing sidecar
//! - [`InvalidEip4844Blob`](crate::error::Eip4844PoolTransactionError::InvalidEip4844Blob):
//! Invalid blob proofs
//! - [`NoEip4844Blobs`](crate::error::Eip4844PoolTransactionError::NoEip4844Blobs): EIP-4844
//! transaction without blobs
//! - [`TooManyEip4844Blobs`](crate::error::Eip4844PoolTransactionError::TooManyEip4844Blobs): Too
//! many blobs
//!
//! ## Subpool Design
//!
//! The pool maintains four distinct subpools, each serving a specific purpose
//!
//! ### Subpools
//!
//! 1. **Pending**: Ready for inclusion (no gaps, sufficient balance/fees)
//! 2. **Queued**: Future transactions (nonce gaps or insufficient balance)
//! 3. **`BaseFee`**: Valid but below current base fee
//! 4. **Blob**: EIP-4844 transactions not pending due to insufficient base fee or blob fee
//!
//! ### State Transitions
//!
//! Transactions move between subpools based on state changes:
//!
//! ```text
//! Queued ββββββββββ BaseFee/Blob βββββββββ Pending
//! β β β
//! β β β
//! ββββββββββββββββββββββ΄ββββββββββββββββββββββ
//! (demotions due to state changes)
//! ```
//!
//! **Promotions**: Nonce gaps filled, balance/fee improvements
//! **Demotions**: Nonce gaps created, balance/fee degradation
//!
//! ## Pool Maintenance
//!
//! 1. **Block Updates**: Removes mined txs, updates accounts/fees, triggers movements
//! 2. **Size Enforcement**: Discards worst transactions when limits exceeded
//! 3. **Propagation**: External (always), Local (configurable), Private (never)
//!
//! ## Assumptions
//!
//! ### Transaction type
//!
//! The pool expects certain ethereum related information from the generic transaction type of the
//! pool ([`PoolTransaction`]), this includes gas price, base fee (EIP-1559 transactions), nonce
//! etc. It makes no assumptions about the encoding format, but the transaction type must report its
//! size so pool size limits (memory) can be enforced.
//!
//! ### Transaction ordering
//!
//! The pending pool contains transactions that can be mined on the current state.
//! The order in which they're returned are determined by a `Priority` value returned by the
//! `TransactionOrdering` type this pool is configured with.
//!
//! This is only used in the _pending_ pool to yield the best transactions for block production. The
//! _base pool_ is ordered by base fee, and the _queued pool_ by current distance.
//!
//! ### Validation
//!
//! The pool itself does not validate incoming transactions, instead this should be provided by
//! implementing `TransactionsValidator`. Only transactions that the validator returns as valid are
//! included in the pool. It is assumed that transactions that are in the pool are either valid on
//! the current state or could become valid after certain state changes. Transactions that can never
//! become valid (e.g. nonce lower than current on chain nonce) will never be added to the pool and
//! instead are discarded right away.
//!
//! ### State Changes
//!
//! New blocks trigger pool updates via changesets (see Pool Maintenance).
//!
//! ## Implementation details
//!
//! The `TransactionPool` trait exposes all externally used functionality of the pool, such as
//! inserting, querying specific transactions by hash or retrieving the best transactions.
//! In addition, it enables the registration of event listeners that are notified of state changes.
//! Events are communicated via channels.
//!
//! ### Architecture
//!
//! The final `TransactionPool` is made up of two layers:
//!
//! The lowest layer is the actual pool implementations that manages (validated) transactions:
//! [`TxPool`](crate::pool::txpool::TxPool). This is contained in a higher level pool type that
//! guards the low level pool and handles additional listeners or metrics: [`PoolInner`].
//!
//! The transaction pool will be used by separate consumers (RPC, P2P), to make sharing easier, the
//! [`Pool`] type is just an `Arc` wrapper around `PoolInner`. This is the usable type that provides
//! the `TransactionPool` interface.
//!
//!
//! ## Blob Transactions
//!
//! Blob transaction can be quite large hence they are stored in a separate blobstore. The pool is
//! responsible for inserting blob data for new transactions into the blobstore.
//! See also [`ValidTransaction`](validate::ValidTransaction)
//!
//!
//! ## Examples
//!
//! Listen for new transactions and print them:
//!
//! ```
//! use reth_chainspec::MAINNET;
//! use reth_storage_api::StateProviderFactory;
//! use reth_tasks::TokioTaskExecutor;
//! use reth_chainspec::ChainSpecProvider;
//! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool, TransactionPool};
//! use reth_transaction_pool::blobstore::InMemoryBlobStore;
//! use reth_chainspec::EthereumHardforks;
//! async fn t<C>(client: C) where C: ChainSpecProvider<ChainSpec: EthereumHardforks> + StateProviderFactory + Clone + 'static{
//! let blob_store = InMemoryBlobStore::default();
//! let pool = Pool::eth_pool(
//! TransactionValidationTaskExecutor::eth(client, blob_store.clone(), TokioTaskExecutor::default()),
//! blob_store,
//! Default::default(),
//! );
//! let mut transactions = pool.pending_transactions_listener();
//! tokio::task::spawn( async move {
//! while let Some(tx) = transactions.recv().await {
//! println!("New transaction: {:?}", tx);
//! }
//! });
//!
//! // do something useful with the pool, like RPC integration
//!
//! # }
//! ```
//!
//! Spawn maintenance task to keep the pool updated
//!
//! ```
//! use futures_util::Stream;
//! use reth_chain_state::CanonStateNotification;
//! use reth_chainspec::{MAINNET, ChainSpecProvider, ChainSpec};
//! use reth_storage_api::{BlockReaderIdExt, StateProviderFactory};
//! use reth_tasks::TokioTaskExecutor;
//! use reth_tasks::TaskSpawner;
//! use reth_tasks::TaskManager;
//! use reth_transaction_pool::{TransactionValidationTaskExecutor, Pool};
//! use reth_transaction_pool::blobstore::InMemoryBlobStore;
//! use reth_transaction_pool::maintain::{maintain_transaction_pool_future};
//! use alloy_consensus::Header;
//!
//! async fn t<C, St>(client: C, stream: St)
//! where C: StateProviderFactory + BlockReaderIdExt<Header = Header> + ChainSpecProvider<ChainSpec = ChainSpec> + Clone + 'static,
//! St: Stream<Item = CanonStateNotification> + Send + Unpin + 'static,
//! {
//! let blob_store = InMemoryBlobStore::default();
//! let rt = tokio::runtime::Runtime::new().unwrap();
//! let manager = TaskManager::new(rt.handle().clone());
//! let executor = manager.executor();
//! let pool = Pool::eth_pool(
//! TransactionValidationTaskExecutor::eth(client.clone(), blob_store.clone(), executor.clone()),
//! blob_store,
//! Default::default(),
//! );
//!
//! // spawn a task that listens for new blocks and updates the pool's transactions, mined transactions etc..
//! tokio::task::spawn(maintain_transaction_pool_future(client, pool, stream, executor.clone(), Default::default()));
//!
//! # }
//! ```
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
pub use crate::{
batcher::{BatchTxProcessor, BatchTxRequest},
blobstore::{BlobStore, BlobStoreError},
config::{
LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit,
DEFAULT_MAX_INFLIGHT_DELEGATED_SLOTS, DEFAULT_PRICE_BUMP,
DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS,
REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT,
},
error::PoolResult,
ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering},
pool::{
blob_tx_priority, fee_delta, state::SubPool, AddedTransactionOutcome,
AllTransactionsEvents, FullTransactionEvent, NewTransactionEvent, TransactionEvent,
TransactionEvents, TransactionListenerKind,
},
traits::*,
validate::{
EthTransactionValidator, TransactionValidationOutcome, TransactionValidationTaskExecutor,
TransactionValidator, ValidPoolTransaction,
},
};
use crate::{identifier::TransactionId, pool::PoolInner};
use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::{Address, TxHash, B256, U256};
use aquamarine as _;
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_eth_wire_types::HandleMempoolData;
use reth_execution_types::ChangedAccount;
use reth_primitives_traits::{Block, Recovered};
use reth_storage_api::StateProviderFactory;
use std::{collections::HashSet, sync::Arc};
use tokio::sync::mpsc::Receiver;
use tracing::{instrument, trace};
pub mod error;
pub mod maintain;
pub mod metrics;
pub mod noop;
pub mod pool;
pub mod validate;
pub mod batcher;
pub mod blobstore;
mod config;
pub mod identifier;
mod ordering;
mod traits;
#[cfg(any(test, feature = "test-utils"))]
/// Common test helpers for mocking a pool
pub mod test_utils;
/// Type alias for default ethereum transaction pool
pub type EthTransactionPool<Client, S, T = EthPooledTransaction> = Pool<
TransactionValidationTaskExecutor<EthTransactionValidator<Client, T>>,
CoinbaseTipOrdering<T>,
S,
>;
/// A shareable, generic, customizable `TransactionPool` implementation.
#[derive(Debug)]
pub struct Pool<V, T: TransactionOrdering, S> {
/// Arc'ed instance of the pool internals
pool: Arc<PoolInner<V, T, S>>,
}
// === impl Pool ===
impl<V, T, S> Pool<V, T, S>
where
V: TransactionValidator,
T: TransactionOrdering<Transaction = <V as TransactionValidator>::Transaction>,
S: BlobStore,
{
/// Create a new transaction pool instance.
pub fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self {
Self { pool: Arc::new(PoolInner::new(validator, ordering, blob_store, config)) }
}
/// Returns the wrapped pool.
pub(crate) fn inner(&self) -> &PoolInner<V, T, S> {
&self.pool
}
/// Get the config the pool was configured with.
pub fn config(&self) -> &PoolConfig {
self.inner().config()
}
/// Validates the given transaction
async fn validate(
&self,
origin: TransactionOrigin,
transaction: V::Transaction,
) -> TransactionValidationOutcome<V::Transaction> {
self.pool.validator().validate_transaction(origin, transaction).await
}
/// Returns future that validates all transactions in the given iterator.
///
/// This returns the validated transactions in the iterator's order.
async fn validate_all(
&self,
origin: TransactionOrigin,
transactions: impl IntoIterator<Item = V::Transaction> + Send,
) -> Vec<TransactionValidationOutcome<V::Transaction>> {
self.pool.validator().validate_transactions_with_origin(origin, transactions).await
}
/// Validates all transactions with their individual origins.
///
/// This returns the validated transactions in the same order as input.
async fn validate_all_with_origins(
&self,
transactions: Vec<(TransactionOrigin, V::Transaction)>,
) -> Vec<(TransactionOrigin, TransactionValidationOutcome<V::Transaction>)> {
if transactions.len() == 1 {
let (origin, tx) = transactions.into_iter().next().unwrap();
let res = self.pool.validator().validate_transaction(origin, tx).await;
return vec![(origin, res)]
}
let origins: Vec<_> = transactions.iter().map(|(origin, _)| *origin).collect();
let tx_outcomes = self.pool.validator().validate_transactions(transactions).await;
origins.into_iter().zip(tx_outcomes).collect()
}
/// Number of transactions in the entire pool
pub fn len(&self) -> usize {
self.pool.len()
}
/// Whether the pool is empty
pub fn is_empty(&self) -> bool {
self.pool.is_empty()
}
/// Returns whether or not the pool is over its configured size and transaction count limits.
pub fn is_exceeded(&self) -> bool {
self.pool.is_exceeded()
}
/// Returns the configured blob store.
pub fn blob_store(&self) -> &S {
self.pool.blob_store()
}
}
impl<Client, S> EthTransactionPool<Client, S>
where
Client:
ChainSpecProvider<ChainSpec: EthereumHardforks> + StateProviderFactory + Clone + 'static,
S: BlobStore,
{
/// Returns a new [`Pool`] that uses the default [`TransactionValidationTaskExecutor`] when
/// validating [`EthPooledTransaction`]s and ords via [`CoinbaseTipOrdering`]
///
/// # Example
///
/// ```
/// use reth_chainspec::MAINNET;
/// use reth_storage_api::StateProviderFactory;
/// use reth_tasks::TokioTaskExecutor;
/// use reth_chainspec::ChainSpecProvider;
/// use reth_transaction_pool::{
/// blobstore::InMemoryBlobStore, Pool, TransactionValidationTaskExecutor,
/// };
/// use reth_chainspec::EthereumHardforks;
/// # fn t<C>(client: C) where C: ChainSpecProvider<ChainSpec: EthereumHardforks> + StateProviderFactory + Clone + 'static {
/// let blob_store = InMemoryBlobStore::default();
/// let pool = Pool::eth_pool(
/// TransactionValidationTaskExecutor::eth(
/// client,
/// blob_store.clone(),
/// TokioTaskExecutor::default(),
/// ),
/// blob_store,
/// Default::default(),
/// );
/// # }
/// ```
pub fn eth_pool(
validator: TransactionValidationTaskExecutor<
EthTransactionValidator<Client, EthPooledTransaction>,
>,
blob_store: S,
config: PoolConfig,
) -> Self {
Self::new(validator, CoinbaseTipOrdering::default(), blob_store, config)
}
}
/// implements the `TransactionPool` interface for various transaction pool API consumers.
impl<V, T, S> TransactionPool for Pool<V, T, S>
where
V: TransactionValidator,
<V as TransactionValidator>::Transaction: EthPoolTransaction,
T: TransactionOrdering<Transaction = <V as TransactionValidator>::Transaction>,
S: BlobStore,
{
type Transaction = T::Transaction;
fn pool_size(&self) -> PoolSize {
self.pool.size()
}
fn block_info(&self) -> BlockInfo {
self.pool.block_info()
}
async fn add_transaction_and_subscribe(
&self,
origin: TransactionOrigin,
transaction: Self::Transaction,
) -> PoolResult<TransactionEvents> {
let tx = self.validate(origin, transaction).await;
self.pool.add_transaction_and_subscribe(origin, tx)
}
async fn add_transaction(
&self,
origin: TransactionOrigin,
transaction: Self::Transaction,
) -> PoolResult<AddedTransactionOutcome> {
let tx = self.validate(origin, transaction).await;
let mut results = self.pool.add_transactions(origin, std::iter::once(tx));
results.pop().expect("result length is the same as the input")
}
async fn add_transactions(
&self,
origin: TransactionOrigin,
transactions: Vec<Self::Transaction>,
) -> Vec<PoolResult<AddedTransactionOutcome>> {
if transactions.is_empty() {
return Vec::new()
}
let validated = self.validate_all(origin, transactions).await;
self.pool.add_transactions(origin, validated.into_iter())
}
async fn add_transactions_with_origins(
&self,
transactions: Vec<(TransactionOrigin, Self::Transaction)>,
) -> Vec<PoolResult<AddedTransactionOutcome>> {
if transactions.is_empty() {
return Vec::new()
}
let validated = self.validate_all_with_origins(transactions).await;
self.pool.add_transactions_with_origins(validated)
}
fn transaction_event_listener(&self, tx_hash: TxHash) -> Option<TransactionEvents> {
self.pool.add_transaction_event_listener(tx_hash)
}
fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction> {
self.pool.add_all_transactions_event_listener()
}
fn pending_transactions_listener_for(&self, kind: TransactionListenerKind) -> Receiver<TxHash> {
self.pool.add_pending_listener(kind)
}
fn blob_transaction_sidecars_listener(&self) -> Receiver<NewBlobSidecar> {
self.pool.add_blob_sidecar_listener()
}
fn new_transactions_listener_for(
&self,
kind: TransactionListenerKind,
) -> Receiver<NewTransactionEvent<Self::Transaction>> {
self.pool.add_new_transaction_listener(kind)
}
fn pooled_transaction_hashes(&self) -> Vec<TxHash> {
self.pool.pooled_transactions_hashes()
}
fn pooled_transaction_hashes_max(&self, max: usize) -> Vec<TxHash> {
self.pooled_transaction_hashes().into_iter().take(max).collect()
}
fn pooled_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.pooled_transactions()
}
fn pooled_transactions_max(
&self,
max: usize,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.pooled_transactions_max(max)
}
fn get_pooled_transaction_elements(
&self,
tx_hashes: Vec<TxHash>,
limit: GetPooledTransactionLimit,
) -> Vec<<<V as TransactionValidator>::Transaction as PoolTransaction>::Pooled> {
self.pool.get_pooled_transaction_elements(tx_hashes, limit)
}
fn get_pooled_transaction_element(
&self,
tx_hash: TxHash,
) -> Option<Recovered<<<V as TransactionValidator>::Transaction as PoolTransaction>::Pooled>>
{
self.pool.get_pooled_transaction_element(tx_hash)
}
fn best_transactions(
&self,
) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
Box::new(self.pool.best_transactions())
}
fn best_transactions_with_attributes(
&self,
best_transactions_attributes: BestTransactionsAttributes,
) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
self.pool.best_transactions_with_attributes(best_transactions_attributes)
}
fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.pending_transactions()
}
fn pending_transactions_max(
&self,
max: usize,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.pending_transactions_max(max)
}
fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.queued_transactions()
}
fn pending_and_queued_txn_count(&self) -> (usize, usize) {
let data = self.pool.get_pool_data();
let pending = data.pending_transactions_count();
let queued = data.queued_transactions_count();
(pending, queued)
}
fn all_transactions(&self) -> AllPoolTransactions<Self::Transaction> {
self.pool.all_transactions()
}
fn all_transaction_hashes(&self) -> Vec<TxHash> {
self.pool.all_transaction_hashes()
}
fn remove_transactions(
&self,
hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.remove_transactions(hashes)
}
fn remove_transactions_and_descendants(
&self,
hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.remove_transactions_and_descendants(hashes)
}
fn remove_transactions_by_sender(
&self,
sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.remove_transactions_by_sender(sender)
}
fn retain_unknown<A>(&self, announcement: &mut A)
where
A: HandleMempoolData,
{
self.pool.retain_unknown(announcement)
}
fn get(&self, tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.inner().get(tx_hash)
}
fn get_all(&self, txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.inner().get_all(txs)
}
fn on_propagated(&self, txs: PropagatedTransactions) {
self.inner().on_propagated(txs)
}
fn get_transactions_by_sender(
&self,
sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_transactions_by_sender(sender)
}
fn get_pending_transactions_with_predicate(
&self,
predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.pending_transactions_with_predicate(predicate)
}
fn get_pending_transactions_by_sender(
&self,
sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_pending_transactions_by_sender(sender)
}
fn get_queued_transactions_by_sender(
&self,
sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_queued_transactions_by_sender(sender)
}
fn get_highest_transaction_by_sender(
&self,
sender: Address,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_highest_transaction_by_sender(sender)
}
fn get_highest_consecutive_transaction_by_sender(
&self,
sender: Address,
on_chain_nonce: u64,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_highest_consecutive_transaction_by_sender(sender, on_chain_nonce)
}
fn get_transaction_by_sender_and_nonce(
&self,
sender: Address,
nonce: u64,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
let transaction_id = TransactionId::new(self.pool.get_sender_id(sender), nonce);
self.inner().get_pool_data().all().get(&transaction_id).map(|tx| tx.transaction.clone())
}
fn get_transactions_by_origin(
&self,
origin: TransactionOrigin,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_transactions_by_origin(origin)
}
/// Returns all pending transactions filtered by [`TransactionOrigin`]
fn get_pending_transactions_by_origin(
&self,
origin: TransactionOrigin,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
self.pool.get_pending_transactions_by_origin(origin)
}
fn unique_senders(&self) -> HashSet<Address> {
self.pool.unique_senders()
}
fn get_blob(
&self,
tx_hash: TxHash,
) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
self.pool.blob_store().get(tx_hash)
}
fn get_all_blobs(
&self,
tx_hashes: Vec<TxHash>,
) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
self.pool.blob_store().get_all(tx_hashes)
}
fn get_all_blobs_exact(
&self,
tx_hashes: Vec<TxHash>,
) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
self.pool.blob_store().get_exact(tx_hashes)
}
fn get_blobs_for_versioned_hashes_v1(
&self,
versioned_hashes: &[B256],
) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
self.pool.blob_store().get_by_versioned_hashes_v1(versioned_hashes)
}
fn get_blobs_for_versioned_hashes_v2(
&self,
versioned_hashes: &[B256],
) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
self.pool.blob_store().get_by_versioned_hashes_v2(versioned_hashes)
}
}
impl<V, T, S> TransactionPoolExt for Pool<V, T, S>
where
V: TransactionValidator,
<V as TransactionValidator>::Transaction: EthPoolTransaction,
T: TransactionOrdering<Transaction = <V as TransactionValidator>::Transaction>,
S: BlobStore,
{
#[instrument(skip(self), target = "txpool")]
fn set_block_info(&self, info: BlockInfo) {
trace!(target: "txpool", "updating pool block info");
self.pool.set_block_info(info)
}
fn on_canonical_state_change<B>(&self, update: CanonicalStateUpdate<'_, B>)
where
B: Block,
{
self.pool.on_canonical_state_change(update);
}
fn update_accounts(&self, accounts: Vec<ChangedAccount>) {
self.pool.update_accounts(accounts);
}
fn delete_blob(&self, tx: TxHash) {
self.pool.delete_blob(tx)
}
fn delete_blobs(&self, txs: Vec<TxHash>) {
self.pool.delete_blobs(txs)
}
fn cleanup_blobs(&self) {
self.pool.cleanup_blobs()
}
}
impl<V, T: TransactionOrdering, S> Clone for Pool<V, T, S> {
fn clone(&self) -> Self {
Self { pool: Arc::clone(&self.pool) }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/noop.rs | crates/transaction-pool/src/noop.rs | //! A transaction pool implementation that does nothing.
//!
//! This is useful for wiring components together that don't require an actual pool but still need
//! to be generic over it.
use crate::{
blobstore::BlobStoreError,
error::{InvalidPoolTransactionError, PoolError},
pool::TransactionListenerKind,
traits::{BestTransactionsAttributes, GetPooledTransactionLimit, NewBlobSidecar},
validate::ValidTransaction,
AddedTransactionOutcome, AllPoolTransactions, AllTransactionsEvents, BestTransactions,
BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize,
PoolTransaction, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool,
TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction,
};
use alloy_eips::{
eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M,
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::{Address, TxHash, B256, U256};
use reth_eth_wire_types::HandleMempoolData;
use reth_primitives_traits::Recovered;
use std::{collections::HashSet, marker::PhantomData, sync::Arc};
use tokio::sync::{mpsc, mpsc::Receiver};
/// A [`TransactionPool`] implementation that does nothing.
///
/// All transactions are rejected and no events are emitted.
/// This type will never hold any transactions and is only useful for wiring components together.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct NoopTransactionPool<T = EthPooledTransaction> {
/// Type marker
_marker: PhantomData<T>,
}
impl<T> NoopTransactionPool<T> {
/// Creates a new [`NoopTransactionPool`].
pub fn new() -> Self {
Self { _marker: Default::default() }
}
}
impl Default for NoopTransactionPool<EthPooledTransaction> {
fn default() -> Self {
Self { _marker: Default::default() }
}
}
impl<T: EthPoolTransaction> TransactionPool for NoopTransactionPool<T> {
type Transaction = T;
fn pool_size(&self) -> PoolSize {
Default::default()
}
fn block_info(&self) -> BlockInfo {
BlockInfo {
block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
last_seen_block_hash: Default::default(),
last_seen_block_number: 0,
pending_basefee: 0,
pending_blob_fee: None,
}
}
async fn add_transaction_and_subscribe(
&self,
_origin: TransactionOrigin,
transaction: Self::Transaction,
) -> PoolResult<TransactionEvents> {
let hash = *transaction.hash();
Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction))))
}
async fn add_transaction(
&self,
_origin: TransactionOrigin,
transaction: Self::Transaction,
) -> PoolResult<AddedTransactionOutcome> {
let hash = *transaction.hash();
Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction))))
}
async fn add_transactions(
&self,
_origin: TransactionOrigin,
transactions: Vec<Self::Transaction>,
) -> Vec<PoolResult<AddedTransactionOutcome>> {
transactions
.into_iter()
.map(|transaction| {
let hash = *transaction.hash();
Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction))))
})
.collect()
}
async fn add_transactions_with_origins(
&self,
transactions: Vec<(TransactionOrigin, Self::Transaction)>,
) -> Vec<PoolResult<AddedTransactionOutcome>> {
transactions
.into_iter()
.map(|(_, transaction)| {
let hash = *transaction.hash();
Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction))))
})
.collect()
}
fn transaction_event_listener(&self, _tx_hash: TxHash) -> Option<TransactionEvents> {
None
}
fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction> {
AllTransactionsEvents::new(mpsc::channel(1).1)
}
fn pending_transactions_listener_for(
&self,
_kind: TransactionListenerKind,
) -> Receiver<TxHash> {
mpsc::channel(1).1
}
fn new_transactions_listener(&self) -> Receiver<NewTransactionEvent<Self::Transaction>> {
mpsc::channel(1).1
}
fn blob_transaction_sidecars_listener(&self) -> Receiver<NewBlobSidecar> {
mpsc::channel(1).1
}
fn new_transactions_listener_for(
&self,
_kind: TransactionListenerKind,
) -> Receiver<NewTransactionEvent<Self::Transaction>> {
mpsc::channel(1).1
}
fn pooled_transaction_hashes(&self) -> Vec<TxHash> {
vec![]
}
fn pooled_transaction_hashes_max(&self, _max: usize) -> Vec<TxHash> {
vec![]
}
fn pooled_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn pooled_transactions_max(
&self,
_max: usize,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pooled_transaction_elements(
&self,
_tx_hashes: Vec<TxHash>,
_limit: GetPooledTransactionLimit,
) -> Vec<<Self::Transaction as PoolTransaction>::Pooled> {
vec![]
}
fn get_pooled_transaction_element(
&self,
_tx_hash: TxHash,
) -> Option<Recovered<<Self::Transaction as PoolTransaction>::Pooled>> {
None
}
fn best_transactions(
&self,
) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
Box::new(std::iter::empty())
}
fn best_transactions_with_attributes(
&self,
_: BestTransactionsAttributes,
) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>> {
Box::new(std::iter::empty())
}
fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn pending_transactions_max(
&self,
_max: usize,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn pending_and_queued_txn_count(&self) -> (usize, usize) {
(0, 0)
}
fn all_transactions(&self) -> AllPoolTransactions<Self::Transaction> {
AllPoolTransactions::default()
}
fn all_transaction_hashes(&self) -> Vec<TxHash> {
vec![]
}
fn remove_transactions(
&self,
_hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn remove_transactions_and_descendants(
&self,
_hashes: Vec<TxHash>,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn remove_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn retain_unknown<A>(&self, _announcement: &mut A)
where
A: HandleMempoolData,
{
}
fn get(&self, _tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_all(&self, _txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn on_propagated(&self, _txs: PropagatedTransactions) {}
fn get_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pending_transactions_with_predicate(
&self,
_predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pending_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_queued_transactions_by_sender(
&self,
_sender: Address,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_highest_transaction_by_sender(
&self,
_sender: Address,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_highest_consecutive_transaction_by_sender(
&self,
_sender: Address,
_on_chain_nonce: u64,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_transaction_by_sender_and_nonce(
&self,
_sender: Address,
_nonce: u64,
) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>> {
None
}
fn get_transactions_by_origin(
&self,
_origin: TransactionOrigin,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn get_pending_transactions_by_origin(
&self,
_origin: TransactionOrigin,
) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
vec![]
}
fn unique_senders(&self) -> HashSet<Address> {
Default::default()
}
fn get_blob(
&self,
_tx_hash: TxHash,
) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
Ok(None)
}
fn get_all_blobs(
&self,
_tx_hashes: Vec<TxHash>,
) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
Ok(vec![])
}
fn get_all_blobs_exact(
&self,
tx_hashes: Vec<TxHash>,
) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
if tx_hashes.is_empty() {
return Ok(vec![])
}
Err(BlobStoreError::MissingSidecar(tx_hashes[0]))
}
fn get_blobs_for_versioned_hashes_v1(
&self,
versioned_hashes: &[B256],
) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
Ok(vec![None; versioned_hashes.len()])
}
fn get_blobs_for_versioned_hashes_v2(
&self,
_versioned_hashes: &[B256],
) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
Ok(None)
}
}
/// A [`TransactionValidator`] that does nothing.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MockTransactionValidator<T> {
propagate_local: bool,
return_invalid: bool,
_marker: PhantomData<T>,
}
impl<T: EthPoolTransaction> TransactionValidator for MockTransactionValidator<T> {
type Transaction = T;
async fn validate_transaction(
&self,
origin: TransactionOrigin,
mut transaction: Self::Transaction,
) -> TransactionValidationOutcome<Self::Transaction> {
if self.return_invalid {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidPoolTransactionError::Underpriced,
);
}
let maybe_sidecar = transaction.take_blob().maybe_sidecar().cloned();
// we return `balance: U256::MAX` to simulate a valid transaction which will never go into
// overdraft
TransactionValidationOutcome::Valid {
balance: U256::MAX,
state_nonce: 0,
bytecode_hash: None,
transaction: ValidTransaction::new(transaction, maybe_sidecar),
propagate: match origin {
TransactionOrigin::External => true,
TransactionOrigin::Local => self.propagate_local,
TransactionOrigin::Private => false,
},
authorities: None,
}
}
}
impl<T> MockTransactionValidator<T> {
/// Creates a new [`MockTransactionValidator`] that does not allow local transactions to be
/// propagated.
pub fn no_propagate_local() -> Self {
Self { propagate_local: false, return_invalid: false, _marker: Default::default() }
}
/// Creates a new [`MockTransactionValidator`] that always return a invalid outcome.
pub fn return_invalid() -> Self {
Self { propagate_local: false, return_invalid: true, _marker: Default::default() }
}
}
impl<T> Default for MockTransactionValidator<T> {
fn default() -> Self {
Self { propagate_local: true, return_invalid: false, _marker: Default::default() }
}
}
/// An error that contains the transaction that failed to be inserted into the noop pool.
#[derive(Debug, Clone, thiserror::Error)]
#[error("can't insert transaction into the noop pool that does nothing")]
pub struct NoopInsertError<T: EthPoolTransaction = EthPooledTransaction> {
tx: T,
}
impl<T: EthPoolTransaction> NoopInsertError<T> {
const fn new(tx: T) -> Self {
Self { tx }
}
/// Returns the transaction that failed to be inserted.
pub fn into_inner(self) -> T {
self.tx
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/ordering.rs | crates/transaction-pool/src/ordering.rs | use crate::traits::PoolTransaction;
use alloy_primitives::U256;
use std::{cmp::Ordering, fmt::Debug, marker::PhantomData};
/// Priority of the transaction that can be missing.
///
/// Transactions with missing priorities are ranked lower.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Priority<T: Ord + Clone> {
    /// The value of the priority of the transaction.
    Value(T),
    /// Missing priority due to ordering internals.
    None,
}
impl<T: Ord + Clone> From<Option<T>> for Priority<T> {
    fn from(value: Option<T>) -> Self {
        match value {
            Some(inner) => Self::Value(inner),
            None => Self::None,
        }
    }
}
impl<T: Ord + Clone> PartialOrd for Priority<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<T: Ord + Clone> Ord for Priority<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        // A present value always outranks a missing one; two present values compare on `T`.
        match (self, other) {
            (Self::None, Self::None) => Ordering::Equal,
            (Self::None, Self::Value(_)) => Ordering::Less,
            (Self::Value(_), Self::None) => Ordering::Greater,
            (Self::Value(lhs), Self::Value(rhs)) => lhs.cmp(rhs),
        }
    }
}
/// Transaction ordering trait to determine the order of transactions.
///
/// Decides how transactions should be ordered within the pool, depending on a `Priority` value.
///
/// The returned priority must reflect [total order](https://en.wikipedia.org/wiki/Total_order).
pub trait TransactionOrdering: Debug + Send + Sync + 'static {
    /// Priority of a transaction.
    ///
    /// Higher is better.
    type PriorityValue: Ord + Clone + Default + Debug + Send + Sync;
    /// The transaction type to determine the priority of.
    type Transaction: PoolTransaction;
    /// Returns the priority score for the given transaction.
    ///
    /// Implementations may return [`Priority::None`] when no score can be computed; such
    /// transactions rank below any transaction with a `Priority::Value`.
    fn priority(
        &self,
        transaction: &Self::Transaction,
        base_fee: u64,
    ) -> Priority<Self::PriorityValue>;
}
/// Default ordering for the pool.
///
/// The transactions are ordered by their coinbase tip.
/// The higher the coinbase tip is, the higher the priority of the transaction.
#[derive(Debug)]
#[non_exhaustive]
pub struct CoinbaseTipOrdering<T>(PhantomData<T>);
impl<T> TransactionOrdering for CoinbaseTipOrdering<T>
where
T: PoolTransaction + 'static,
{
type PriorityValue = U256;
type Transaction = T;
/// Source: <https://github.com/ethereum/go-ethereum/blob/7f756dc1185d7f1eeeacb1d12341606b7135f9ea/core/txpool/legacypool/list.go#L469-L482>.
///
/// NOTE: The implementation is incomplete for missing base fee.
fn priority(
&self,
transaction: &Self::Transaction,
base_fee: u64,
) -> Priority<Self::PriorityValue> {
transaction.effective_tip_per_gas(base_fee).map(U256::from).into()
}
}
impl<T> Default for CoinbaseTipOrdering<T> {
fn default() -> Self {
Self(Default::default())
}
}
impl<T> Clone for CoinbaseTipOrdering<T> {
fn clone(&self) -> Self {
Self::default()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_priority_ordering() {
        // `Value` variants compare on the inner value; `None` ranks below any `Value`.
        let p1 = Priority::Value(3);
        let p2 = Priority::Value(1);
        let p3 = Priority::None;
        assert!(p1 > p2); // 3 > 1
        assert!(p1 > p3); // Value(3) > None
        assert!(p2 > p3); // Value(1) > None
        assert_eq!(p3, Priority::None);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/error.rs | crates/transaction-pool/src/error.rs | //! Transaction pool errors
use std::any::Any;
use alloy_eips::eip4844::BlobTransactionValidationError;
use alloy_primitives::{Address, TxHash, U256};
use reth_primitives_traits::transaction::error::InvalidTransactionError;
/// Transaction pool result type.
pub type PoolResult<T> = Result<T, PoolError>;
/// A trait for additional errors that can be thrown by the transaction pool.
///
/// For example during validation
/// [`TransactionValidator::validate_transaction`](crate::validate::TransactionValidator::validate_transaction)
pub trait PoolTransactionError: core::error::Error + Send + Sync {
    /// Returns `true` if the error was caused by a transaction that is considered bad in the
    /// context of the transaction pool and warrants peer penalization.
    ///
    /// See [`PoolError::is_bad_transaction`].
    fn is_bad_transaction(&self) -> bool;
    /// Returns a reference to `self` as a `&dyn Any`, enabling downcasting.
    // why: lets callers recover the concrete error type, see
    // `InvalidPoolTransactionError::downcast_other_ref`.
    fn as_any(&self) -> &dyn Any;
}
// Needed for `#[error(transparent)]`
impl core::error::Error for Box<dyn PoolTransactionError> {
    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
        // Delegate to the boxed error so the source chain is preserved through the box.
        (**self).source()
    }
}
/// Transaction pool error.
///
/// Displays as `[<hash>]: <kind>`.
#[derive(Debug, thiserror::Error)]
#[error("[{hash}]: {kind}")]
pub struct PoolError {
    /// The transaction hash that caused the error.
    pub hash: TxHash,
    /// The error kind.
    pub kind: PoolErrorKind,
}
/// Transaction pool error kind.
#[derive(Debug, thiserror::Error)]
pub enum PoolErrorKind {
    /// Same transaction already imported
    #[error("already imported")]
    AlreadyImported,
    /// Thrown if a replacement transaction's gas price is below the already imported transaction
    #[error("insufficient gas price to replace existing transaction")]
    ReplacementUnderpriced,
    /// The fee cap of the transaction is below the minimum fee cap determined by the protocol
    #[error("transaction feeCap {0} below chain minimum")]
    FeeCapBelowMinimumProtocolFeeCap(u128),
    /// Thrown when the number of unique transactions of a sender exceeded the slot capacity.
    #[error("rejected due to {0} being identified as a spammer")]
    SpammerExceededCapacity(Address),
    /// Thrown when a new transaction is added to the pool, but then immediately discarded to
    /// respect the size limits of the pool.
    #[error("transaction discarded outright due to pool size constraints")]
    DiscardedOnInsert,
    /// Thrown when the transaction is considered invalid.
    #[error(transparent)]
    InvalidTransaction(#[from] InvalidPoolTransactionError),
    /// Thrown if the mutual exclusivity constraint (blob vs normal transaction) is violated.
    /// Fields are the sender address and the conflicting transaction type.
    #[error("transaction type {1} conflicts with existing transaction for {0}")]
    ExistingConflictingTransactionType(Address, u8),
    /// Any other error that occurred while inserting/validating a transaction. e.g. IO database
    /// error
    #[error(transparent)]
    Other(#[from] Box<dyn core::error::Error + Send + Sync>),
}
// === impl PoolError ===
impl PoolError {
    /// Creates a new pool error.
    pub fn new(hash: TxHash, kind: impl Into<PoolErrorKind>) -> Self {
        Self { hash, kind: kind.into() }
    }
    /// Creates a new pool error with the `Other` kind.
    pub fn other(
        hash: TxHash,
        error: impl Into<Box<dyn core::error::Error + Send + Sync>>,
    ) -> Self {
        Self { hash, kind: PoolErrorKind::Other(error.into()) }
    }
    /// Returns `true` if the error was caused by a transaction that is considered bad in the
    /// context of the transaction pool and warrants peer penalization.
    ///
    /// Not all error variants are caused by the incorrect composition of the transaction (See also
    /// [`InvalidPoolTransactionError`]) and can be caused by the current state of the transaction
    /// pool. For example the transaction pool is already full or the error was caused by an
    /// internal error, such as database errors.
    ///
    /// This function returns true only if the transaction will never make it into the pool because
    /// its composition is invalid and the original sender should have detected this as well. This
    /// is used to determine whether the original sender should be penalized for sending an
    /// erroneous transaction.
    #[inline]
    pub fn is_bad_transaction(&self) -> bool {
        match &self.kind {
            // Only a rejection rooted in the transaction itself can be blamed on the sender.
            PoolErrorKind::InvalidTransaction(err) => err.is_bad_transaction(),
            // Everything else reflects pool state or local policy rather than a bad tx:
            // - `AlreadyImported` / `ReplacementUnderpriced`: the tx (or a competing one) is
            //   already known, which says nothing about its validity.
            // - `FeeCapBelowMinimumProtocolFeeCap`: the technical minimum determined by the
            //   protocol, see
            //   [MINIMUM_PROTOCOL_FEE_CAP](alloy_primitives::constants::MIN_PROTOCOL_BASE_FEE);
            //   the sender's client simply may not implement this check.
            // - `SpammerExceededCapacity`: slot limits depend on pool lag; we can't attribute all
            //   of the sender's transactions to a single peer.
            // - `DiscardedOnInsert`: a valid tx dropped purely due to pool size constraints.
            // - `ExistingConflictingTransactionType`: blob-vs-normal exclusivity is enforced by
            //   this pool implementation, not by the protocol.
            // - `Other`: internal errors unrelated to the transaction.
            PoolErrorKind::AlreadyImported |
            PoolErrorKind::ReplacementUnderpriced |
            PoolErrorKind::FeeCapBelowMinimumProtocolFeeCap(_) |
            PoolErrorKind::SpammerExceededCapacity(_) |
            PoolErrorKind::DiscardedOnInsert |
            PoolErrorKind::ExistingConflictingTransactionType(_, _) |
            PoolErrorKind::Other(_) => false,
        }
    }
}
/// Represents all errors that can happen when validating transactions for the pool for EIP-4844
/// transactions
#[derive(Debug, thiserror::Error)]
pub enum Eip4844PoolTransactionError {
    /// Thrown if we're unable to find the blob for a transaction that was previously extracted
    #[error("blob sidecar not found for EIP4844 transaction")]
    MissingEip4844BlobSidecar,
    /// Thrown if an EIP-4844 transaction without any blobs arrives
    #[error("blobless blob transaction")]
    NoEip4844Blobs,
    /// Thrown if an EIP-4844 transaction arrives with more blobs than permitted
    #[error("too many blobs in transaction: have {have}, permitted {permitted}")]
    TooManyEip4844Blobs {
        /// Number of blobs the transaction has
        have: u64,
        /// Number of maximum blobs the transaction can have
        permitted: u64,
    },
    /// Thrown if validating the blob sidecar for the transaction failed.
    #[error(transparent)]
    InvalidEip4844Blob(BlobTransactionValidationError),
    /// EIP-4844 transactions are only accepted if they're gapless, meaning the previous nonce of
    /// the transaction (`tx.nonce -1`) must either be in the pool or match the on chain nonce of
    /// the sender.
    ///
    /// This error is thrown on validation if a valid blob transaction arrives with a nonce that
    /// would introduce gap in the nonce sequence.
    #[error("nonce too high")]
    Eip4844NonceGap,
    /// Thrown if blob transaction has an EIP-7594 style sidecar before Osaka.
    #[error("unexpected eip-7594 sidecar before osaka")]
    UnexpectedEip7594SidecarBeforeOsaka,
    /// Thrown if blob transaction has an EIP-4844 style sidecar after Osaka.
    #[error("unexpected eip-4844 sidecar after osaka")]
    UnexpectedEip4844SidecarAfterOsaka,
}
/// Represents all errors that can happen when validating transactions for the pool for EIP-7702
/// transactions
#[derive(Debug, thiserror::Error)]
pub enum Eip7702PoolTransactionError {
    /// Thrown if the transaction has no items in its authorization list
    // why: EIP-7702 requires a non-empty authorization list, so such a tx is malformed.
    #[error("no items in authorization list for EIP7702 transaction")]
    MissingEip7702AuthorizationList,
    /// Returned when a transaction with a nonce
    /// gap is received from accounts with a deployed delegation or pending delegation.
    #[error("gapped-nonce tx from delegated accounts")]
    OutOfOrderTxFromDelegated,
    /// Returned when the maximum number of in-flight
    /// transactions is reached for specific accounts.
    #[error("in-flight transaction limit reached for delegated accounts")]
    InflightTxLimitReached,
    /// Returned if a transaction has an authorization
    /// signed by an address which already has in-flight transactions known to the
    /// pool.
    #[error("authority already reserved")]
    AuthorityReserved,
}
/// Represents errors that can happen when validating transactions for the pool
///
/// See [`TransactionValidator`](crate::TransactionValidator).
#[derive(Debug, thiserror::Error)]
pub enum InvalidPoolTransactionError {
    /// Hard consensus errors
    #[error(transparent)]
    Consensus(#[from] InvalidTransactionError),
    /// Thrown when a new transaction is added to the pool, but then immediately discarded to
    /// respect the size limits of the pool. Fields: (tx gas limit, block gas limit).
    #[error("transaction's gas limit {0} exceeds block's gas limit {1}")]
    ExceedsGasLimit(u64, u64),
    /// Thrown when a transaction's gas limit exceeds the configured maximum per-transaction limit.
    /// Fields: (tx gas limit, configured maximum).
    #[error("transaction's gas limit {0} exceeds maximum per-transaction gas limit {1}")]
    MaxTxGasLimitExceeded(u64, u64),
    /// Thrown when a new transaction is added to the pool, but then immediately discarded to
    /// respect the tx fee exceeds the configured cap
    #[error("tx fee ({max_tx_fee_wei} wei) exceeds the configured cap ({tx_fee_cap_wei} wei)")]
    ExceedsFeeCap {
        /// max fee in wei of new tx submitted to the pull (e.g. 0.11534 ETH)
        max_tx_fee_wei: u128,
        /// configured tx fee cap in wei (e.g. 1.0 ETH)
        tx_fee_cap_wei: u128,
    },
    /// Thrown when a new transaction is added to the pool, but then immediately discarded to
    /// respect the `max_init_code_size`. Fields: (input size, configured maximum).
    #[error("transaction's input size {0} exceeds max_init_code_size {1}")]
    ExceedsMaxInitCodeSize(usize, usize),
    /// Thrown if the input data of a transaction is greater
    /// than some meaningful limit a user might use. This is not a consensus error
    /// making the transaction invalid, rather a DOS protection.
    /// Fields: (input size, configured maximum).
    #[error("input data too large")]
    OversizedData(usize, usize),
    /// Thrown if the transaction's fee is below the minimum fee
    #[error("transaction underpriced")]
    Underpriced,
    /// Thrown if the transaction's would require an account to be overdrawn
    #[error("transaction overdraws from account, balance: {balance}, cost: {cost}")]
    Overdraft {
        /// Cost transaction is allowed to consume. See `reth_transaction_pool::PoolTransaction`.
        cost: U256,
        /// Balance of account.
        balance: U256,
    },
    /// EIP-2681 error thrown if the nonce is higher or equal than `U64::max`
    /// <https://eips.ethereum.org/EIPS/eip-2681>
    #[error("nonce exceeds u64 limit")]
    Eip2681,
    /// EIP-4844 related errors
    #[error(transparent)]
    Eip4844(#[from] Eip4844PoolTransactionError),
    /// EIP-7702 related errors
    #[error(transparent)]
    Eip7702(#[from] Eip7702PoolTransactionError),
    /// Any other error that occurred while inserting/validating that is transaction specific
    #[error(transparent)]
    Other(Box<dyn PoolTransactionError>),
    /// The transaction is specified to use less gas than required to start the
    /// invocation.
    #[error("intrinsic gas too low")]
    IntrinsicGasTooLow,
    /// The transaction priority fee is below the minimum required priority fee.
    #[error("transaction priority fee below minimum required priority fee {minimum_priority_fee}")]
    PriorityFeeBelowMinimum {
        /// Minimum required priority fee.
        minimum_priority_fee: u128,
    },
}
// === impl InvalidPoolTransactionError ===
impl InvalidPoolTransactionError {
    /// Returns a new [`InvalidPoolTransactionError::Other`] instance with the given
    /// [`PoolTransactionError`].
    pub fn other<E: PoolTransactionError + 'static>(err: E) -> Self {
        Self::Other(Box::new(err))
    }
    /// Returns `true` if the error was caused by a transaction that is considered bad in the
    /// context of the transaction pool and warrants peer penalization.
    ///
    /// See [`PoolError::is_bad_transaction`].
    #[expect(clippy::match_same_arms)]
    #[inline]
    fn is_bad_transaction(&self) -> bool {
        match self {
            Self::Consensus(err) => {
                // transaction considered invalid by the consensus rules
                // We do not consider the following errors to be erroneous transactions, since they
                // depend on dynamic environmental conditions and should not be assumed to have been
                // intentionally caused by the sender
                match err {
                    InvalidTransactionError::InsufficientFunds { .. } |
                    InvalidTransactionError::NonceNotConsistent { .. } => {
                        // transaction could just have arrived late/early
                        false
                    }
                    InvalidTransactionError::GasTooLow |
                    InvalidTransactionError::GasTooHigh |
                    InvalidTransactionError::TipAboveFeeCap => {
                        // these are technically not invalid
                        false
                    }
                    InvalidTransactionError::FeeCapTooLow => {
                        // dynamic, but not used during validation
                        false
                    }
                    InvalidTransactionError::Eip2930Disabled |
                    InvalidTransactionError::Eip1559Disabled |
                    InvalidTransactionError::Eip4844Disabled |
                    InvalidTransactionError::Eip7702Disabled => {
                        // settings
                        false
                    }
                    // decryption failure depends on local key material, not the sender
                    InvalidTransactionError::FailedToDecryptSeismicTx => false,
                    InvalidTransactionError::OldLegacyChainId |
                    InvalidTransactionError::ChainIdMismatch |
                    InvalidTransactionError::GasUintOverflow |
                    InvalidTransactionError::TxTypeNotSupported |
                    InvalidTransactionError::SignerAccountHasBytecode |
                    InvalidTransactionError::GasLimitTooHigh => true,
                }
            }
            Self::ExceedsGasLimit(_, _) => true,
            Self::MaxTxGasLimitExceeded(_, _) => {
                // local setting
                false
            }
            Self::ExceedsFeeCap { max_tx_fee_wei: _, tx_fee_cap_wei: _ } => true,
            Self::ExceedsMaxInitCodeSize(_, _) => true,
            Self::OversizedData(_, _) => true,
            Self::Underpriced => {
                // local setting
                false
            }
            Self::IntrinsicGasTooLow => true,
            // balance is a dynamic condition, not the sender's fault
            Self::Overdraft { .. } => false,
            Self::Other(err) => err.is_bad_transaction(),
            Self::Eip2681 => true,
            Self::Eip4844(eip4844_err) => {
                match eip4844_err {
                    Eip4844PoolTransactionError::MissingEip4844BlobSidecar => {
                        // this is only reachable when blob transactions are reinjected and we're
                        // unable to find the previously extracted blob
                        false
                    }
                    Eip4844PoolTransactionError::InvalidEip4844Blob(_) => {
                        // This is only reachable when the blob is invalid
                        true
                    }
                    Eip4844PoolTransactionError::Eip4844NonceGap => {
                        // it is possible that the pool sees `nonce n` before `nonce n-1` and this
                        // is only thrown for valid(good) blob transactions
                        false
                    }
                    Eip4844PoolTransactionError::NoEip4844Blobs => {
                        // this is a malformed transaction and should not be sent over the network
                        true
                    }
                    Eip4844PoolTransactionError::TooManyEip4844Blobs { .. } => {
                        // this is a malformed transaction and should not be sent over the network
                        true
                    }
                    Eip4844PoolTransactionError::UnexpectedEip4844SidecarAfterOsaka |
                    Eip4844PoolTransactionError::UnexpectedEip7594SidecarBeforeOsaka => {
                        // for now we do not want to penalize peers for broadcasting different
                        // sidecars
                        false
                    }
                }
            }
            Self::Eip7702(eip7702_err) => match eip7702_err {
                Eip7702PoolTransactionError::MissingEip7702AuthorizationList => {
                    // as EIP-7702 specifies, 7702 transactions must have an non-empty authorization
                    // list so this is a malformed transaction and should not be
                    // sent over the network
                    true
                }
                Eip7702PoolTransactionError::OutOfOrderTxFromDelegated => false,
                Eip7702PoolTransactionError::InflightTxLimitReached => false,
                Eip7702PoolTransactionError::AuthorityReserved => false,
            },
            // local setting
            Self::PriorityFeeBelowMinimum { .. } => false,
        }
    }
    /// Returns `true` if an import failed due to an oversized transaction
    pub const fn is_oversized(&self) -> bool {
        matches!(self, Self::OversizedData(_, _))
    }
    /// Returns `true` if an import failed due to nonce gap.
    pub const fn is_nonce_gap(&self) -> bool {
        matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent { .. })) ||
            matches!(self, Self::Eip4844(Eip4844PoolTransactionError::Eip4844NonceGap))
    }
    /// Returns the arbitrary error if it is [`InvalidPoolTransactionError::Other`]
    pub fn as_other(&self) -> Option<&dyn PoolTransactionError> {
        match self {
            Self::Other(err) => Some(&**err),
            _ => None,
        }
    }
    /// Returns a reference to the [`InvalidPoolTransactionError::Other`] value if this type is a
    /// [`InvalidPoolTransactionError::Other`] of that type. Returns None otherwise.
    pub fn downcast_other_ref<T: core::error::Error + 'static>(&self) -> Option<&T> {
        let other = self.as_other()?;
        other.as_any().downcast_ref()
    }
    /// Returns true if the this type is a [`InvalidPoolTransactionError::Other`] of that error
    /// type. Returns false otherwise.
    pub fn is_other<T: core::error::Error + 'static>(&self) -> bool {
        self.as_other().map(|err| err.as_any().is::<T>()).unwrap_or(false)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Minimal `PoolTransactionError` implementation used to exercise downcasting.
    #[derive(thiserror::Error, Debug)]
    #[error("err")]
    struct E;
    impl PoolTransactionError for E {
        fn is_bad_transaction(&self) -> bool {
            false
        }
        fn as_any(&self) -> &dyn Any {
            self
        }
    }
    #[test]
    fn other_downcast() {
        let err = InvalidPoolTransactionError::Other(Box::new(E));
        assert!(err.is_other::<E>());
        assert!(err.downcast_other_ref::<E>().is_some());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/identifier.rs | crates/transaction-pool/src/identifier.rs | //! Identifier types for transactions and senders.
use alloy_primitives::{map::HashMap, Address};
use rustc_hash::FxHashMap;
/// An internal mapping of addresses.
///
/// This assigns a _unique_ [`SenderId`] for a new [`Address`].
/// It has capacity for 2^64 unique addresses.
#[derive(Debug, Default)]
pub struct SenderIdentifiers {
    /// The identifier to use next.
    // NOTE(review): `next_id` advances this with `wrapping_add`, so ids would repeat after 2^64
    // assignments; assumed unreachable in practice.
    id: u64,
    /// Assigned [`SenderId`] for an [`Address`].
    address_to_id: HashMap<Address, SenderId>,
    /// Reverse mapping of [`SenderId`] to [`Address`].
    sender_to_address: FxHashMap<SenderId, Address>,
}
impl SenderIdentifiers {
    /// Returns the address for the given identifier.
    pub fn address(&self, id: &SenderId) -> Option<&Address> {
        self.sender_to_address.get(id)
    }
    /// Returns the [`SenderId`] that belongs to the given address, if it exists
    pub fn sender_id(&self, addr: &Address) -> Option<SenderId> {
        self.address_to_id.get(addr).copied()
    }
    /// Returns the existing [`SenderId`] or assigns a new one if it's missing
    pub fn sender_id_or_create(&mut self, addr: Address) -> SenderId {
        if let Some(existing) = self.sender_id(&addr) {
            return existing
        }
        // First time we see this address: mint a fresh id and record both directions.
        let id = self.next_id();
        self.address_to_id.insert(addr, id);
        self.sender_to_address.insert(id, addr);
        id
    }
    /// Returns the existing [`SenderId`]s or assigns new ones for any missing addresses.
    pub fn sender_ids_or_create(
        &mut self,
        addrs: impl IntoIterator<Item = Address>,
    ) -> Vec<SenderId> {
        let mut ids = Vec::new();
        for addr in addrs {
            ids.push(self.sender_id_or_create(addr));
        }
        ids
    }
    /// Returns the current identifier and increments the counter.
    fn next_id(&mut self) -> SenderId {
        let current = self.id;
        self.id = current.wrapping_add(1);
        current.into()
    }
}
/// A _unique_ identifier for a sender of an address.
///
/// This is the identifier of an internal `address` mapping that is valid in the context of this
/// program.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SenderId(u64);
impl SenderId {
    /// Returns a `Bound` for [`TransactionId`] starting with nonce `0`
    pub const fn start_bound(self) -> std::ops::Bound<TransactionId> {
        std::ops::Bound::Included(TransactionId::new(self, 0))
    }
    /// Converts the sender to a [`TransactionId`] with the given nonce.
    pub const fn into_transaction_id(self, nonce: u64) -> TransactionId {
        TransactionId::new(self, nonce)
    }
}
impl From<u64> for SenderId {
    fn from(value: u64) -> Self {
        Self(value)
    }
}
/// A unique identifier of a transaction of a Sender.
///
/// This serves as an identifier for dependencies of a transaction:
/// A transaction with a nonce higher than the current state nonce depends on `tx.nonce - 1`.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct TransactionId {
    /// Sender of this transaction
    pub sender: SenderId,
    /// Nonce of this transaction
    pub nonce: u64,
}
impl TransactionId {
    /// Create a new identifier pair
    pub const fn new(sender: SenderId, nonce: u64) -> Self {
        Self { sender, nonce }
    }
    /// Returns the [`TransactionId`] this transaction depends on.
    ///
    /// This returns `transaction_nonce - 1` if `transaction_nonce` is higher than the
    /// `on_chain_nonce`
    pub fn ancestor(transaction_nonce: u64, on_chain_nonce: u64, sender: SenderId) -> Option<Self> {
        if transaction_nonce > on_chain_nonce {
            // `transaction_nonce > on_chain_nonce` implies `transaction_nonce >= 1`, so the
            // subtraction cannot underflow.
            Some(Self::new(sender, transaction_nonce - 1))
        } else {
            None
        }
    }
    /// Returns the [`TransactionId`] that would come before this transaction.
    pub fn unchecked_ancestor(&self) -> Option<Self> {
        // `checked_sub` yields `None` exactly when `self.nonce == 0`.
        self.nonce.checked_sub(1).map(|prev| Self::new(self.sender, prev))
    }
    /// Returns the [`TransactionId`] that directly follows this transaction: `self.nonce + 1`
    pub const fn descendant(&self) -> Self {
        Self::new(self.sender, self.next_nonce())
    }
    /// Returns the nonce that follows immediately after this one.
    #[inline]
    pub const fn next_nonce(&self) -> u64 {
        self.nonce + 1
    }
}
// Unit tests for `TransactionId` arithmetic/ordering and `SenderIdentifiers` bookkeeping.
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::BTreeSet;
    #[test]
    fn test_transaction_id_new() {
        let sender = SenderId(1);
        let tx_id = TransactionId::new(sender, 5);
        assert_eq!(tx_id.sender, sender);
        assert_eq!(tx_id.nonce, 5);
    }
    #[test]
    fn test_transaction_id_ancestor() {
        let sender = SenderId(1);
        // Special case with nonce 0 and higher on-chain nonce
        let tx_id = TransactionId::ancestor(0, 1, sender);
        assert_eq!(tx_id, None);
        // Special case with nonce 0 and same on-chain nonce
        let tx_id = TransactionId::ancestor(0, 0, sender);
        assert_eq!(tx_id, None);
        // Ancestor is the previous nonce if the transaction nonce is higher than the on-chain nonce
        let tx_id = TransactionId::ancestor(5, 0, sender);
        assert_eq!(tx_id, Some(TransactionId::new(sender, 4)));
        // No ancestor if the transaction nonce is the same as the on-chain nonce
        let tx_id = TransactionId::ancestor(5, 5, sender);
        assert_eq!(tx_id, None);
        // No ancestor if the transaction nonce is lower than the on-chain nonce
        let tx_id = TransactionId::ancestor(5, 15, sender);
        assert_eq!(tx_id, None);
    }
    #[test]
    fn test_transaction_id_unchecked_ancestor() {
        let sender = SenderId(1);
        // Ancestor is the previous nonce if transaction nonce is higher than 0
        let tx_id = TransactionId::new(sender, 5);
        assert_eq!(tx_id.unchecked_ancestor(), Some(TransactionId::new(sender, 4)));
        // No ancestor if transaction nonce is 0
        let tx_id = TransactionId::new(sender, 0);
        assert_eq!(tx_id.unchecked_ancestor(), None);
    }
    #[test]
    fn test_transaction_id_descendant() {
        let sender = SenderId(1);
        let tx_id = TransactionId::new(sender, 5);
        let descendant = tx_id.descendant();
        assert_eq!(descendant, TransactionId::new(sender, 6));
    }
    #[test]
    fn test_transaction_id_next_nonce() {
        let sender = SenderId(1);
        let tx_id = TransactionId::new(sender, 5);
        assert_eq!(tx_id.next_nonce(), 6);
    }
    #[test]
    fn test_transaction_id_ord_eq_sender() {
        // Same sender: ordering is decided by the nonce.
        let tx1 = TransactionId::new(100u64.into(), 0u64);
        let tx2 = TransactionId::new(100u64.into(), 1u64);
        assert!(tx2 > tx1);
        let set = BTreeSet::from([tx1, tx2]);
        assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![tx1, tx2]);
    }
    #[test]
    fn test_transaction_id_ord() {
        // Different senders: the derived lexicographic order compares the sender first.
        let tx1 = TransactionId::new(99u64.into(), 0u64);
        let tx2 = TransactionId::new(100u64.into(), 1u64);
        assert!(tx2 > tx1);
        let set = BTreeSet::from([tx1, tx2]);
        assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![tx1, tx2]);
    }
    #[test]
    fn test_address_retrieval() {
        let mut identifiers = SenderIdentifiers::default();
        let address = Address::new([1; 20]);
        let id = identifiers.sender_id_or_create(address);
        assert_eq!(identifiers.address(&id), Some(&address));
    }
    #[test]
    fn test_sender_id_retrieval() {
        let mut identifiers = SenderIdentifiers::default();
        let address = Address::new([1; 20]);
        let id = identifiers.sender_id_or_create(address);
        assert_eq!(identifiers.sender_id(&address), Some(id));
    }
    #[test]
    fn test_sender_id_or_create_existing() {
        let mut identifiers = SenderIdentifiers::default();
        let address = Address::new([1; 20]);
        let id1 = identifiers.sender_id_or_create(address);
        let id2 = identifiers.sender_id_or_create(address);
        assert_eq!(id1, id2);
    }
    #[test]
    fn test_sender_id_or_create_new() {
        let mut identifiers = SenderIdentifiers::default();
        let address1 = Address::new([1; 20]);
        let address2 = Address::new([2; 20]);
        let id1 = identifiers.sender_id_or_create(address1);
        let id2 = identifiers.sender_id_or_create(address2);
        assert_ne!(id1, id2);
    }
    #[test]
    fn test_next_id_wrapping() {
        let mut identifiers = SenderIdentifiers { id: u64::MAX, ..Default::default() };
        // The current ID is `u64::MAX`, the next ID should wrap around to 0.
        let id1 = identifiers.next_id();
        assert_eq!(id1, SenderId(u64::MAX));
        // The next ID should now be 0 because of wrapping.
        let id2 = identifiers.next_id();
        assert_eq!(id2, SenderId(0));
        // And then 1, continuing incrementing.
        let id3 = identifiers.next_id();
        assert_eq!(id3, SenderId(1));
    }
    #[test]
    fn test_sender_id_start_bound() {
        let sender = SenderId(1);
        let start_bound = sender.start_bound();
        if let std::ops::Bound::Included(tx_id) = start_bound {
            assert_eq!(tx_id, TransactionId::new(sender, 0));
        } else {
            panic!("Expected included bound");
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/maintain.rs | crates/transaction-pool/src/maintain.rs | //! Support for maintaining the state of the transaction pool
use crate::{
blobstore::{BlobStoreCanonTracker, BlobStoreUpdates},
error::PoolError,
metrics::MaintainPoolMetrics,
traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt},
BlockInfo, PoolTransaction, PoolUpdateKind, TransactionOrigin,
};
use alloy_consensus::{BlockHeader, Typed2718};
use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718};
use alloy_primitives::{Address, BlockHash, BlockNumber};
use alloy_rlp::{Bytes, Encodable};
use futures_util::{
future::{BoxFuture, Fuse, FusedFuture},
FutureExt, Stream, StreamExt,
};
use reth_chain_state::CanonStateNotification;
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_execution_types::ChangedAccount;
use reth_fs_util::FsPathError;
use reth_primitives_traits::{
transaction::signed::SignedTransaction, NodePrimitives, SealedHeader,
};
use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory};
use reth_tasks::TaskSpawner;
use serde::{Deserialize, Serialize};
use std::{
borrow::Borrow,
collections::HashSet,
hash::{Hash, Hasher},
path::{Path, PathBuf},
sync::Arc,
};
use tokio::{
sync::oneshot,
time::{self, Duration},
};
use tracing::{debug, error, info, trace, warn};
/// Maximum amount of time non-executable transactions are queued.
pub const MAX_QUEUED_TRANSACTION_LIFETIME: Duration = Duration::from_secs(3 * 60 * 60);
/// Additional settings for maintaining the transaction pool
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MaintainPoolConfig {
    /// Maximum (reorg) depth we handle when updating the transaction pool: `new.number -
    /// last_seen.number`
    ///
    /// Default: 64 (2 epochs)
    pub max_update_depth: u64,
    /// Maximum number of accounts to reload from state at once when updating the transaction pool.
    ///
    /// Default: 100
    pub max_reload_accounts: usize,
    /// Maximum amount of time non-executable, non-local transactions are queued.
    ///
    /// Default: 3 hours ([`MAX_QUEUED_TRANSACTION_LIFETIME`])
    pub max_tx_lifetime: Duration,
    /// Apply no exemptions to the locally received transactions.
    ///
    /// This includes:
    /// - no price exemptions
    /// - no eviction exemptions
    pub no_local_exemptions: bool,
}
impl Default for MaintainPoolConfig {
    fn default() -> Self {
        Self {
            max_update_depth: 64,
            max_reload_accounts: 100,
            max_tx_lifetime: MAX_QUEUED_TRANSACTION_LIFETIME,
            no_local_exemptions: false,
        }
    }
}
/// Settings for local transaction backup task
#[derive(Debug, Clone, Default)]
pub struct LocalTransactionBackupConfig {
    /// Path to transactions backup file; backups are disabled when `None` (the default).
    pub transactions_path: Option<PathBuf>,
}
impl LocalTransactionBackupConfig {
    /// Receive path to transactions backup and return initialized config
    pub const fn with_local_txs_backup(transactions_path: PathBuf) -> Self {
        Self { transactions_path: Some(transactions_path) }
    }
}
/// Returns a spawnable future for maintaining the state of the transaction pool.
///
/// Thin wrapper around [`maintain_transaction_pool`] that boxes the future so it can be handed
/// off to a task executor.
pub fn maintain_transaction_pool_future<N, Client, P, St, Tasks>(
    client: Client,
    pool: P,
    events: St,
    task_spawner: Tasks,
    config: MaintainPoolConfig,
) -> BoxFuture<'static, ()>
where
    N: NodePrimitives,
    Client: StateProviderFactory
        + BlockReaderIdExt<Header = N::BlockHeader>
        + ChainSpecProvider<ChainSpec: EthChainSpec<Header = N::BlockHeader>>
        + Clone
        + 'static,
    P: TransactionPoolExt<Transaction: PoolTransaction<Consensus = N::SignedTx>> + 'static,
    St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
    Tasks: TaskSpawner + 'static,
{
    async move {
        maintain_transaction_pool(client, pool, events, task_spawner, config).await;
    }
    .boxed()
}
/// Maintains the state of the transaction pool by handling new blocks and reorgs.
///
/// This listens for any new blocks and reorgs and updates the transaction pool's state accordingly
pub async fn maintain_transaction_pool<N, Client, P, St, Tasks>(
client: Client,
pool: P,
mut events: St,
task_spawner: Tasks,
config: MaintainPoolConfig,
) where
N: NodePrimitives,
Client: StateProviderFactory
+ BlockReaderIdExt<Header = N::BlockHeader>
+ ChainSpecProvider<ChainSpec: EthChainSpec<Header = N::BlockHeader>>
+ Clone
+ 'static,
P: TransactionPoolExt<Transaction: PoolTransaction<Consensus = N::SignedTx>> + 'static,
St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
Tasks: TaskSpawner + 'static,
{
let metrics = MaintainPoolMetrics::default();
let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config;
// ensure the pool points to latest state
if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) {
let latest = SealedHeader::seal_slow(latest);
let chain_spec = client.chain_spec();
let info = BlockInfo {
block_gas_limit: latest.gas_limit(),
last_seen_block_hash: latest.hash(),
last_seen_block_number: latest.number(),
pending_basefee: chain_spec
.next_block_base_fee(latest.header(), latest.timestamp_seconds())
.unwrap_or_default(),
pending_blob_fee: latest.maybe_next_block_blob_fee(
chain_spec.blob_params_at_timestamp(latest.timestamp_seconds()),
),
};
pool.set_block_info(info);
}
// keeps track of mined blob transaction so we can clean finalized transactions
let mut blob_store_tracker = BlobStoreCanonTracker::default();
// keeps track of the latest finalized block
let mut last_finalized_block =
FinalizedBlockTracker::new(client.finalized_block_number().ok().flatten());
// keeps track of any dirty accounts that we know of are out of sync with the pool
let mut dirty_addresses = HashSet::default();
// keeps track of the state of the pool wrt to blocks
let mut maintained_state = MaintainedPoolState::InSync;
// the future that reloads accounts from state
let mut reload_accounts_fut = Fuse::terminated();
// eviction interval for stale non local txs
let mut stale_eviction_interval = time::interval(config.max_tx_lifetime);
// toggle for the first notification
let mut first_event = true;
// The update loop that waits for new blocks and reorgs and performs pool updated
// Listen for new chain events and derive the update action for the pool
loop {
trace!(target: "txpool", state=?maintained_state, "awaiting new block or reorg");
metrics.set_dirty_accounts_len(dirty_addresses.len());
let pool_info = pool.block_info();
// after performing a pool update after a new block we have some time to properly update
// dirty accounts and correct if the pool drifted from current state, for example after
// restart or a pipeline run
if maintained_state.is_drifted() {
metrics.inc_drift();
// assuming all senders are dirty
dirty_addresses = pool.unique_senders();
// make sure we toggle the state back to in sync
maintained_state = MaintainedPoolState::InSync;
}
// if we have accounts that are out of sync with the pool, we reload them in chunks
if !dirty_addresses.is_empty() && reload_accounts_fut.is_terminated() {
let (tx, rx) = oneshot::channel();
let c = client.clone();
let at = pool_info.last_seen_block_hash;
let fut = if dirty_addresses.len() > max_reload_accounts {
// need to chunk accounts to reload
let accs_to_reload =
dirty_addresses.iter().copied().take(max_reload_accounts).collect::<Vec<_>>();
for acc in &accs_to_reload {
// make sure we remove them from the dirty set
dirty_addresses.remove(acc);
}
async move {
let res = load_accounts(c, at, accs_to_reload.into_iter());
let _ = tx.send(res);
}
.boxed()
} else {
// can fetch all dirty accounts at once
let accs_to_reload = std::mem::take(&mut dirty_addresses);
async move {
let res = load_accounts(c, at, accs_to_reload.into_iter());
let _ = tx.send(res);
}
.boxed()
};
reload_accounts_fut = rx.fuse();
task_spawner.spawn_blocking(fut);
}
// check if we have a new finalized block
if let Some(finalized) =
last_finalized_block.update(client.finalized_block_number().ok().flatten())
{
if let BlobStoreUpdates::Finalized(blobs) =
blob_store_tracker.on_finalized_block(finalized)
{
metrics.inc_deleted_tracked_blobs(blobs.len());
// remove all finalized blobs from the blob store
pool.delete_blobs(blobs);
// and also do periodic cleanup
let pool = pool.clone();
task_spawner.spawn_blocking(Box::pin(async move {
debug!(target: "txpool", finalized_block = %finalized, "cleaning up blob store");
pool.cleanup_blobs();
}));
}
}
// outcomes of the futures we are waiting on
let mut event = None;
let mut reloaded = None;
// select of account reloads and new canonical state updates which should arrive at the rate
// of the block time
tokio::select! {
res = &mut reload_accounts_fut => {
reloaded = Some(res);
}
ev = events.next() => {
if ev.is_none() {
// the stream ended, we are done
break;
}
event = ev;
// on receiving the first event on start up, mark the pool as drifted to explicitly
// trigger revalidation and clear out outdated txs.
if first_event {
maintained_state = MaintainedPoolState::Drifted;
first_event = false
}
}
_ = stale_eviction_interval.tick() => {
let stale_txs: Vec<_> = pool
.queued_transactions()
.into_iter()
.filter(|tx| {
// filter stale transactions based on config
(tx.origin.is_external() || config.no_local_exemptions) && tx.timestamp.elapsed() > config.max_tx_lifetime
})
.map(|tx| *tx.hash())
.collect();
debug!(target: "txpool", count=%stale_txs.len(), "removing stale transactions");
pool.remove_transactions(stale_txs);
}
}
// handle the result of the account reload
match reloaded {
Some(Ok(Ok(LoadedAccounts { accounts, failed_to_load }))) => {
// reloaded accounts successfully
// extend accounts we failed to load from database
dirty_addresses.extend(failed_to_load);
// update the pool with the loaded accounts
pool.update_accounts(accounts);
}
Some(Ok(Err(res))) => {
// Failed to load accounts from state
let (accs, err) = *res;
debug!(target: "txpool", %err, "failed to load accounts");
dirty_addresses.extend(accs);
}
Some(Err(_)) => {
// failed to receive the accounts, sender dropped, only possible if task panicked
maintained_state = MaintainedPoolState::Drifted;
}
None => {}
}
// handle the new block or reorg
let Some(event) = event else { continue };
match event {
CanonStateNotification::Reorg { old, new } => {
let (old_blocks, old_state) = old.inner();
let (new_blocks, new_state) = new.inner();
let new_tip = new_blocks.tip();
let new_first = new_blocks.first();
let old_first = old_blocks.first();
// check if the reorg is not canonical with the pool's block
if !(old_first.parent_hash() == pool_info.last_seen_block_hash ||
new_first.parent_hash() == pool_info.last_seen_block_hash)
{
// the new block points to a higher block than the oldest block in the old chain
maintained_state = MaintainedPoolState::Drifted;
}
let chain_spec = client.chain_spec();
// fees for the next block: `new_tip+1`
let pending_block_base_fee = chain_spec
.next_block_base_fee(new_tip.header(), new_tip.timestamp_seconds())
.unwrap_or_default();
let pending_block_blob_fee = new_tip.header().maybe_next_block_blob_fee(
chain_spec.blob_params_at_timestamp(new_tip.timestamp_seconds()),
);
// we know all changed account in the new chain
let new_changed_accounts: HashSet<_> =
new_state.changed_accounts().map(ChangedAccountEntry).collect();
// find all accounts that were changed in the old chain but _not_ in the new chain
let missing_changed_acc = old_state
.accounts_iter()
.map(|(a, _)| a)
.filter(|addr| !new_changed_accounts.contains(addr));
// for these we need to fetch the nonce+balance from the db at the new tip
let mut changed_accounts =
match load_accounts(client.clone(), new_tip.hash(), missing_changed_acc) {
Ok(LoadedAccounts { accounts, failed_to_load }) => {
// extend accounts we failed to load from database
dirty_addresses.extend(failed_to_load);
accounts
}
Err(err) => {
let (addresses, err) = *err;
debug!(
target: "txpool",
%err,
"failed to load missing changed accounts at new tip: {:?}",
new_tip.hash()
);
dirty_addresses.extend(addresses);
vec![]
}
};
// also include all accounts from new chain
// we can use extend here because they are unique
changed_accounts.extend(new_changed_accounts.into_iter().map(|entry| entry.0));
// all transactions mined in the new chain
let new_mined_transactions: HashSet<_> = new_blocks.transaction_hashes().collect();
// update the pool then re-inject the pruned transactions
// find all transactions that were mined in the old chain but not in the new chain
let pruned_old_transactions = old_blocks
.transactions_ecrecovered()
.filter(|tx| !new_mined_transactions.contains(tx.tx_hash()))
.filter_map(|tx| {
if tx.is_eip4844() {
// reorged blobs no longer include the blob, which is necessary for
// validating the transaction. Even though the transaction could have
// been validated previously, we still need the blob in order to
// accurately set the transaction's
// encoded-length which is propagated over the network.
pool.get_blob(*tx.tx_hash())
.ok()
.flatten()
.map(Arc::unwrap_or_clone)
.and_then(|sidecar| {
<P as TransactionPool>::Transaction::try_from_eip4844(
tx, sidecar,
)
})
} else {
<P as TransactionPool>::Transaction::try_from_consensus(tx).ok()
}
})
.collect::<Vec<_>>();
// update the pool first
let update = CanonicalStateUpdate {
new_tip: new_tip.sealed_block(),
pending_block_base_fee,
pending_block_blob_fee,
changed_accounts,
// all transactions mined in the new chain need to be removed from the pool
mined_transactions: new_blocks.transaction_hashes().collect(),
update_kind: PoolUpdateKind::Reorg,
};
pool.on_canonical_state_change(update);
// all transactions that were mined in the old chain but not in the new chain need
// to be re-injected
//
// Note: we no longer know if the tx was local or external
// Because the transactions are not finalized, the corresponding blobs are still in
// blob store (if we previously received them from the network)
metrics.inc_reinserted_transactions(pruned_old_transactions.len());
let _ = pool.add_external_transactions(pruned_old_transactions).await;
// keep track of new mined blob transactions
blob_store_tracker.add_new_chain_blocks(&new_blocks);
}
CanonStateNotification::Commit { new } => {
let (blocks, state) = new.inner();
let tip = blocks.tip();
let chain_spec = client.chain_spec();
// fees for the next block: `tip+1`
let pending_block_base_fee = chain_spec
.next_block_base_fee(tip.header(), tip.timestamp_seconds())
.unwrap_or_default();
let pending_block_blob_fee = tip.header().maybe_next_block_blob_fee(
chain_spec.blob_params_at_timestamp(tip.timestamp_seconds()),
);
let first_block = blocks.first();
trace!(
target: "txpool",
first = first_block.number(),
tip = tip.number(),
pool_block = pool_info.last_seen_block_number,
"update pool on new commit"
);
// check if the depth is too large and should be skipped, this could happen after
// initial sync or long re-sync
let depth = tip.number().abs_diff(pool_info.last_seen_block_number);
if depth > max_update_depth {
maintained_state = MaintainedPoolState::Drifted;
debug!(target: "txpool", ?depth, "skipping deep canonical update");
let info = BlockInfo {
block_gas_limit: tip.header().gas_limit(),
last_seen_block_hash: tip.hash(),
last_seen_block_number: tip.number(),
pending_basefee: pending_block_base_fee,
pending_blob_fee: pending_block_blob_fee,
};
pool.set_block_info(info);
// keep track of mined blob transactions
blob_store_tracker.add_new_chain_blocks(&blocks);
continue
}
let mut changed_accounts = Vec::with_capacity(state.state().len());
for acc in state.changed_accounts() {
// we can always clear the dirty flag for this account
dirty_addresses.remove(&acc.address);
changed_accounts.push(acc);
}
let mined_transactions = blocks.transaction_hashes().collect();
// check if the range of the commit is canonical with the pool's block
if first_block.parent_hash() != pool_info.last_seen_block_hash {
// we received a new canonical chain commit but the commit is not canonical with
// the pool's block, this could happen after initial sync or
// long re-sync
maintained_state = MaintainedPoolState::Drifted;
}
// Canonical update
let update = CanonicalStateUpdate {
new_tip: tip.sealed_block(),
pending_block_base_fee,
pending_block_blob_fee,
changed_accounts,
mined_transactions,
update_kind: PoolUpdateKind::Commit,
};
pool.on_canonical_state_change(update);
// keep track of mined blob transactions
blob_store_tracker.add_new_chain_blocks(&blocks);
}
}
}
}
struct FinalizedBlockTracker {
last_finalized_block: Option<BlockNumber>,
}
impl FinalizedBlockTracker {
const fn new(last_finalized_block: Option<BlockNumber>) -> Self {
Self { last_finalized_block }
}
/// Updates the tracked finalized block and returns the new finalized block if it changed
fn update(&mut self, finalized_block: Option<BlockNumber>) -> Option<BlockNumber> {
let finalized = finalized_block?;
self.last_finalized_block
.replace(finalized)
.is_none_or(|last| last < finalized)
.then_some(finalized)
}
}
/// Keeps track of the pool's state, whether the accounts in the pool are in sync with the actual
/// state.
#[derive(Debug, PartialEq, Eq)]
enum MaintainedPoolState {
/// Pool is assumed to be in sync with the current state
InSync,
/// Pool could be out of sync with the state
Drifted,
}
impl MaintainedPoolState {
/// Returns `true` if the pool is assumed to be out of sync with the current state.
#[inline]
const fn is_drifted(&self) -> bool {
matches!(self, Self::Drifted)
}
}
/// A unique [`ChangedAccount`] identified by its address that can be used for deduplication
#[derive(Eq)]
struct ChangedAccountEntry(ChangedAccount);
impl PartialEq for ChangedAccountEntry {
fn eq(&self, other: &Self) -> bool {
self.0.address == other.0.address
}
}
impl Hash for ChangedAccountEntry {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.address.hash(state);
}
}
impl Borrow<Address> for ChangedAccountEntry {
fn borrow(&self) -> &Address {
&self.0.address
}
}
#[derive(Default)]
struct LoadedAccounts {
/// All accounts that were loaded
accounts: Vec<ChangedAccount>,
/// All accounts that failed to load
failed_to_load: Vec<Address>,
}
/// Loads all accounts at the given state
///
/// Returns an error with all given addresses if the state is not available.
///
/// Note: this expects _unique_ addresses
fn load_accounts<Client, I>(
client: Client,
at: BlockHash,
addresses: I,
) -> Result<LoadedAccounts, Box<(HashSet<Address>, ProviderError)>>
where
I: IntoIterator<Item = Address>,
Client: StateProviderFactory,
{
let addresses = addresses.into_iter();
let mut res = LoadedAccounts::default();
let state = match client.history_by_block_hash(at) {
Ok(state) => state,
Err(err) => return Err(Box::new((addresses.collect(), err))),
};
for addr in addresses {
if let Ok(maybe_acc) = state.basic_account(&addr) {
let acc = maybe_acc
.map(|acc| ChangedAccount { address: addr, nonce: acc.nonce, balance: acc.balance })
.unwrap_or_else(|| ChangedAccount::empty(addr));
res.accounts.push(acc)
} else {
// failed to load account.
res.failed_to_load.push(addr);
}
}
Ok(res)
}
/// Loads transactions from a file, decodes them from the JSON or RLP format, and
/// inserts them into the transaction pool on node boot up.
/// The file is removed after the transactions have been successfully processed.
async fn load_and_reinsert_transactions<P>(
pool: P,
file_path: &Path,
) -> Result<(), TransactionsBackupError>
where
P: TransactionPool<Transaction: PoolTransaction<Consensus: SignedTransaction>>,
{
if !file_path.exists() {
return Ok(())
}
debug!(target: "txpool", txs_file =?file_path, "Check local persistent storage for saved transactions");
let data = reth_fs_util::read(file_path)?;
if data.is_empty() {
return Ok(())
}
let pool_transactions: Vec<(TransactionOrigin, <P as TransactionPool>::Transaction)> =
if let Ok(tx_backups) = serde_json::from_slice::<Vec<TxBackup>>(&data) {
tx_backups
.into_iter()
.filter_map(|backup| {
let tx_signed = <P::Transaction as PoolTransaction>::Consensus::decode_2718(
&mut backup.rlp.as_ref(),
)
.ok()?;
let recovered = tx_signed.try_into_recovered().ok()?;
let pool_tx =
<P::Transaction as PoolTransaction>::try_from_consensus(recovered).ok()?;
Some((backup.origin, pool_tx))
})
.collect()
} else {
let txs_signed: Vec<<P::Transaction as PoolTransaction>::Consensus> =
alloy_rlp::Decodable::decode(&mut data.as_slice())?;
txs_signed
.into_iter()
.filter_map(|tx| tx.try_into_recovered().ok())
.filter_map(|tx| {
<P::Transaction as PoolTransaction>::try_from_consensus(tx)
.ok()
.map(|pool_tx| (TransactionOrigin::Local, pool_tx))
})
.collect()
};
let inserted = futures_util::future::join_all(
pool_transactions.into_iter().map(|(origin, tx)| pool.add_transaction(origin, tx)),
)
.await;
info!(target: "txpool", txs_file =?file_path, num_txs=%inserted.len(), "Successfully reinserted local transactions from file");
reth_fs_util::remove_file(file_path)?;
Ok(())
}
fn save_local_txs_backup<P>(pool: P, file_path: &Path)
where
P: TransactionPool<Transaction: PoolTransaction<Consensus: Encodable>>,
{
let local_transactions = pool.get_local_transactions();
if local_transactions.is_empty() {
trace!(target: "txpool", "no local transactions to save");
return
}
let local_transactions = local_transactions
.into_iter()
.map(|tx| {
let consensus_tx = tx.transaction.clone_into_consensus().into_inner();
let rlp_data = consensus_tx.encoded_2718();
TxBackup { rlp: rlp_data.into(), origin: tx.origin }
})
.collect::<Vec<_>>();
let json_data = match serde_json::to_string(&local_transactions) {
Ok(data) => data,
Err(err) => {
warn!(target: "txpool", %err, txs_file=?file_path, "failed to serialize local transactions to json");
return
}
};
info!(target: "txpool", txs_file =?file_path, num_txs=%local_transactions.len(), "Saving current local transactions");
let parent_dir = file_path.parent().map(std::fs::create_dir_all).transpose();
match parent_dir.map(|_| reth_fs_util::write(file_path, json_data)) {
Ok(_) => {
info!(target: "txpool", txs_file=?file_path, "Wrote local transactions to file");
}
Err(err) => {
warn!(target: "txpool", %err, txs_file=?file_path, "Failed to write local transactions to file");
}
}
}
/// A transaction backup that is saved as json to a file for
/// reinsertion into the pool
#[derive(Debug, Deserialize, Serialize)]
pub struct TxBackup {
/// Encoded transaction
pub rlp: Bytes,
/// The origin of the transaction
pub origin: TransactionOrigin,
}
/// Errors possible during txs backup load and decode
#[derive(thiserror::Error, Debug)]
pub enum TransactionsBackupError {
/// Error during RLP decoding of transactions
#[error("failed to apply transactions backup. Encountered RLP decode error: {0}")]
Decode(#[from] alloy_rlp::Error),
/// Error during json decoding of transactions
#[error("failed to apply transactions backup. Encountered JSON decode error: {0}")]
Json(#[from] serde_json::Error),
/// Error during file upload
#[error("failed to apply transactions backup. Encountered file error: {0}")]
FsPath(#[from] FsPathError),
/// Error adding transactions to the transaction pool
#[error("failed to insert transactions to the transactions pool. Encountered pool error: {0}")]
Pool(#[from] PoolError),
}
/// Task which manages saving local transactions to the persistent file in case of shutdown.
/// Reloads the transactions from the file on the boot up and inserts them into the pool.
pub async fn backup_local_transactions_task<P>(
shutdown: reth_tasks::shutdown::GracefulShutdown,
pool: P,
config: LocalTransactionBackupConfig,
) where
P: TransactionPool<Transaction: PoolTransaction<Consensus: SignedTransaction>> + Clone,
{
let Some(transactions_path) = config.transactions_path else {
// nothing to do
return
};
if let Err(err) = load_and_reinsert_transactions(pool.clone(), &transactions_path).await {
error!(target: "txpool", "{}", err)
}
let graceful_guard = shutdown.await;
// write transactions to disk
save_local_txs_backup(pool, &transactions_path);
drop(graceful_guard)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder,
CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionOrigin,
};
use alloy_eips::eip2718::Decodable2718;
use alloy_primitives::{hex, U256};
use reth_ethereum_primitives::PooledTransactionVariant;
use reth_fs_util as fs;
use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
use reth_tasks::TaskManager;
#[test]
fn changed_acc_entry() {
let changed_acc = ChangedAccountEntry(ChangedAccount::empty(Address::random()));
let mut copy = changed_acc.0;
copy.nonce = 10;
assert!(changed_acc.eq(&ChangedAccountEntry(copy)));
}
const EXTENSION: &str = "json";
const FILENAME: &str = "test_transactions_backup";
#[tokio::test(flavor = "multi_thread")]
async fn test_save_local_txs_backup() {
let temp_dir = tempfile::tempdir().unwrap();
let transactions_path = temp_dir.path().join(FILENAME).with_extension(EXTENSION);
let tx_bytes = hex!(
"02f87201830655c2808505ef61f08482565f94388c818ca8b9251b393131c08a736a67ccb192978801049e39c4b5b1f580c001a01764ace353514e8abdfb92446de356b260e3c1225b73fc4c8876a6258d12a129a04f02294aa61ca7676061cd99f29275491218b4754b46a0248e5e42bc5091f507"
);
let tx = PooledTransactionVariant::decode_2718(&mut &tx_bytes[..]).unwrap();
let provider = MockEthProvider::default();
let transaction = EthPooledTransaction::from_pooled(tx.try_into_recovered().unwrap());
let tx_to_cmp = transaction.clone();
let sender = hex!("1f9090aaE28b8a3dCeaDf281B0F12828e676c326").into();
provider.add_account(sender, ExtendedAccount::new(42, U256::MAX));
let blob_store = InMemoryBlobStore::default();
let validator = EthTransactionValidatorBuilder::new(provider).build(blob_store.clone());
let txpool = Pool::new(
validator,
CoinbaseTipOrdering::default(),
blob_store.clone(),
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/metrics.rs | crates/transaction-pool/src/metrics.rs | //! Transaction pool metrics.
use reth_metrics::{
metrics::{Counter, Gauge, Histogram},
Metrics,
};
/// Transaction pool metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
pub struct TxPoolMetrics {
/// Number of transactions inserted in the pool
pub(crate) inserted_transactions: Counter,
/// Number of invalid transactions
pub(crate) invalid_transactions: Counter,
/// Number of removed transactions from the pool
pub(crate) removed_transactions: Counter,
/// Number of transactions in the pending sub-pool
pub(crate) pending_pool_transactions: Gauge,
/// Total amount of memory used by the transactions in the pending sub-pool in bytes
pub(crate) pending_pool_size_bytes: Gauge,
/// Number of transactions in the basefee sub-pool
pub(crate) basefee_pool_transactions: Gauge,
/// Total amount of memory used by the transactions in the basefee sub-pool in bytes
pub(crate) basefee_pool_size_bytes: Gauge,
/// Number of transactions in the queued sub-pool
pub(crate) queued_pool_transactions: Gauge,
/// Total amount of memory used by the transactions in the queued sub-pool in bytes
pub(crate) queued_pool_size_bytes: Gauge,
/// Number of transactions in the blob sub-pool
pub(crate) blob_pool_transactions: Gauge,
/// Total amount of memory used by the transactions in the blob sub-pool in bytes
pub(crate) blob_pool_size_bytes: Gauge,
/// Number of all transactions of all sub-pools: pending + basefee + queued + blob
pub(crate) total_transactions: Gauge,
/// Number of all legacy transactions in the pool
pub(crate) total_legacy_transactions: Gauge,
/// Number of all EIP-2930 transactions in the pool
pub(crate) total_eip2930_transactions: Gauge,
/// Number of all EIP-1559 transactions in the pool
pub(crate) total_eip1559_transactions: Gauge,
/// Number of all EIP-4844 transactions in the pool
pub(crate) total_eip4844_transactions: Gauge,
/// Number of all EIP-7702 transactions in the pool
pub(crate) total_eip7702_transactions: Gauge,
/// How often the pool was updated after the canonical state changed
pub(crate) performed_state_updates: Counter,
/// Counter for the number of pending transactions evicted
pub(crate) pending_transactions_evicted: Counter,
/// Counter for the number of basefee transactions evicted
pub(crate) basefee_transactions_evicted: Counter,
/// Counter for the number of blob transactions evicted
pub(crate) blob_transactions_evicted: Counter,
/// Counter for the number of queued transactions evicted
pub(crate) queued_transactions_evicted: Counter,
}
/// Transaction pool blobstore metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
pub struct BlobStoreMetrics {
/// Number of failed inserts into the blobstore
pub(crate) blobstore_failed_inserts: Counter,
/// Number of failed deletes into the blobstore
pub(crate) blobstore_failed_deletes: Counter,
/// The number of bytes the blobs in the blobstore take up
pub(crate) blobstore_byte_size: Gauge,
/// How many blobs are currently in the blobstore
pub(crate) blobstore_entries: Gauge,
}
/// Transaction pool maintenance metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
pub struct MaintainPoolMetrics {
/// Gauge indicating the number of addresses with pending updates in the pool,
/// requiring their account information to be fetched.
pub(crate) dirty_accounts: Gauge,
/// Counter for the number of times the pool state diverged from the canonical blockchain
/// state.
pub(crate) drift_count: Counter,
/// Counter for the number of transactions reinserted into the pool following a blockchain
/// reorganization (reorg).
pub(crate) reinserted_transactions: Counter,
/// Counter for the number of finalized blob transactions that have been removed from tracking.
pub(crate) deleted_tracked_finalized_blobs: Counter,
}
impl MaintainPoolMetrics {
#[inline]
pub(crate) fn set_dirty_accounts_len(&self, count: usize) {
self.dirty_accounts.set(count as f64);
}
#[inline]
pub(crate) fn inc_reinserted_transactions(&self, count: usize) {
self.reinserted_transactions.increment(count as u64);
}
#[inline]
pub(crate) fn inc_deleted_tracked_blobs(&self, count: usize) {
self.deleted_tracked_finalized_blobs.increment(count as u64);
}
#[inline]
pub(crate) fn inc_drift(&self) {
self.drift_count.increment(1);
}
}
/// All Transactions metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
pub struct AllTransactionsMetrics {
/// Number of all transactions by hash in the pool
pub(crate) all_transactions_by_hash: Gauge,
/// Number of all transactions by id in the pool
pub(crate) all_transactions_by_id: Gauge,
/// Number of all transactions by all senders in the pool
pub(crate) all_transactions_by_all_senders: Gauge,
/// Number of blob transactions nonce gaps.
pub(crate) blob_transactions_nonce_gaps: Counter,
/// The current blob base fee
pub(crate) blob_base_fee: Gauge,
/// The current base fee
pub(crate) base_fee: Gauge,
}
/// Transaction pool validation metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
pub struct TxPoolValidationMetrics {
/// How long to successfully validate a blob
pub(crate) blob_validation_duration: Histogram,
}
/// Transaction pool validator task metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
pub struct TxPoolValidatorMetrics {
/// Number of in-flight validation job sends waiting for channel capacity
pub(crate) inflight_validation_jobs: Gauge,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/traits.rs | crates/transaction-pool/src/traits.rs | //! Transaction Pool Traits and Types
//!
//! This module defines the core abstractions for transaction pool implementations,
//! handling the complexity of different transaction representations across the
//! network, mempool, and the chain itself.
//!
//! ## Key Concepts
//!
//! ### Transaction Representations
//!
//! Transactions exist in different formats throughout their lifecycle:
//!
//! 1. **Consensus Format** ([`PoolTransaction::Consensus`])
//! - The canonical format stored in blocks
//! - Minimal size for efficient storage
//! - Example: EIP-4844 transactions store only blob hashes: ([`TransactionSigned::Eip4844`])
//!
//! 2. **Pooled Format** ([`PoolTransaction::Pooled`])
//! - Extended format for network propagation
//! - Includes additional validation data
//! - Example: EIP-4844 transactions include full blob sidecars: ([`PooledTransactionVariant`])
//!
//! ### Type Relationships
//!
//! ```text
//! NodePrimitives::SignedTx βββ NetworkPrimitives::BroadcastedTransaction
//! β β
//! β (consensus format) β (announced to peers)
//! β β
//! ββββββββββββ ββββββββββββββββββ
//! βΌ βΌ
//! PoolTransaction::Consensus
//! β β²
//! β β from pooled (always succeeds)
//! β β
//! βΌ β try_from consensus (may fail)
//! PoolTransaction::Pooled ββββ NetworkPrimitives::PooledTransaction
//! (sent on request)
//! ```
//!
//! ### Special Cases
//!
//! #### EIP-4844 Blob Transactions
//! - Consensus format: Only blob hashes (32 bytes each)
//! - Pooled format: Full blobs + commitments + proofs (large data per blob)
//! - Network behavior: Not broadcast automatically, only sent on explicit request
//!
//! #### Optimism Deposit Transactions
//! - Only exist in consensus format
//! - Never enter the mempool (system transactions)
//! - Conversion from consensus to pooled always fails
use crate::{
blobstore::BlobStoreError,
error::{InvalidPoolTransactionError, PoolError, PoolResult},
pool::{
state::SubPool, BestTransactionFilter, NewTransactionEvent, TransactionEvents,
TransactionListenerKind,
},
validate::ValidPoolTransaction,
AddedTransactionOutcome, AllTransactionsEvents,
};
use alloy_consensus::{error::ValueError, BlockHeader, Signed, Typed2718};
use alloy_eips::{
eip2718::{Encodable2718, WithEncoded},
eip2930::AccessList,
eip4844::{
env_settings::KzgSettings, BlobAndProofV1, BlobAndProofV2, BlobTransactionValidationError,
},
eip7594::BlobTransactionSidecarVariant,
eip7702::SignedAuthorization,
};
use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256};
use futures_util::{ready, Stream};
use reth_eth_wire_types::HandleMempoolData;
use reth_ethereum_primitives::{PooledTransactionVariant, TransactionSigned};
use reth_execution_types::ChangedAccount;
use reth_primitives_traits::{Block, InMemorySize, Recovered, SealedBlock, SignedTransaction};
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, HashSet},
fmt,
fmt::Debug,
future::Future,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::mpsc::Receiver;
/// The `PeerId` type.
pub type PeerId = alloy_primitives::B512;
/// Helper type alias to access [`PoolTransaction`] for a given [`TransactionPool`].
pub type PoolTx<P> = <P as TransactionPool>::Transaction;
/// Helper type alias to access [`PoolTransaction::Consensus`] for a given [`TransactionPool`].
pub type PoolConsensusTx<P> = <<P as TransactionPool>::Transaction as PoolTransaction>::Consensus;
/// Helper type alias to access [`PoolTransaction::Pooled`] for a given [`TransactionPool`].
pub type PoolPooledTx<P> = <<P as TransactionPool>::Transaction as PoolTransaction>::Pooled;
/// General purpose abstraction of a transaction-pool.
///
/// This is intended to be used by API-consumers such as RPC that need to inject new incoming,
/// unverified transactions. And by block production that needs to get transactions to execute in a
/// new block.
///
/// Note: This requires `Clone` for convenience, since it is assumed that this will be implemented
/// for a wrapped `Arc` type, see also [`Pool`](crate::Pool).
#[auto_impl::auto_impl(&, Arc)]
pub trait TransactionPool: Clone + Debug + Send + Sync {
    /// The transaction type of the pool
    type Transaction: EthPoolTransaction;
    /// Returns stats about the pool and all sub-pools.
    fn pool_size(&self) -> PoolSize;
    /// Returns the block the pool is currently tracking.
    ///
    /// This tracks the block that the pool has last seen.
    fn block_info(&self) -> BlockInfo;
    /// Imports an _external_ transaction.
    ///
    /// This is intended to be used by the network to insert incoming transactions received over the
    /// p2p network.
    ///
    /// Consumer: P2P
    fn add_external_transaction(
        &self,
        transaction: Self::Transaction,
    ) -> impl Future<Output = PoolResult<AddedTransactionOutcome>> + Send {
        self.add_transaction(TransactionOrigin::External, transaction)
    }
    /// Imports all _external_ transactions
    ///
    /// Consumer: Utility
    fn add_external_transactions(
        &self,
        transactions: Vec<Self::Transaction>,
    ) -> impl Future<Output = Vec<PoolResult<AddedTransactionOutcome>>> + Send {
        self.add_transactions(TransactionOrigin::External, transactions)
    }
    /// Adds an _unvalidated_ transaction into the pool and subscribe to state changes.
    ///
    /// This is the same as [`TransactionPool::add_transaction`] but returns an event stream for the
    /// given transaction.
    ///
    /// Consumer: Custom
    fn add_transaction_and_subscribe(
        &self,
        origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> impl Future<Output = PoolResult<TransactionEvents>> + Send;
    /// Adds an _unvalidated_ transaction into the pool.
    ///
    /// Consumer: RPC
    fn add_transaction(
        &self,
        origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> impl Future<Output = PoolResult<AddedTransactionOutcome>> + Send;
    /// Adds the given _unvalidated_ transactions into the pool.
    ///
    /// All transactions will use the same `origin`.
    ///
    /// Returns a list of results.
    ///
    /// Consumer: RPC
    fn add_transactions(
        &self,
        origin: TransactionOrigin,
        transactions: Vec<Self::Transaction>,
    ) -> impl Future<Output = Vec<PoolResult<AddedTransactionOutcome>>> + Send;
    /// Adds multiple _unvalidated_ transactions with individual origins.
    ///
    /// Each transaction can have its own [`TransactionOrigin`].
    ///
    /// Consumer: RPC
    fn add_transactions_with_origins(
        &self,
        transactions: Vec<(TransactionOrigin, Self::Transaction)>,
    ) -> impl Future<Output = Vec<PoolResult<AddedTransactionOutcome>>> + Send;
    /// Submit a consensus transaction directly to the pool.
    ///
    /// Converts the recovered consensus transaction into the pool's transaction type before
    /// insertion; a failed conversion is reported as a [`PoolError`] for the transaction's hash.
    fn add_consensus_transaction(
        &self,
        tx: Recovered<<Self::Transaction as PoolTransaction>::Consensus>,
        origin: TransactionOrigin,
    ) -> impl Future<Output = PoolResult<AddedTransactionOutcome>> + Send {
        async move {
            // Capture the hash before `tx` is moved into the conversion.
            let tx_hash = *tx.tx_hash();
            let pool_transaction = match Self::Transaction::try_from_consensus(tx) {
                Ok(tx) => tx,
                Err(e) => return Err(PoolError::other(tx_hash, e.to_string())),
            };
            self.add_transaction(origin, pool_transaction).await
        }
    }
    /// Submit a consensus transaction and subscribe to its event stream.
    ///
    /// Same conversion semantics as [`Self::add_consensus_transaction`], but returns
    /// [`TransactionEvents`] for the inserted transaction.
    fn add_consensus_transaction_and_subscribe(
        &self,
        tx: Recovered<<Self::Transaction as PoolTransaction>::Consensus>,
        origin: TransactionOrigin,
    ) -> impl Future<Output = PoolResult<TransactionEvents>> + Send {
        async move {
            // Capture the hash before `tx` is moved into the conversion.
            let tx_hash = *tx.tx_hash();
            let pool_transaction = match Self::Transaction::try_from_consensus(tx) {
                Ok(tx) => tx,
                Err(e) => return Err(PoolError::other(tx_hash, e.to_string())),
            };
            self.add_transaction_and_subscribe(origin, pool_transaction).await
        }
    }
    /// Returns a new transaction change event stream for the given transaction.
    ///
    /// Returns `None` if the transaction is not in the pool.
    fn transaction_event_listener(&self, tx_hash: TxHash) -> Option<TransactionEvents>;
    /// Returns a new transaction change event stream for _all_ transactions in the pool.
    fn all_transactions_event_listener(&self) -> AllTransactionsEvents<Self::Transaction>;
    /// Returns a new Stream that yields transaction hashes for new __pending__ transactions
    /// inserted into the pool that are allowed to be propagated.
    ///
    /// Note: This is intended for networking and will __only__ yield transactions that are allowed
    /// to be propagated over the network, see also [`TransactionListenerKind`].
    ///
    /// Consumer: RPC/P2P
    fn pending_transactions_listener(&self) -> Receiver<TxHash> {
        self.pending_transactions_listener_for(TransactionListenerKind::PropagateOnly)
    }
    /// Returns a new [Receiver] that yields transaction hashes for new __pending__ transactions
    /// inserted into the pending pool depending on the given [`TransactionListenerKind`] argument.
    fn pending_transactions_listener_for(&self, kind: TransactionListenerKind) -> Receiver<TxHash>;
    /// Returns a new stream that yields new valid transactions added to the pool.
    fn new_transactions_listener(&self) -> Receiver<NewTransactionEvent<Self::Transaction>> {
        self.new_transactions_listener_for(TransactionListenerKind::PropagateOnly)
    }
    /// Returns a new [Receiver] that yields blob "sidecars" (blobs w/ assoc. kzg
    /// commitments/proofs) for eip-4844 transactions inserted into the pool
    fn blob_transaction_sidecars_listener(&self) -> Receiver<NewBlobSidecar>;
    /// Returns a new stream that yields new valid transactions added to the pool
    /// depending on the given [`TransactionListenerKind`] argument.
    fn new_transactions_listener_for(
        &self,
        kind: TransactionListenerKind,
    ) -> Receiver<NewTransactionEvent<Self::Transaction>>;
    /// Returns a new Stream that yields new transactions added to the pending sub-pool.
    ///
    /// This is a convenience wrapper around [`Self::new_transactions_listener`] that filters for
    /// [`SubPool::Pending`](crate::SubPool).
    fn new_pending_pool_transactions_listener(
        &self,
    ) -> NewSubpoolTransactionStream<Self::Transaction> {
        NewSubpoolTransactionStream::new(
            self.new_transactions_listener_for(TransactionListenerKind::PropagateOnly),
            SubPool::Pending,
        )
    }
    /// Returns a new Stream that yields new transactions added to the basefee sub-pool.
    ///
    /// This is a convenience wrapper around [`Self::new_transactions_listener`] that filters for
    /// [`SubPool::BaseFee`](crate::SubPool).
    fn new_basefee_pool_transactions_listener(
        &self,
    ) -> NewSubpoolTransactionStream<Self::Transaction> {
        NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::BaseFee)
    }
    /// Returns a new Stream that yields new transactions added to the queued-pool.
    ///
    /// This is a convenience wrapper around [`Self::new_transactions_listener`] that filters for
    /// [`SubPool::Queued`](crate::SubPool).
    fn new_queued_transactions_listener(&self) -> NewSubpoolTransactionStream<Self::Transaction> {
        NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Queued)
    }
    /// Returns the _hashes_ of all transactions in the pool that are allowed to be propagated.
    ///
    /// This excludes hashes that aren't allowed to be propagated.
    ///
    /// Note: This returns a `Vec` but should guarantee that all hashes are unique.
    ///
    /// Consumer: P2P
    fn pooled_transaction_hashes(&self) -> Vec<TxHash>;
    /// Returns only the first `max` hashes of transactions in the pool.
    ///
    /// Consumer: P2P
    fn pooled_transaction_hashes_max(&self, max: usize) -> Vec<TxHash>;
    /// Returns the _full_ transaction objects of all transactions in the pool that are allowed to
    /// be propagated.
    ///
    /// This is intended to be used by the network for the initial exchange of pooled transaction
    /// _hashes_
    ///
    /// Note: This returns a `Vec` but should guarantee that all transactions are unique.
    ///
    /// Caution: In case of blob transactions, this does not include the sidecar.
    ///
    /// Consumer: P2P
    fn pooled_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns only the first `max` transactions in the pool.
    ///
    /// Consumer: P2P
    fn pooled_transactions_max(
        &self,
        max: usize,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns converted [`PooledTransactionVariant`] for the given transaction hashes that are
    /// allowed to be propagated.
    ///
    /// This adheres to the expected behavior of
    /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09):
    ///
    /// The transactions must be in same order as in the request, but it is OK to skip transactions
    /// which are not available.
    ///
    /// If the transaction is a blob transaction, the sidecar will be included.
    ///
    /// Consumer: P2P
    fn get_pooled_transaction_elements(
        &self,
        tx_hashes: Vec<TxHash>,
        limit: GetPooledTransactionLimit,
    ) -> Vec<<Self::Transaction as PoolTransaction>::Pooled>;
    /// Returns the pooled transaction variant for the given transaction hash.
    ///
    /// This adheres to the expected behavior of
    /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09):
    ///
    /// If the transaction is a blob transaction, the sidecar will be included.
    ///
    /// It is expected that this variant represents the valid p2p format for full transactions.
    /// E.g. for EIP-4844 transactions this is the consensus transaction format with the blob
    /// sidecar.
    ///
    /// Consumer: P2P
    fn get_pooled_transaction_element(
        &self,
        tx_hash: TxHash,
    ) -> Option<Recovered<<Self::Transaction as PoolTransaction>::Pooled>>;
    /// Returns an iterator that yields transactions that are ready for block production.
    ///
    /// Consumer: Block production
    fn best_transactions(
        &self,
    ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>>;
    /// Returns an iterator that yields transactions that are ready for block production with the
    /// given base fee and optional blob fee attributes.
    ///
    /// Consumer: Block production
    fn best_transactions_with_attributes(
        &self,
        best_transactions_attributes: BestTransactionsAttributes,
    ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>>;
    /// Returns all transactions that can be included in the next block.
    ///
    /// This is primarily used for the `txpool_` RPC namespace:
    /// <https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-txpool> which distinguishes
    /// between `pending` and `queued` transactions, where `pending` are transactions ready for
    /// inclusion in the next block and `queued` are transactions that are ready for inclusion in
    /// future blocks.
    ///
    /// Consumer: RPC
    fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns first `max` transactions that can be included in the next block.
    /// See <https://github.com/paradigmxyz/reth/issues/12767#issuecomment-2493223579>
    ///
    /// Consumer: Block production
    fn pending_transactions_max(
        &self,
        max: usize,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all transactions that can be included in _future_ blocks.
    ///
    /// This and [`Self::pending_transactions`] are mutually exclusive.
    ///
    /// Consumer: RPC
    fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns the number of transactions that are ready for inclusion in the next block and the
    /// number of transactions that are ready for inclusion in future blocks: `(pending, queued)`.
    fn pending_and_queued_txn_count(&self) -> (usize, usize);
    /// Returns all transactions that are currently in the pool grouped by whether they are ready
    /// for inclusion in the next block or not.
    ///
    /// This is primarily used for the `txpool_` namespace: <https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-txpool>
    ///
    /// Consumer: RPC
    fn all_transactions(&self) -> AllPoolTransactions<Self::Transaction>;
    /// Returns the _hashes_ of all transactions regardless of whether they can be propagated or
    /// not.
    ///
    /// Unlike [`Self::pooled_transaction_hashes`] this doesn't consider whether the transaction can
    /// be propagated or not.
    ///
    /// Note: This returns a `Vec` but should guarantee that all hashes are unique.
    ///
    /// Consumer: Utility
    fn all_transaction_hashes(&self) -> Vec<TxHash>;
    /// Removes all transactions corresponding to the given hashes.
    ///
    /// Note: This removes the transactions as if they got discarded (_not_ mined).
    ///
    /// Consumer: Utility
    fn remove_transactions(
        &self,
        hashes: Vec<TxHash>,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Removes all transactions corresponding to the given hashes.
    ///
    /// Also removes all _dependent_ transactions.
    ///
    /// Consumer: Utility
    fn remove_transactions_and_descendants(
        &self,
        hashes: Vec<TxHash>,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Removes all transactions from the given sender
    ///
    /// Consumer: Utility
    fn remove_transactions_by_sender(
        &self,
        sender: Address,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Retains only those hashes that are unknown to the pool.
    /// In other words, removes all transactions from the given set that are currently present in
    /// the pool; the announcement is mutated in place.
    ///
    /// Consumer: P2P
    fn retain_unknown<A>(&self, announcement: &mut A)
    where
        A: HandleMempoolData;
    /// Returns if the transaction for the given hash is already included in this pool.
    fn contains(&self, tx_hash: &TxHash) -> bool {
        self.get(tx_hash).is_some()
    }
    /// Returns the transaction for the given hash.
    fn get(&self, tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all transactions objects for the given hashes.
    ///
    /// Caution: In case of blob transactions, this does not include the sidecar.
    fn get_all(&self, txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Notify the pool about transactions that are propagated to peers.
    ///
    /// Consumer: P2P
    fn on_propagated(&self, txs: PropagatedTransactions);
    /// Returns all transactions sent by a given user
    fn get_transactions_by_sender(
        &self,
        sender: Address,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all pending transactions filtered by predicate
    fn get_pending_transactions_with_predicate(
        &self,
        predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all pending transactions sent by a given user
    fn get_pending_transactions_by_sender(
        &self,
        sender: Address,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all queued transactions sent by a given user
    fn get_queued_transactions_by_sender(
        &self,
        sender: Address,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns the highest transaction sent by a given user
    fn get_highest_transaction_by_sender(
        &self,
        sender: Address,
    ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns the transaction with the highest nonce that is executable given the on chain nonce.
    /// In other words the highest non-nonce-gapped transaction.
    ///
    /// Note: The next pending pooled transaction must have the on chain nonce.
    ///
    /// For example, for a given on chain nonce of `5`, the next transaction must have that nonce.
    /// If the pool contains txs `[5,6,7]` this returns tx `7`.
    /// If the pool contains txs `[6,7]` this returns `None` because the next valid nonce (5) is
    /// missing, which means txs `[6,7]` are nonce gapped.
    fn get_highest_consecutive_transaction_by_sender(
        &self,
        sender: Address,
        on_chain_nonce: u64,
    ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns a transaction sent by a given user and a nonce
    fn get_transaction_by_sender_and_nonce(
        &self,
        sender: Address,
        nonce: u64,
    ) -> Option<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all transactions that were submitted with the given [`TransactionOrigin`]
    fn get_transactions_by_origin(
        &self,
        origin: TransactionOrigin,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all pending transactions filtered by [`TransactionOrigin`]
    fn get_pending_transactions_by_origin(
        &self,
        origin: TransactionOrigin,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
    /// Returns all transactions that were submitted as [`TransactionOrigin::Local`]
    fn get_local_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.get_transactions_by_origin(TransactionOrigin::Local)
    }
    /// Returns all transactions that were submitted as [`TransactionOrigin::Private`]
    fn get_private_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.get_transactions_by_origin(TransactionOrigin::Private)
    }
    /// Returns all transactions that were submitted as [`TransactionOrigin::External`]
    fn get_external_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.get_transactions_by_origin(TransactionOrigin::External)
    }
    /// Returns all pending transactions that were submitted as [`TransactionOrigin::Local`]
    fn get_local_pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.get_pending_transactions_by_origin(TransactionOrigin::Local)
    }
    /// Returns all pending transactions that were submitted as [`TransactionOrigin::Private`]
    fn get_private_pending_transactions(
        &self,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.get_pending_transactions_by_origin(TransactionOrigin::Private)
    }
    /// Returns all pending transactions that were submitted as [`TransactionOrigin::External`]
    fn get_external_pending_transactions(
        &self,
    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
        self.get_pending_transactions_by_origin(TransactionOrigin::External)
    }
    /// Returns a set of all senders of transactions in the pool
    fn unique_senders(&self) -> HashSet<Address>;
    /// Returns the [`BlobTransactionSidecarVariant`] for the given transaction hash if it exists in
    /// the blob store.
    fn get_blob(
        &self,
        tx_hash: TxHash,
    ) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError>;
    /// Returns all [`BlobTransactionSidecarVariant`] for the given transaction hashes if they
    /// exist in the blob store.
    ///
    /// This only returns the blobs that were found in the store.
    /// If there's no blob it will not be returned.
    fn get_all_blobs(
        &self,
        tx_hashes: Vec<TxHash>,
    ) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError>;
    /// Returns the exact [`BlobTransactionSidecarVariant`] for the given transaction hashes in the
    /// order they were requested.
    ///
    /// Returns an error if any of the blobs are not found in the blob store.
    fn get_all_blobs_exact(
        &self,
        tx_hashes: Vec<TxHash>,
    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError>;
    /// Return the [`BlobAndProofV1`]s for a list of blob versioned hashes.
    fn get_blobs_for_versioned_hashes_v1(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError>;
    /// Return the [`BlobAndProofV2`]s for a list of blob versioned hashes.
    /// Blobs and proofs are returned only if they are present for _all_ of the requested versioned
    /// hashes.
    fn get_blobs_for_versioned_hashes_v2(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError>;
}
/// Extension for [`TransactionPool`] trait that allows to set the current block info.
#[auto_impl::auto_impl(&, Arc)]
pub trait TransactionPoolExt: TransactionPool {
    /// Sets the current block info for the pool.
    fn set_block_info(&self, info: BlockInfo);
    /// Event listener for when the pool needs to be updated.
    ///
    /// Implementers need to update the pool accordingly:
    ///
    /// ## Fee changes
    ///
    /// The [`CanonicalStateUpdate`] includes the base and blob fee of the pending block, which
    /// affects the dynamic fee requirement of pending transactions in the pool.
    ///
    /// ## EIP-4844 Blob transactions
    ///
    /// Mined blob transactions need to be removed from the pool, but from the pool only. The blob
    /// sidecar must not be removed from the blob store. Only after a blob transaction is
    /// finalized, its sidecar is removed from the blob store. This ensures that in case of a reorg,
    /// the sidecar is still available.
    fn on_canonical_state_change<B>(&self, update: CanonicalStateUpdate<'_, B>)
    where
        B: Block;
    /// Updates the accounts in the pool
    fn update_accounts(&self, accounts: Vec<ChangedAccount>);
    /// Deletes the blob sidecar for the given transaction hash from the blob store
    fn delete_blob(&self, tx: B256);
    /// Deletes multiple blob sidecars, identified by transaction hash, from the blob store
    fn delete_blobs(&self, txs: Vec<B256>);
    /// Maintenance function to cleanup blobs that are no longer needed.
    fn cleanup_blobs(&self);
}
/// A helper type that bundles all transactions in the pool, split into the pending and queued
/// sets.
#[derive(Debug, Clone)]
pub struct AllPoolTransactions<T: PoolTransaction> {
    /// Transactions that are ready for inclusion in the next block.
    pub pending: Vec<Arc<ValidPoolTransaction<T>>>,
    /// Transactions that are ready for inclusion in _future_ blocks, but are currently parked,
    /// because they depend on other transactions that are not yet included in the pool (nonce gap)
    /// or otherwise blocked.
    pub queued: Vec<Arc<ValidPoolTransaction<T>>>,
}
// === impl AllPoolTransactions ===
impl<T: PoolTransaction> AllPoolTransactions<T> {
    /// Total number of transactions across the pending and queued sets.
    pub const fn count(&self) -> usize {
        self.pending.len() + self.queued.len()
    }
    /// Returns an iterator over all pending transactions, recovered into consensus form.
    pub fn pending_recovered(&self) -> impl Iterator<Item = Recovered<T::Consensus>> + '_ {
        self.pending.iter().map(|pooled| pooled.transaction.clone().into_consensus())
    }
    /// Returns an iterator over all queued transactions, recovered into consensus form.
    pub fn queued_recovered(&self) -> impl Iterator<Item = Recovered<T::Consensus>> + '_ {
        self.queued.iter().map(|pooled| pooled.transaction.clone().into_consensus())
    }
    /// Returns an iterator over every transaction: all pending ones first, then all queued ones.
    pub fn all(&self) -> impl Iterator<Item = Recovered<T::Consensus>> + '_ {
        // Reuse the two per-set iterators; order matches the original pending-then-queued chain.
        self.pending_recovered().chain(self.queued_recovered())
    }
}
impl<T: PoolTransaction> Default for AllPoolTransactions<T> {
    /// Creates an empty bundle with no pending and no queued transactions.
    fn default() -> Self {
        Self { pending: Vec::new(), queued: Vec::new() }
    }
}
/// Represents transactions that were propagated over the network.
///
/// Maps a transaction hash to the list of ways (and the peers to which) it was propagated.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct PropagatedTransactions(pub HashMap<TxHash, Vec<PropagateKind>>);
/// Represents how a transaction was propagated to a peer over the network.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum PropagateKind {
    /// The full transaction object was sent to the peer.
    ///
    /// This is equivalent to the `Transaction` message
    Full(PeerId),
    /// Only the hash was propagated to the peer.
    Hash(PeerId),
}
// === impl PropagateKind ===
impl PropagateKind {
    /// Returns the peer the transaction was sent to
    pub const fn peer(&self) -> &PeerId {
        // Both variants carry the peer id; match exhaustively so a new variant forces an update.
        match self {
            Self::Full(peer) => peer,
            Self::Hash(peer) => peer,
        }
    }
    /// Returns true if the full transaction body was sent to the peer
    pub const fn is_full(&self) -> bool {
        match self {
            Self::Full(_) => true,
            Self::Hash(_) => false,
        }
    }
    /// Returns true if only the transaction hash was sent to the peer
    pub const fn is_hash(&self) -> bool {
        match self {
            Self::Hash(_) => true,
            Self::Full(_) => false,
        }
    }
}
impl From<PropagateKind> for PeerId {
    /// Extracts the peer id, discarding whether the propagation was full or hash-only.
    fn from(value: PropagateKind) -> Self {
        match value {
            PropagateKind::Full(peer) => peer,
            PropagateKind::Hash(peer) => peer,
        }
    }
}
/// This type represents a new blob sidecar that has been stored in the transaction pool's
/// blobstore; it includes the `TransactionHash` of the blob transaction along with the associated
/// sidecar (blobs, commitments, proofs)
#[derive(Debug, Clone)]
pub struct NewBlobSidecar {
    /// hash of the EIP-4844 transaction.
    pub tx_hash: TxHash,
    /// the blob transaction sidecar.
    pub sidecar: Arc<BlobTransactionSidecarVariant>,
}
/// Where the transaction originates from.
///
/// Depending on where the transaction was picked up, it affects how the transaction is handled
/// internally, e.g. limits for simultaneous transactions of one sender.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Deserialize, Serialize)]
pub enum TransactionOrigin {
    /// Transaction is coming from a local source.
    #[default]
    Local,
    /// Transaction has been received externally.
    ///
    /// This is usually considered an "untrusted" source, for example received from another peer
    /// in the network.
    External,
    /// Transaction is originated locally and is intended to remain private.
    ///
    /// This type of transaction should not be propagated to the network. It's meant for
    /// private usage within the local node only.
    Private,
}
// === impl TransactionOrigin ===
impl TransactionOrigin {
    /// Whether the transaction originates from a local source.
    pub const fn is_local(&self) -> bool {
        // Exhaustive match so adding a variant forces this to be revisited.
        match self {
            Self::Local => true,
            Self::External | Self::Private => false,
        }
    }
    /// Whether the transaction originates from an external source.
    pub const fn is_external(&self) -> bool {
        match self {
            Self::External => true,
            Self::Local | Self::Private => false,
        }
    }
    /// Whether the transaction originates from a private source.
    pub const fn is_private(&self) -> bool {
        match self {
            Self::Private => true,
            Self::Local | Self::External => false,
        }
    }
}
/// Represents the kind of update that triggered a change to the canonical state tracked by the
/// pool.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PoolUpdateKind {
    /// The update was due to a block commit.
    Commit,
    /// The update was due to a reorganization.
    Reorg,
}
/// Represents changes after a new canonical block or range of canonical blocks was added to the
/// chain.
///
/// It is expected that this is only used if the added blocks are canonical to the pool's last known
/// block hash. In other words, the first added block of the range must be the child of the last
/// known block hash.
///
/// This is used to update the pool state accordingly.
#[derive(Clone, Debug)]
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/pending.rs | crates/transaction-pool/src/pool/pending.rs | //! Pending transactions
use crate::{
identifier::{SenderId, TransactionId},
pool::{
best::{BestTransactions, BestTransactionsWithFees},
size::SizeTracker,
},
Priority, SubPoolLimit, TransactionOrdering, ValidPoolTransaction,
};
use rustc_hash::{FxHashMap, FxHashSet};
use std::{
cmp::Ordering,
collections::{hash_map::Entry, BTreeMap},
ops::Bound::Unbounded,
sync::Arc,
};
use tokio::sync::broadcast;
/// A pool of validated and gapless transactions that are ready to be executed on the current state
/// and are waiting to be included in a block.
///
/// This pool distinguishes between `independent` transactions and pending transactions. A
/// transaction is `independent`, if it is in the pending pool, and it has the current on chain
/// nonce of the sender. Meaning `independent` transactions can be executed right away, other
/// pending transactions depend on at least one `independent` transaction.
///
/// Once an `independent` transaction was executed it *unlocks* the next nonce, if this transaction
/// is also pending, then this will be moved to the `independent` queue.
#[derive(Debug, Clone)]
pub struct PendingPool<T: TransactionOrdering> {
    /// How to order transactions.
    ordering: T,
    /// Keeps track of transactions inserted in the pool.
    ///
    /// This way we can determine when transactions were submitted to the pool.
    submission_id: u64,
    /// _All_ Transactions that are currently inside the pool grouped by their identifier.
    by_id: BTreeMap<TransactionId, PendingTransaction<T>>,
    /// The highest nonce transactions for each sender - like the `independent` set, but the
    /// highest instead of lowest nonce.
    highest_nonces: FxHashMap<SenderId, PendingTransaction<T>>,
    /// Independent transactions that can be included directly and don't require other
    /// transactions.
    independent_transactions: FxHashMap<SenderId, PendingTransaction<T>>,
    /// Keeps track of the size of this pool.
    ///
    /// See also [`reth_primitives_traits::InMemorySize::size`].
    size_of: SizeTracker,
    /// Used to broadcast new transactions that have been added to the `PendingPool` to existing
    /// subscribers of this pool (receivers are created via `subscribe`, e.g. in [`Self::best`]).
    new_transaction_notifier: broadcast::Sender<PendingTransaction<T>>,
}
// === impl PendingPool ===
impl<T: TransactionOrdering> PendingPool<T> {
    /// Create a new pending pool instance.
    ///
    /// Uses a default broadcast buffer capacity of 200 for the new-transaction notification
    /// channel; see [`Self::with_buffer`] to choose a different capacity.
    pub fn new(ordering: T) -> Self {
        Self::with_buffer(ordering, 200)
    }
/// Create a new pool instance with the given buffer capacity.
pub fn with_buffer(ordering: T, buffer_capacity: usize) -> Self {
let (new_transaction_notifier, _) = broadcast::channel(buffer_capacity);
Self {
ordering,
submission_id: 0,
by_id: Default::default(),
independent_transactions: Default::default(),
highest_nonces: Default::default(),
size_of: Default::default(),
new_transaction_notifier,
}
}
    /// Clear all transactions from the pool without resetting other values.
    /// Used for atomic reordering during basefee update.
    ///
    /// # Returns
    ///
    /// Returns all transactions by id.
    fn clear_transactions(&mut self) -> BTreeMap<TransactionId, PendingTransaction<T>> {
        // Drop the auxiliary per-sender indexes; the caller rebuilds them when re-inserting.
        self.independent_transactions.clear();
        self.highest_nonces.clear();
        // Reset the tracked pool size; it is re-accumulated as transactions are re-inserted.
        self.size_of.reset();
        // Hand ownership of the primary index to the caller, leaving an empty map behind.
        std::mem::take(&mut self.by_id)
    }
    /// Returns an iterator over all transactions that are _currently_ ready.
    ///
    /// 1. The iterator _always_ returns transactions in order: it never returns a transaction with
    ///    an unsatisfied dependency and only returns them if dependency transaction were yielded
    ///    previously. In other words: the nonces of transactions with the same sender will _always_
    ///    increase by exactly 1.
    ///
    /// The order of transactions which satisfy (1.) is determined by their computed priority: a
    /// transaction with a higher priority is returned before a transaction with a lower priority.
    ///
    /// If two transactions have the same priority score, then the transactions which spent more
    /// time in pool (were added earlier) are returned first.
    ///
    /// NOTE: while this iterator returns transaction that pool considers valid at this point, they
    /// could potentially become invalid at point of execution. Therefore, this iterator
    /// provides a way to mark transactions that the consumer of this iterator considers invalid. In
    /// which case the transaction's subgraph is also automatically marked invalid, See (1.).
    /// Invalid transactions are skipped.
    pub fn best(&self) -> BestTransactions<T> {
        BestTransactions {
            // Snapshot of the whole pending set, keyed by transaction id.
            all: self.by_id.clone(),
            // Seed the candidate set with transactions that have no in-pool ancestor.
            independent: self.independent_transactions.values().cloned().collect(),
            invalid: Default::default(),
            // Subscribe so transactions added to the pool after this call can also be yielded.
            new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()),
            last_priority: None,
            skip_blobs: false,
        }
    }
    /// Same as `best` but only returns transactions that satisfy the given basefee and blobfee.
    ///
    /// The fee parameters are forwarded to [`BestTransactionsWithFees`], which applies them when
    /// yielding transactions.
    pub(crate) fn best_with_basefee_and_blobfee(
        &self,
        base_fee: u64,
        base_fee_per_blob_gas: u64,
    ) -> BestTransactionsWithFees<T> {
        BestTransactionsWithFees { best: self.best(), base_fee, base_fee_per_blob_gas }
    }
    /// Same as `best` but also includes the given unlocked transactions.
    ///
    /// This mimics the [`Self::add_transaction`] method, but does not insert the transactions into
    /// pool but only into the returned iterator.
    ///
    /// Note: this does not insert the unlocked transactions into the pool.
    ///
    /// # Panics
    ///
    /// In debug builds, if one of the `unlocked` transactions is already tracked by the pool
    /// (guarded by a `debug_assert!`).
    pub(crate) fn best_with_unlocked_and_attributes(
        &self,
        unlocked: Vec<Arc<ValidPoolTransaction<T::Transaction>>>,
        base_fee: u64,
        base_fee_per_blob_gas: u64,
    ) -> BestTransactionsWithFees<T> {
        let mut best = self.best();
        // Work on a local copy of the submission counter so the pool itself stays unchanged.
        let mut submission_id = self.submission_id;
        for tx in unlocked {
            submission_id += 1;
            debug_assert!(!best.all.contains_key(tx.id()), "transaction already included");
            // Compute the priority under the provided base fee.
            let priority = self.ordering.priority(&tx.transaction, base_fee);
            let tx_id = *tx.id();
            let transaction = PendingTransaction { submission_id, transaction: tx, priority };
            // Without an ancestor in the iterator's set, the transaction is directly executable.
            if best.ancestor(&tx_id).is_none() {
                best.independent.insert(transaction.clone());
            }
            best.all.insert(tx_id, transaction);
        }
        BestTransactionsWithFees { best, base_fee, base_fee_per_blob_gas }
    }
/// Returns an iterator over all transactions in the pool
pub(crate) fn all(
&self,
) -> impl ExactSizeIterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
self.by_id.values().map(|tx| tx.transaction.clone())
}
    /// Updates the pool with the new blob fee. Removes
    /// from the subpool all transactions and their dependents that no longer satisfy the given
    /// blob fee (`tx.max_blob_fee < blob_fee`).
    ///
    /// Note: the transactions are not returned in a particular order.
    ///
    /// # Returns
    ///
    /// Removed transactions that no longer satisfy the blob fee.
    pub(crate) fn update_blob_fee(
        &mut self,
        blob_fee: u128,
    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
        // Create a collection for removed transactions.
        let mut removed = Vec::new();
        // Drain the pool (this also resets the size/index state) and iterate over all
        // transactions; the dependent-removal below relies on the `BTreeMap` id order grouping a
        // sender's transactions together in ascending nonce order.
        let mut transactions_iter = self.clear_transactions().into_iter().peekable();
        while let Some((id, tx)) = transactions_iter.next() {
            if tx.transaction.is_eip4844() && tx.transaction.max_fee_per_blob_gas() < Some(blob_fee)
            {
                // This tx no longer satisfies the blob fee condition: collect it as removed and
                // do not re-insert it.
                removed.push(Arc::clone(&tx.transaction));
                // Subsequent same-sender transactions have higher nonces and thus depend on this
                // one; remove them as well.
                'this: while let Some((next_id, next_tx)) = transactions_iter.peek() {
                    if next_id.sender != id.sender {
                        break 'this
                    }
                    removed.push(Arc::clone(&next_tx.transaction));
                    transactions_iter.next();
                }
            } else {
                // Still satisfies the blob fee: re-insert and re-accumulate the size and
                // per-sender index state that `clear_transactions` reset.
                self.size_of += tx.transaction.size();
                self.update_independents_and_highest_nonces(&tx);
                self.by_id.insert(id, tx);
            }
        }
        removed
    }
/// Updates the pool with the new base fee. Reorders transactions by new priorities. Removes
/// from the subpool all transactions and their dependents that no longer satisfy the given
/// base fee (`tx.fee < base_fee`).
///
/// Note: the transactions are not returned in a particular order.
///
/// # Returns
///
/// Removed transactions that no longer satisfy the base fee.
pub(crate) fn update_base_fee(
    &mut self,
    base_fee: u64,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    // Create a collection for removed transactions.
    let mut removed = Vec::new();
    // Drain and iterate over all transactions.
    // NOTE(review): `clear_transactions` presumably empties `by_id` and resets the
    // size/independent/highest-nonce bookkeeping; survivors are re-admitted below.
    let mut transactions_iter = self.clear_transactions().into_iter().peekable();
    while let Some((id, mut tx)) = transactions_iter.next() {
        if tx.transaction.max_fee_per_gas() < base_fee as u128 {
            // Add this tx to the removed collection since it no longer satisfies the base fee
            // condition. Decrease the total pool size.
            removed.push(Arc::clone(&tx.transaction));
            // Remove all dependent transactions.
            // Same-sender entries are contiguous in iteration order, so everything that
            // follows for this sender depends on the dropped tx.
            'this: while let Some((next_id, next_tx)) = transactions_iter.peek() {
                if next_id.sender != id.sender {
                    break 'this
                }
                removed.push(Arc::clone(&next_tx.transaction));
                transactions_iter.next();
            }
        } else {
            // Re-insert the transaction with new priority.
            tx.priority = self.ordering.priority(&tx.transaction.transaction, base_fee);
            self.size_of += tx.transaction.size();
            self.update_independents_and_highest_nonces(&tx);
            self.by_id.insert(id, tx);
        }
    }
    removed
}
/// Updates the independent transaction and highest nonces set, assuming the given transaction
/// is being _added_ to the pool.
///
/// Per sender, `highest_nonces` tracks the entry with the greatest nonce while
/// `independent_transactions` tracks the one with the smallest nonce (the next executable
/// transaction for that sender).
fn update_independents_and_highest_nonces(&mut self, tx: &PendingTransaction<T>) {
    // Replace the sender's highest-nonce entry if this tx has a higher nonce.
    match self.highest_nonces.entry(tx.transaction.sender_id()) {
        Entry::Occupied(mut entry) => {
            if entry.get().transaction.nonce() < tx.transaction.nonce() {
                *entry.get_mut() = tx.clone();
            }
        }
        Entry::Vacant(entry) => {
            entry.insert(tx.clone());
        }
    }
    // Replace the sender's independent (lowest-nonce) entry if this tx has a lower nonce.
    match self.independent_transactions.entry(tx.transaction.sender_id()) {
        Entry::Occupied(mut entry) => {
            if entry.get().transaction.nonce() > tx.transaction.nonce() {
                *entry.get_mut() = tx.clone();
            }
        }
        Entry::Vacant(entry) => {
            entry.insert(tx.clone());
        }
    }
}
/// Returns the ancestor the given transaction, the transaction with `nonce - 1`.
///
/// Note: for a transaction with nonce higher than the current on chain nonce this will always
/// return an ancestor since all transaction in this pool are gapless.
///
/// The `?` short-circuits to `None` when no ancestor id can be formed (presumably at the
/// minimum nonce — see `TransactionId::unchecked_ancestor`).
fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction<T>> {
    self.get(&id.unchecked_ancestor()?)
}
/// Adds a new transactions to the pending queue.
///
/// Updates size tracking, the per-sender independent/highest-nonce sets, and broadcasts the
/// new entry to any subscribed listeners before inserting it into `by_id`.
///
/// # Panics
///
/// if the transaction is already included (debug builds only, via `debug_assert!`)
pub fn add_transaction(
    &mut self,
    tx: Arc<ValidPoolTransaction<T::Transaction>>,
    base_fee: u64,
) {
    debug_assert!(
        !self.contains(tx.id()),
        "transaction already included {:?}",
        self.get(tx.id()).unwrap().transaction
    );
    // keep track of size
    self.size_of += tx.size();
    let tx_id = *tx.id();
    let submission_id = self.next_id();
    // Priority is computed once against the current base fee and cached on the entry.
    let priority = self.ordering.priority(&tx.transaction, base_fee);
    let tx = PendingTransaction { submission_id, transaction: tx, priority };
    self.update_independents_and_highest_nonces(&tx);
    // send the new transaction to any existing pendingpool static file iterators
    // (`receiver_count` check avoids cloning when nobody is listening; send errors are
    // intentionally ignored since listeners may have dropped concurrently)
    if self.new_transaction_notifier.receiver_count() > 0 {
        let _ = self.new_transaction_notifier.send(tx.clone());
    }
    self.by_id.insert(tx_id, tx);
}
/// Removes the transaction from the pool.
///
/// Note: If the transaction has a descendant transaction
/// it will advance it to the best queue.
///
/// Returns `None` if the id is not present in the pool.
pub(crate) fn remove_transaction(
    &mut self,
    id: &TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
    // If the removed tx was the sender's independent (lowest-nonce) tx, promote its direct
    // descendant (nonce + 1) to the independent set, if present.
    if let Some(lowest) = self.independent_transactions.get(&id.sender) {
        if lowest.transaction.nonce() == id.nonce {
            self.independent_transactions.remove(&id.sender);
            // mark the next as independent if it exists
            if let Some(unlocked) = self.get(&id.descendant()) {
                self.independent_transactions.insert(id.sender, unlocked.clone());
            }
        }
    }
    let tx = self.by_id.remove(id)?;
    self.size_of -= tx.transaction.size();
    // Keep the highest-nonce tracking consistent for this sender.
    if let Some(highest) = self.highest_nonces.get(&id.sender) {
        if highest.transaction.nonce() == id.nonce {
            self.highest_nonces.remove(&id.sender);
        }
        // NOTE(review): the ancestor is inserted regardless of whether the removed tx was
        // the sender's highest nonce; for a mid-chain removal this replaces the tracked
        // highest-nonce entry with a lower-nonce one — presumably callers remove the
        // descendants right after, but worth confirming.
        if let Some(ancestor) = self.ancestor(id) {
            self.highest_nonces.insert(id.sender, ancestor.clone());
        }
    }
    Some(tx.transaction)
}
/// Returns the next unique submission id and advances the internal counter.
///
/// Wraps around on overflow so the increment can never panic.
const fn next_id(&mut self) -> u64 {
    let current = self.submission_id;
    self.submission_id = current.wrapping_add(1);
    current
}
/// Traverses the pool, starting at the highest nonce set, removing the transactions which
/// would put the pool under the specified limits.
///
/// This attempts to remove transactions by roughly the same amount for each sender. This is
/// done by removing the highest-nonce transactions for each sender.
///
/// If the `remove_locals` flag is unset, transactions will be removed per-sender until a
/// local transaction is the highest nonce transaction for that sender. If all senders have a
/// local highest-nonce transaction, the pool will not be truncated further.
///
/// Otherwise, if the `remove_locals` flag is set, transactions will be removed per-sender
/// until the pool is under the given limits.
///
/// Any removed transactions will be added to the `end_removed` vector.
pub fn remove_to_limit(
    &mut self,
    limit: &SubPoolLimit,
    remove_locals: bool,
    end_removed: &mut Vec<Arc<ValidPoolTransaction<T::Transaction>>>,
) {
    // This serves as a termination condition for the loop - it represents the number of
    // _valid_ unique senders that might have descendants in the pool.
    //
    // If `remove_locals` is false, a value of zero means that there are no non-local txs in the
    // pool that can be removed.
    //
    // If `remove_locals` is true, a value of zero means that there are no txs in the pool that
    // can be removed.
    let mut non_local_senders = self.highest_nonces.len();
    // keeps track of unique senders from previous iterations, to understand how many unique
    // senders were removed in the last iteration
    let mut unique_senders = self.highest_nonces.len();
    // keeps track of which senders we've marked as local
    let mut local_senders = FxHashSet::default();
    // keep track of transactions to remove and how many have been removed so far
    let original_length = self.len();
    let mut removed = Vec::new();
    let mut total_removed = 0;
    // track total `size` of transactions to remove
    let original_size = self.size();
    let mut total_size = 0;
    loop {
        // check how many unique senders were removed last iteration
        // (a sender disappears from `highest_nonces` once its last pool tx is removed)
        let unique_removed = unique_senders - self.highest_nonces.len();
        // the new number of unique senders
        unique_senders = self.highest_nonces.len();
        non_local_senders -= unique_removed;
        // we can reuse the temp array
        removed.clear();
        // we prefer removing transactions with lower ordering
        // (sorted via `PendingTransaction`'s `Ord`: priority first, then submission id)
        let mut worst_transactions = self.highest_nonces.values().collect::<Vec<_>>();
        worst_transactions.sort();
        // loop through the highest nonces set, removing transactions until we reach the limit
        for tx in worst_transactions {
            // return early if the pool is under limits
            // (limits are evaluated against the pre-removal totals minus what is already
            // scheduled for removal, since nothing has been removed yet this iteration)
            if !limit.is_exceeded(original_length - total_removed, original_size - total_size) ||
                non_local_senders == 0
            {
                // need to remove remaining transactions before exiting
                for id in &removed {
                    if let Some(tx) = self.remove_transaction(id) {
                        end_removed.push(tx);
                    }
                }
                return
            }
            if !remove_locals && tx.transaction.is_local() {
                let sender_id = tx.transaction.sender_id();
                // count each local sender only once towards the termination condition
                if local_senders.insert(sender_id) {
                    non_local_senders -= 1;
                }
                continue
            }
            total_size += tx.transaction.size();
            total_removed += 1;
            removed.push(*tx.transaction.id());
        }
        // remove the transactions from this iteration
        for id in &removed {
            if let Some(tx) = self.remove_transaction(id) {
                end_removed.push(tx);
            }
        }
        // return if either the pool is under limits or there are no more _eligible_
        // transactions to remove
        if !self.exceeds(limit) || non_local_senders == 0 {
            return
        }
    }
}
/// Truncates the pool to the given [`SubPoolLimit`], removing transactions until the subpool
/// limits are met.
///
/// This attempts to remove transactions by roughly the same amount for each sender. For more
/// information on this exact process see docs for
/// [`remove_to_limit`](PendingPool::remove_to_limit).
///
/// This first truncates all of the non-local transactions in the pool. If the subpool is still
/// not under the limit, this truncates the entire pool, including non-local transactions. The
/// removed transactions are returned.
pub fn truncate_pool(
    &mut self,
    limit: SubPoolLimit,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
    let mut removed = Vec::new();
    // nothing to do while the pool is within its limits
    if self.exceeds(&limit) {
        // first pass: only non-local transactions
        self.remove_to_limit(&limit, false, &mut removed);
        if self.exceeds(&limit) {
            // second pass: local transactions must go too to get under the limit
            self.remove_to_limit(&limit, true, &mut removed);
        }
    }
    removed
}
/// Returns true if the pool exceeds the given limit.
///
/// Delegates to [`SubPoolLimit::is_exceeded`] with the current transaction count and
/// cumulative size.
#[inline]
pub(crate) fn exceeds(&self, limit: &SubPoolLimit) -> bool {
    limit.is_exceeded(self.len(), self.size())
}
/// The reported size of all transactions in this pool.
///
/// This is the incrementally-maintained sum of each transaction's `size()`.
pub(crate) fn size(&self) -> usize {
    self.size_of.into()
}
/// Number of transactions in the entire pool, i.e. the number of entries in `by_id`.
pub(crate) fn len(&self) -> usize {
    self.by_id.len()
}
/// All transactions grouped by id.
///
/// Read-only view of the primary index; ordering follows `TransactionId`'s `Ord`.
pub const fn by_id(&self) -> &BTreeMap<TransactionId, PendingTransaction<T>> {
    &self.by_id
}
/// Independent transactions: per sender, the lowest-nonce (next executable) transaction.
pub const fn independent_transactions(&self) -> &FxHashMap<SenderId, PendingTransaction<T>> {
    &self.independent_transactions
}
/// Subscribes to new transactions added to this pool.
///
/// Returns a broadcast receiver; each subscriber gets every transaction broadcast after
/// subscription.
pub fn new_transaction_receiver(&self) -> broadcast::Receiver<PendingTransaction<T>> {
    self.new_transaction_notifier.subscribe()
}
/// Whether the pool is empty (test helper only).
#[cfg(test)]
pub(crate) fn is_empty(&self) -> bool {
    self.by_id.is_empty()
}
/// Returns `true` if the transaction with the given id is already included in this pool.
pub(crate) fn contains(&self, id: &TransactionId) -> bool {
    self.by_id.contains_key(id)
}
/// Collects the ids of all transactions in the pool that belong to the given sender.
pub(crate) fn get_txs_by_sender(&self, sender: SenderId) -> Vec<TransactionId> {
    let mut ids = Vec::new();
    for id in self.iter_txs_by_sender(sender) {
        ids.push(*id);
    }
    ids
}
/// Returns an iterator over all transaction with the sender id.
///
/// Exploits the `by_id` map ordering: a sender's ids are contiguous, so a range scan from
/// the sender's lower bound followed by `take_while` yields exactly that sender's entries.
pub(crate) fn iter_txs_by_sender(
    &self,
    sender: SenderId,
) -> impl Iterator<Item = &TransactionId> + '_ {
    self.by_id
        .range((sender.start_bound(), Unbounded))
        .take_while(move |(other, _)| sender == other.sender)
        .map(|(tx_id, _)| tx_id)
}
/// Retrieves a transaction with the given ID from the pool, if it exists.
fn get(&self, id: &TransactionId) -> Option<&PendingTransaction<T>> {
    self.by_id.get(id)
}
/// Returns a reference to the independent transactions in the pool (test helper only).
#[cfg(test)]
pub(crate) const fn independent(&self) -> &FxHashMap<SenderId, PendingTransaction<T>> {
    &self.independent_transactions
}
/// Asserts that the bijection between `by_id` and `all` is valid.
///
/// Both per-sender maps must be no larger than the full pool, and since each tracks exactly
/// one entry per sender they must be the same size as each other.
#[cfg(any(test, feature = "test-utils"))]
pub(crate) fn assert_invariants(&self) {
    assert!(
        self.independent_transactions.len() <= self.by_id.len(),
        "independent.len() > all.len()"
    );
    assert!(
        self.highest_nonces.len() <= self.by_id.len(),
        "independent_descendants.len() > all.len()"
    );
    assert_eq!(
        self.highest_nonces.len(),
        self.independent_transactions.len(),
        "independent.len() = independent_descendants.len()"
    );
}
}
/// A transaction that is ready to be included in a block.
///
/// Couples the validated transaction with the metadata the pending pool needs for ordering.
#[derive(Debug)]
pub struct PendingTransaction<T: TransactionOrdering> {
    /// Identifier that tags when transaction was submitted in the pool.
    /// Used as an ordering tiebreaker for transactions with equal priority.
    pub submission_id: u64,
    /// Actual transaction.
    pub transaction: Arc<ValidPoolTransaction<T::Transaction>>,
    /// The priority value assigned by the used `Ordering` function.
    pub priority: Priority<T::PriorityValue>,
}
impl<T: TransactionOrdering> PendingTransaction<T> {
    /// The next transaction of the sender: `nonce + 1`.
    ///
    /// I.e. the id of the transaction this one unlocks once it is executed.
    pub fn unlocks(&self) -> TransactionId {
        self.transaction.transaction_id.descendant()
    }
}
// Manual impl: a derived `Clone` would require `T: Clone`, which `TransactionOrdering`
// implementors need not provide. The transaction itself is shared via `Arc`, so this is cheap.
impl<T: TransactionOrdering> Clone for PendingTransaction<T> {
    fn clone(&self) -> Self {
        Self {
            submission_id: self.submission_id,
            transaction: Arc::clone(&self.transaction),
            priority: self.priority.clone(),
        }
    }
}
impl<T: TransactionOrdering> Eq for PendingTransaction<T> {}
// Equality is defined in terms of the total order below so that `eq` is always consistent
// with `cmp`, as required by the `Ord` contract.
impl<T: TransactionOrdering> PartialEq<Self> for PendingTransaction<T> {
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == Ordering::Equal
    }
}
// Delegates to the total order; the comparison is never partial.
impl<T: TransactionOrdering> PartialOrd<Self> for PendingTransaction<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<T: TransactionOrdering> Ord for PendingTransaction<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        // This compares by `priority` and only if two tx have the exact same priority this compares
        // the unique `submission_id`. This ensures that transactions with same priority are not
        // equal, so they're not replaced in the set.
        // The submission-id comparison is intentionally reversed: at equal priority, the
        // earlier-submitted transaction (lower id) compares as greater.
        self.priority
            .cmp(&other.priority)
            .then_with(|| other.submission_id.cmp(&self.submission_id))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet},
PoolTransaction,
};
use alloy_consensus::{Transaction, TxType};
use alloy_primitives::address;
use std::collections::HashSet;
// A single tx survives a base fee at/below its fee cap and is evicted above it.
#[test]
fn test_enforce_basefee() {
    let mut f = MockTransactionFactory::default();
    let mut pool = PendingPool::new(MockOrdering::default());
    let tx = f.validated_arc(MockTransaction::eip1559().inc_price());
    pool.add_transaction(tx.clone(), 0);
    assert!(pool.contains(tx.id()));
    assert_eq!(pool.len(), 1);
    // base fee of 0 is satisfied by every tx
    let removed = pool.update_base_fee(0);
    assert!(removed.is_empty());
    // base fee above the tx's max fee evicts it
    let removed = pool.update_base_fee((tx.max_fee_per_gas() + 1) as u64);
    assert_eq!(removed.len(), 1);
    assert!(pool.is_empty());
}
// Evicting a tx via base fee also evicts its same-sender descendants; evicting only the
// descendant leaves the root in place.
#[test]
fn test_enforce_basefee_descendant() {
    let mut f = MockTransactionFactory::default();
    let mut pool = PendingPool::new(MockOrdering::default());
    let t = MockTransaction::eip1559().inc_price_by(10);
    let root_tx = f.validated_arc(t.clone());
    pool.add_transaction(root_tx.clone(), 0);
    // descendant has a lower fee than the root
    let descendant_tx = f.validated_arc(t.inc_nonce().decr_price());
    pool.add_transaction(descendant_tx.clone(), 0);
    assert!(pool.contains(root_tx.id()));
    assert!(pool.contains(descendant_tx.id()));
    assert_eq!(pool.len(), 2);
    // one sender => exactly one independent and one highest-nonce entry
    assert_eq!(pool.independent_transactions.len(), 1);
    assert_eq!(pool.highest_nonces.len(), 1);
    let removed = pool.update_base_fee(0);
    assert!(removed.is_empty());
    // two dependent tx in the pool with decreasing fee
    {
        let mut pool2 = pool.clone();
        let removed = pool2.update_base_fee((descendant_tx.max_fee_per_gas() + 1) as u64);
        assert_eq!(removed.len(), 1);
        assert_eq!(pool2.len(), 1);
        // descendant got popped
        assert!(pool2.contains(root_tx.id()));
        assert!(!pool2.contains(descendant_tx.id()));
    }
    // remove root transaction via fee; its descendant must be dropped with it
    let removed = pool.update_base_fee((root_tx.max_fee_per_gas() + 1) as u64);
    assert_eq!(removed.len(), 2);
    assert!(pool.is_empty());
    pool.assert_invariants();
}
// Truncation evicts the lowest-priority (cheapest) transaction first.
#[test]
fn evict_worst() {
    let mut f = MockTransactionFactory::default();
    let mut pool = PendingPool::new(MockOrdering::default());
    let t = MockTransaction::eip1559();
    pool.add_transaction(f.validated_arc(t.clone()), 0);
    let t2 = MockTransaction::eip1559().inc_price_by(10);
    pool.add_transaction(f.validated_arc(t2), 0);
    // First transaction should be evicted.
    assert_eq!(
        pool.highest_nonces.values().min().map(|tx| *tx.transaction.hash()),
        Some(*t.hash())
    );
    // truncate pool with max size = 1, ensure it's the same transaction
    let removed = pool.truncate_pool(SubPoolLimit { max_txs: 1, max_size: usize::MAX });
    assert_eq!(removed.len(), 1);
    assert_eq!(removed[0].hash(), t.hash());
}
#[test]
fn correct_independent_descendants() {
    // this test ensures that we set the right highest nonces set for each sender
    let mut f = MockTransactionFactory::default();
    let mut pool = PendingPool::new(MockOrdering::default());
    let a_sender = address!("0x000000000000000000000000000000000000000a");
    let b_sender = address!("0x000000000000000000000000000000000000000b");
    let c_sender = address!("0x000000000000000000000000000000000000000c");
    let d_sender = address!("0x000000000000000000000000000000000000000d");
    // create a chain of transactions by sender A, B, C
    let mut tx_set = MockTransactionSet::dependent(a_sender, 0, 4, TxType::Eip1559);
    let a = tx_set.clone().into_vec();
    let b = MockTransactionSet::dependent(b_sender, 0, 3, TxType::Eip1559).into_vec();
    tx_set.extend(b.clone());
    // C has the same number of txs as B
    let c = MockTransactionSet::dependent(c_sender, 0, 3, TxType::Eip1559).into_vec();
    tx_set.extend(c.clone());
    let d = MockTransactionSet::dependent(d_sender, 0, 1, TxType::Eip1559).into_vec();
    tx_set.extend(d.clone());
    // add all the transactions to the pool
    let all_txs = tx_set.into_vec();
    for tx in all_txs {
        pool.add_transaction(f.validated_arc(tx), 0);
    }
    pool.assert_invariants();
    // the independent set is the roots of each of these tx chains, these are the highest
    // nonces for each sender (compared as (sender, nonce) pairs since ids differ)
    let expected_highest_nonces = [d[0].clone(), c[2].clone(), b[2].clone(), a[3].clone()]
        .iter()
        .map(|tx| (tx.sender(), tx.nonce()))
        .collect::<HashSet<_>>();
    let actual_highest_nonces = pool
        .highest_nonces
        .values()
        .map(|tx| (tx.transaction.sender(), tx.transaction.nonce()))
        .collect::<HashSet<_>>();
    assert_eq!(expected_highest_nonces, actual_highest_nonces);
    pool.assert_invariants();
}
#[test]
fn truncate_by_sender() {
// This test ensures that transactions are removed from the pending pool by sender.
let mut f = MockTransactionFactory::default();
let mut pool = PendingPool::new(MockOrdering::default());
// Addresses for simulated senders A, B, C, and D.
let a = address!("0x000000000000000000000000000000000000000a");
let b = address!("0x000000000000000000000000000000000000000b");
let c = address!("0x000000000000000000000000000000000000000c");
let d = address!("0x000000000000000000000000000000000000000d");
// Create transaction chains for senders A, B, C, and D.
let a_txs = MockTransactionSet::sequential_transactions_by_sender(a, 4, TxType::Eip1559);
let b_txs = MockTransactionSet::sequential_transactions_by_sender(b, 3, TxType::Eip1559);
let c_txs = MockTransactionSet::sequential_transactions_by_sender(c, 3, TxType::Eip1559);
let d_txs = MockTransactionSet::sequential_transactions_by_sender(d, 1, TxType::Eip1559);
// Set up expected pending transactions.
let expected_pending = vec![
a_txs.transactions[0].clone(),
b_txs.transactions[0].clone(),
c_txs.transactions[0].clone(),
a_txs.transactions[1].clone(),
]
.into_iter()
.map(|tx| (tx.sender(), tx.nonce()))
.collect::<HashSet<_>>();
// Set up expected removed transactions.
let expected_removed = vec![
d_txs.transactions[0].clone(),
c_txs.transactions[2].clone(),
b_txs.transactions[2].clone(),
a_txs.transactions[3].clone(),
c_txs.transactions[1].clone(),
b_txs.transactions[1].clone(),
a_txs.transactions[2].clone(),
]
.into_iter()
.map(|tx| (tx.sender(), tx.nonce()))
.collect::<HashSet<_>>();
// Consolidate all transactions into a single vector.
let all_txs =
[a_txs.into_vec(), b_txs.into_vec(), c_txs.into_vec(), d_txs.into_vec()].concat();
// Add all the transactions to the pool.
for tx in all_txs {
pool.add_transaction(f.validated_arc(tx), 0);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/update.rs | crates/transaction-pool/src/pool/update.rs | //! Support types for updating the pool.
use crate::{
identifier::TransactionId, pool::state::SubPool, PoolTransaction, ValidPoolTransaction,
};
use std::sync::Arc;
/// A change of the transaction's location
///
/// NOTE: this guarantees that `current` and `destination` differ.
#[derive(Debug)]
pub(crate) struct PoolUpdate {
    /// Internal tx id.
    pub(crate) id: TransactionId,
    /// Where the transaction is currently held.
    pub(crate) current: SubPool,
    /// Where to move the transaction to (or drop it entirely).
    pub(crate) destination: Destination,
}
/// Where to move an existing transaction: either out of the pool entirely or into another
/// sub-pool.
#[derive(Debug)]
pub(crate) enum Destination {
    /// Discard the transaction.
    Discard,
    /// Move transaction to pool
    Pool(SubPool),
}
// Convenience conversion: moving into a concrete sub-pool is the common case.
impl From<SubPool> for Destination {
    fn from(sub_pool: SubPool) -> Self {
        Self::Pool(sub_pool)
    }
}
/// Tracks the result after updating the pool
#[derive(Debug)]
pub(crate) struct UpdateOutcome<T: PoolTransaction> {
    /// transactions promoted to the pending pool
    pub(crate) promoted: Vec<Arc<ValidPoolTransaction<T>>>,
    /// transaction that failed and were discarded
    pub(crate) discarded: Vec<Arc<ValidPoolTransaction<T>>>,
}
// Manual impl: deriving `Default` would needlessly require `T: Default`.
impl<T: PoolTransaction> Default for UpdateOutcome<T> {
    fn default() -> Self {
        Self { promoted: Vec::new(), discarded: Vec::new() }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/state.rs | crates/transaction-pool/src/pool/state.rs | bitflags::bitflags! {
/// Marker that represents the current state of a transaction in the pool and from which the corresponding sub-pool is derived, depending on what bits are set.
///
/// This mirrors [erigon's ephemeral state field](https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design#ordering-function).
///
/// The [SubPool] the transaction belongs to is derived from its state and determined by the following sequential checks:
///
/// - If it satisfies the [TxState::PENDING_POOL_BITS] it belongs in the pending sub-pool: [SubPool::Pending].
/// - If it is an EIP-4844 blob transaction it belongs in the blob sub-pool: [SubPool::Blob].
/// - If it satisfies the [TxState::BASE_FEE_POOL_BITS] it belongs in the base fee sub-pool: [SubPool::BaseFee].
///
/// Otherwise, it belongs in the queued sub-pool: [SubPool::Queued].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, PartialOrd, Ord)]
pub(crate) struct TxState: u8 {
    /// Set to `1` if all ancestor transactions are pending.
    const NO_PARKED_ANCESTORS = 0b10000000;
    /// Set to `1` if the transaction is either the next transaction of the sender (on chain nonce == tx.nonce) or all prior transactions are also present in the pool.
    const NO_NONCE_GAPS = 0b01000000;
    /// Bit derived from the sender's balance.
    ///
    /// Set to `1` if the sender's balance can cover the maximum cost for this transaction (`feeCap * gasLimit + value`).
    /// This includes cumulative costs of prior transactions, which ensures that the sender has enough funds for all max cost of prior transactions.
    const ENOUGH_BALANCE = 0b00100000;
    /// Bit set to true if the transaction has a lower gas limit than the block's gas limit.
    const NOT_TOO_MUCH_GAS = 0b00010000;
    /// Covers the Dynamic fee requirement.
    ///
    /// Set to 1 if `maxFeePerGas` of the transaction meets the requirement of the pending block.
    const ENOUGH_FEE_CAP_BLOCK = 0b00001000;
    /// Covers the dynamic blob fee requirement, only relevant for EIP-4844 blob transactions.
    ///
    /// Set to 1 if `maxBlobFeePerGas` of the transaction meets the requirement of the pending block.
    const ENOUGH_BLOB_FEE_CAP_BLOCK = 0b00000100;
    /// Marks whether the transaction is a blob transaction.
    ///
    /// We track this as part of the state for simplicity, since blob transactions are handled differently and are mutually exclusive with normal transactions.
    const BLOB_TRANSACTION = 0b00000010;
    /// All bits (except the blob marker) set: the requirement for the pending sub-pool.
    const PENDING_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits() | Self::NO_NONCE_GAPS.bits() | Self::ENOUGH_BALANCE.bits() | Self::NOT_TOO_MUCH_GAS.bits() | Self::ENOUGH_FEE_CAP_BLOCK.bits() | Self::ENOUGH_BLOB_FEE_CAP_BLOCK.bits();
    /// Everything but the dynamic-fee bits: the requirement for the base-fee sub-pool.
    const BASE_FEE_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits() | Self::NO_NONCE_GAPS.bits() | Self::ENOUGH_BALANCE.bits() | Self::NOT_TOO_MUCH_GAS.bits();
    const QUEUED_POOL_BITS = Self::NO_PARKED_ANCESTORS.bits();
    const BLOB_POOL_BITS = Self::BLOB_TRANSACTION.bits();
}
}
impl TxState {
    /// The state of a transaction is considered `pending`, if the transaction has:
    /// - _No_ parked ancestors
    /// - enough balance
    /// - enough fee cap
    /// - enough blob fee cap
    ///
    /// Implemented as a numeric comparison: any bit pattern at or above `PENDING_POOL_BITS`
    /// necessarily has all of those bits set (the only lower-valued bits are the blob marker
    /// and the unused bit 0).
    #[inline]
    pub(crate) const fn is_pending(&self) -> bool {
        self.bits() >= Self::PENDING_POOL_BITS.bits()
    }
    /// Whether this transaction is a blob transaction.
    #[inline]
    pub(crate) const fn is_blob(&self) -> bool {
        self.contains(Self::BLOB_TRANSACTION)
    }
    /// Returns `true` if the transaction has a nonce gap, i.e. the `NO_NONCE_GAPS` bit is
    /// unset.
    #[inline]
    pub(crate) const fn has_nonce_gap(&self) -> bool {
        !self.intersects(Self::NO_NONCE_GAPS)
    }
}
/// Identifier for the transaction Sub-pool
///
/// Variant order matters: the derived `Ord` ranks `Queued < BaseFee < Blob < Pending`, which
/// `is_promoted` relies on.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[repr(u8)]
pub enum SubPool {
    /// The queued sub-pool contains transactions that are not ready to be included in the next
    /// block because they have missing or queued ancestors or the sender the lacks funds to
    /// execute this transaction.
    Queued = 0,
    /// The base-fee sub-pool contains transactions that are not ready to be included in the next
    /// block because they don't meet the base fee requirement.
    BaseFee,
    /// The blob sub-pool contains all blob transactions that are __not__ pending.
    Blob,
    /// The pending sub-pool contains transactions that are ready to be included in the next block.
    Pending,
}
impl SubPool {
    /// Whether this transaction is to be moved to the pending sub-pool.
    #[inline]
    pub const fn is_pending(&self) -> bool {
        match self {
            Self::Pending => true,
            _ => false,
        }
    }
    /// Whether this transaction is in the queued pool.
    #[inline]
    pub const fn is_queued(&self) -> bool {
        match self {
            Self::Queued => true,
            _ => false,
        }
    }
    /// Whether this transaction is in the base fee pool.
    #[inline]
    pub const fn is_base_fee(&self) -> bool {
        match self {
            Self::BaseFee => true,
            _ => false,
        }
    }
    /// Whether this transaction is in the blob pool.
    #[inline]
    pub const fn is_blob(&self) -> bool {
        match self {
            Self::Blob => true,
            _ => false,
        }
    }
    /// Returns whether this is a promotion depending on the current sub-pool location.
    ///
    /// Relies on the derived `Ord` ranking `Queued < BaseFee < Blob < Pending`.
    #[inline]
    pub fn is_promoted(&self, other: Self) -> bool {
        *self > other
    }
}
// Derives the sub-pool from the state bits, in the priority order documented on `TxState`:
// pending first, then blob, then base-fee vs. queued by bit comparison.
impl From<TxState> for SubPool {
    fn from(value: TxState) -> Self {
        if value.is_pending() {
            Self::Pending
        } else if value.is_blob() {
            // all _non-pending_ blob transactions are in the blob sub-pool
            Self::Blob
        } else if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() {
            // below the base-fee threshold some required bit is missing
            Self::Queued
        } else {
            Self::BaseFee
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Promotion is strictly "moving to a higher-ranked sub-pool".
    #[test]
    fn test_promoted() {
        assert!(SubPool::BaseFee.is_promoted(SubPool::Queued));
        assert!(SubPool::Pending.is_promoted(SubPool::BaseFee));
        assert!(SubPool::Pending.is_promoted(SubPool::Queued));
        assert!(SubPool::Pending.is_promoted(SubPool::Blob));
        assert!(!SubPool::BaseFee.is_promoted(SubPool::Pending));
        assert!(!SubPool::Queued.is_promoted(SubPool::BaseFee));
    }
    #[test]
    fn test_tx_state() {
        let mut state = TxState::default();
        state |= TxState::NO_NONCE_GAPS;
        assert!(state.intersects(TxState::NO_NONCE_GAPS))
    }
    // Missing any base-fee-pool bit keeps the tx queued.
    #[test]
    fn test_tx_queued() {
        let state = TxState::default();
        assert_eq!(SubPool::Queued, state.into());
        // ENOUGH_BALANCE is missing here, so this is still queued
        let state = TxState::NO_PARKED_ANCESTORS |
            TxState::NO_NONCE_GAPS |
            TxState::NOT_TOO_MUCH_GAS |
            TxState::ENOUGH_FEE_CAP_BLOCK;
        assert_eq!(SubPool::Queued, state.into());
    }
    // Any bit pattern >= PENDING_POOL_BITS is pending, regardless of the low bits.
    #[test]
    fn test_tx_pending() {
        let state = TxState::PENDING_POOL_BITS;
        assert_eq!(SubPool::Pending, state.into());
        assert!(state.is_pending());
        let bits = 0b11111100;
        let state = TxState::from_bits(bits).unwrap();
        assert_eq!(SubPool::Pending, state.into());
        assert!(state.is_pending());
        let bits = 0b11111110;
        let state = TxState::from_bits(bits).unwrap();
        assert_eq!(SubPool::Pending, state.into());
        assert!(state.is_pending());
    }
    // A blob tx missing a fee bit drops from pending into the blob sub-pool.
    #[test]
    fn test_blob() {
        let mut state = TxState::PENDING_POOL_BITS;
        state.insert(TxState::BLOB_TRANSACTION);
        assert!(state.is_pending());
        state.remove(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
        assert!(state.is_blob());
        assert!(!state.is_pending());
        state.insert(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
        state.remove(TxState::ENOUGH_FEE_CAP_BLOCK);
        assert!(state.is_blob());
        assert!(!state.is_pending());
    }
    #[test]
    fn test_tx_state_no_nonce_gap() {
        let mut state = TxState::default();
        state |= TxState::NO_NONCE_GAPS;
        assert!(!state.has_nonce_gap());
    }
    #[test]
    fn test_tx_state_with_nonce_gap() {
        let state = TxState::default();
        assert!(state.has_nonce_gap());
    }
    #[test]
    fn test_tx_state_enough_balance() {
        let mut state = TxState::default();
        state.insert(TxState::ENOUGH_BALANCE);
        assert!(state.contains(TxState::ENOUGH_BALANCE));
    }
    #[test]
    fn test_tx_state_not_too_much_gas() {
        let mut state = TxState::default();
        state.insert(TxState::NOT_TOO_MUCH_GAS);
        assert!(state.contains(TxState::NOT_TOO_MUCH_GAS));
    }
    #[test]
    fn test_tx_state_enough_fee_cap_block() {
        let mut state = TxState::default();
        state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
        assert!(state.contains(TxState::ENOUGH_FEE_CAP_BLOCK));
    }
    #[test]
    fn test_tx_base_fee() {
        let state = TxState::BASE_FEE_POOL_BITS;
        assert_eq!(SubPool::BaseFee, state.into());
    }
    // The blob marker overrides the base-fee/queued classification for non-pending txs.
    #[test]
    fn test_blob_transaction_only() {
        let state = TxState::BLOB_TRANSACTION;
        assert_eq!(SubPool::Blob, state.into());
        assert!(state.is_blob());
        assert!(!state.is_pending());
    }
    #[test]
    fn test_blob_transaction_with_base_fee_bits() {
        let mut state = TxState::BASE_FEE_POOL_BITS;
        state.insert(TxState::BLOB_TRANSACTION);
        assert_eq!(SubPool::Blob, state.into());
        assert!(state.is_blob());
        assert!(!state.is_pending());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/listener.rs | crates/transaction-pool/src/pool/listener.rs | //! Listeners for the transaction-pool
use crate::{
pool::events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent},
traits::{NewBlobSidecar, PropagateKind},
PoolTransaction, ValidPoolTransaction,
};
use alloy_primitives::{TxHash, B256};
use futures_util::Stream;
use std::{
collections::{hash_map::Entry, HashMap},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::mpsc::{
self as mpsc, error::TrySendError, Receiver, Sender, UnboundedReceiver, UnboundedSender,
};
use tracing::debug;
/// The size of the event channel used to propagate transaction events.
/// Bounded so slow consumers cannot buffer events without limit.
const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024;
/// A Stream that receives [`TransactionEvent`] only for the transaction with the given hash.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct TransactionEvents {
    // The hash this stream is scoped to.
    hash: TxHash,
    // Receiving end of the per-transaction event channel.
    events: UnboundedReceiver<TransactionEvent>,
}
impl TransactionEvents {
    /// The hash of the transaction this stream is scoped to.
    pub const fn hash(&self) -> TxHash {
        self.hash
    }
}
// Stream adapter over the underlying mpsc receiver; yields events as they arrive and ends
// when the sending side is dropped.
impl Stream for TransactionEvents {
    type Item = TransactionEvent;
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        self.get_mut().events.poll_recv(cx)
    }
}
/// A Stream that receives [`FullTransactionEvent`] for _all_ transactions.
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct AllTransactionsEvents<T: PoolTransaction> {
    /// Receiver half of the bounded channel all pool-wide transaction events are sent on.
    pub(crate) events: Receiver<FullTransactionEvent<T>>,
}
impl<T: PoolTransaction> AllTransactionsEvents<T> {
    /// Create a new instance of this stream, wrapping the given receiver half.
    pub const fn new(events: Receiver<FullTransactionEvent<T>>) -> Self {
        Self { events }
    }
}
impl<T: PoolTransaction> Stream for AllTransactionsEvents<T> {
    type Item = FullTransactionEvent<T>;

    /// Polls the wrapped receiver for the next pool-wide transaction event.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        this.events.poll_recv(cx)
    }
}
/// A type that broadcasts [`TransactionEvent`] to installed listeners.
///
/// This is essentially a multi-producer, multi-consumer channel where each event is broadcast to
/// all active receivers.
#[derive(Debug)]
pub(crate) struct PoolEventBroadcast<T: PoolTransaction> {
    /// All listeners for all transaction events.
    all_events_broadcaster: AllPoolEventsBroadcaster<T>,
    /// All listeners for events for a certain transaction hash.
    ///
    /// An entry is removed once its last listener is gone or a final event was emitted
    /// for the hash (see `broadcast_event`).
    broadcasters_by_hash: HashMap<TxHash, PoolEventBroadcaster>,
}
impl<T: PoolTransaction> Default for PoolEventBroadcast<T> {
    /// Starts with no listeners of either kind installed.
    fn default() -> Self {
        Self {
            broadcasters_by_hash: HashMap::default(),
            all_events_broadcaster: AllPoolEventsBroadcaster::default(),
        }
    }
}
impl<T: PoolTransaction> PoolEventBroadcast<T> {
/// Calls the broadcast callback with the `PoolEventBroadcaster` that belongs to the hash.
fn broadcast_event(
&mut self,
hash: &TxHash,
event: TransactionEvent,
pool_event: FullTransactionEvent<T>,
) {
// Broadcast to all listeners for the transaction hash.
if let Entry::Occupied(mut sink) = self.broadcasters_by_hash.entry(*hash) {
sink.get_mut().broadcast(event.clone());
if sink.get().is_empty() || event.is_final() {
sink.remove();
}
}
// Broadcast to all listeners for all transactions.
self.all_events_broadcaster.broadcast(pool_event);
}
/// Returns true if no listeners are installed
#[inline]
pub(crate) fn is_empty(&self) -> bool {
self.all_events_broadcaster.is_empty() && self.broadcasters_by_hash.is_empty()
}
/// Create a new subscription for the given transaction hash.
pub(crate) fn subscribe(&mut self, tx_hash: TxHash) -> TransactionEvents {
let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
match self.broadcasters_by_hash.entry(tx_hash) {
Entry::Occupied(mut entry) => {
entry.get_mut().senders.push(tx);
}
Entry::Vacant(entry) => {
entry.insert(PoolEventBroadcaster { senders: vec![tx] });
}
};
TransactionEvents { hash: tx_hash, events: rx }
}
/// Create a new subscription for all transactions.
pub(crate) fn subscribe_all(&mut self) -> AllTransactionsEvents<T> {
let (tx, rx) = tokio::sync::mpsc::channel(TX_POOL_EVENT_CHANNEL_SIZE);
self.all_events_broadcaster.senders.push(tx);
AllTransactionsEvents::new(rx)
}
/// Notify listeners about a transaction that was added to the pending queue.
pub(crate) fn pending(&mut self, tx: &TxHash, replaced: Option<Arc<ValidPoolTransaction<T>>>) {
self.broadcast_event(tx, TransactionEvent::Pending, FullTransactionEvent::Pending(*tx));
if let Some(replaced) = replaced {
// notify listeners that this transaction was replaced
self.replaced(replaced, *tx);
}
}
/// Notify listeners about a transaction that was replaced.
pub(crate) fn replaced(&mut self, tx: Arc<ValidPoolTransaction<T>>, replaced_by: TxHash) {
let transaction = Arc::clone(&tx);
self.broadcast_event(
tx.hash(),
TransactionEvent::Replaced(replaced_by),
FullTransactionEvent::Replaced { transaction, replaced_by },
);
}
/// Notify listeners about a transaction that was added to the queued pool.
pub(crate) fn queued(&mut self, tx: &TxHash) {
self.broadcast_event(tx, TransactionEvent::Queued, FullTransactionEvent::Queued(*tx));
}
/// Notify listeners about a transaction that was propagated.
pub(crate) fn propagated(&mut self, tx: &TxHash, peers: Vec<PropagateKind>) {
let peers = Arc::new(peers);
self.broadcast_event(
tx,
TransactionEvent::Propagated(Arc::clone(&peers)),
FullTransactionEvent::Propagated(peers),
);
}
/// Notify listeners about all discarded transactions.
#[inline]
pub(crate) fn discarded_many(&mut self, discarded: &[Arc<ValidPoolTransaction<T>>]) {
if self.is_empty() {
return
}
for tx in discarded {
self.discarded(tx.hash());
}
}
/// Notify listeners about a transaction that was discarded.
pub(crate) fn discarded(&mut self, tx: &TxHash) {
self.broadcast_event(tx, TransactionEvent::Discarded, FullTransactionEvent::Discarded(*tx));
}
/// Notify listeners about a transaction that was invalid.
pub(crate) fn invalid(&mut self, tx: &TxHash) {
self.broadcast_event(tx, TransactionEvent::Invalid, FullTransactionEvent::Invalid(*tx));
}
/// Notify listeners that the transaction was mined
pub(crate) fn mined(&mut self, tx: &TxHash, block_hash: B256) {
self.broadcast_event(
tx,
TransactionEvent::Mined(block_hash),
FullTransactionEvent::Mined { tx_hash: *tx, block_hash },
);
}
}
/// All Sender half(s) of the event channels for all transactions.
///
/// This mimics [`tokio::sync::broadcast`] but uses separate channels.
/// Closed senders are evicted lazily the next time `broadcast` runs.
#[derive(Debug)]
struct AllPoolEventsBroadcaster<T: PoolTransaction> {
    /// Corresponding sender half(s) for event listener channel
    senders: Vec<Sender<FullTransactionEvent<T>>>,
}
impl<T: PoolTransaction> Default for AllPoolEventsBroadcaster<T> {
    /// Starts without any installed listeners.
    fn default() -> Self {
        Self { senders: Vec::default() }
    }
}
impl<T: PoolTransaction> AllPoolEventsBroadcaster<T> {
    // Broadcast an event to all listeners. Dropped listeners are silently evicted.
    fn broadcast(&mut self, event: FullTransactionEvent<T>) {
        self.senders.retain(|sender| {
            // A full buffer keeps the listener installed but drops this event for it;
            // only a closed channel (receiver gone) evicts the sender half.
            !matches!(sender.try_send(event.clone()), Err(TrySendError::Closed(_)))
        })
    }

    /// Returns true if there are no listeners installed.
    #[inline]
    const fn is_empty(&self) -> bool {
        self.senders.is_empty()
    }
}
/// All Sender half(s) of the event channels for a specific transaction.
///
/// This mimics [`tokio::sync::broadcast`] but uses separate channels and is unbounded.
/// Closed senders are evicted lazily the next time `broadcast` runs.
#[derive(Default, Debug)]
struct PoolEventBroadcaster {
    /// Corresponding sender half(s) for event listener channel
    senders: Vec<UnboundedSender<TransactionEvent>>,
}
impl PoolEventBroadcaster {
    /// Returns `true` if there are no more listeners remaining.
    const fn is_empty(&self) -> bool {
        self.senders.is_empty()
    }
    // Broadcast an event to all listeners. Dropped listeners are silently evicted.
    //
    // `send` on an unbounded channel only fails when the receiver was dropped, so
    // `is_ok()` doubles as the retain condition.
    fn broadcast(&mut self, event: TransactionEvent) {
        self.senders.retain(|sender| sender.send(event.clone()).is_ok())
    }
}
/// An active listener for new pending transactions.
#[derive(Debug)]
pub(crate) struct PendingTransactionHashListener {
    /// Bounded sender half used to forward pending transaction hashes.
    pub(crate) sender: mpsc::Sender<TxHash>,
    /// Whether to include transactions that should not be propagated over the network.
    pub(crate) kind: TransactionListenerKind,
}
impl PendingTransactionHashListener {
    /// Attempts to send all hashes to the listener.
    ///
    /// Returns false if the channel is closed (receiver dropped)
    pub(crate) fn send_all(&self, hashes: impl IntoIterator<Item = TxHash>) -> bool {
        for tx_hash in hashes {
            if let Err(err) = self.sender.try_send(tx_hash) {
                // A full buffer means the receiver lags behind: the remaining hashes
                // are dropped but the listener stays installed. A closed channel
                // evicts the listener instead.
                return if matches!(err, mpsc::error::TrySendError::Full(_)) {
                    debug!(
                        target: "txpool",
                        "[{:?}] failed to send pending tx; channel full",
                        tx_hash,
                    );
                    true
                } else {
                    false
                }
            }
        }
        true
    }
}
/// An active listener for new transactions added to the pool.
#[derive(Debug)]
pub(crate) struct TransactionListener<T: PoolTransaction> {
    /// Bounded sender half used to forward [`NewTransactionEvent`]s.
    pub(crate) sender: mpsc::Sender<NewTransactionEvent<T>>,
    /// Whether to include transactions that should not be propagated over the network.
    pub(crate) kind: TransactionListenerKind,
}
impl<T: PoolTransaction> TransactionListener<T> {
    /// Attempts to send the event to the listener.
    ///
    /// Returns false if the channel is closed (receiver dropped)
    pub(crate) fn send(&self, event: NewTransactionEvent<T>) -> bool {
        // Single-event case delegates to the batch path.
        self.send_all(std::iter::once(event))
    }

    /// Attempts to send all events to the listener.
    ///
    /// Returns false if the channel is closed (receiver dropped)
    pub(crate) fn send_all(
        &self,
        events: impl IntoIterator<Item = NewTransactionEvent<T>>,
    ) -> bool {
        for event in events {
            if let Err(err) = self.sender.try_send(event) {
                // Keep the listener on a full buffer (remaining events are dropped);
                // evict it once the receiver is gone.
                return if let mpsc::error::TrySendError::Full(event) = err {
                    debug!(
                        target: "txpool",
                        "[{:?}] failed to send pending tx; channel full",
                        event.transaction.hash(),
                    );
                    true
                } else {
                    false
                }
            }
        }
        true
    }
}
/// An active listener for new blobs
#[derive(Debug)]
pub(crate) struct BlobTransactionSidecarListener {
    /// Bounded sender half used to forward [`NewBlobSidecar`] notifications.
    pub(crate) sender: mpsc::Sender<NewBlobSidecar>,
}
/// Determines what kind of new transactions should be emitted by a stream of transactions.
///
/// This gives control whether to include transactions that are allowed to be propagated.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TransactionListenerKind {
    /// Any new pending transactions
    All,
    /// Only transactions that are allowed to be propagated.
    ///
    /// Listeners of this kind (e.g. the network) are not sent transactions whose
    /// `propagate` flag is unset.
    ///
    /// See also [`ValidPoolTransaction`]
    PropagateOnly,
}
impl TransactionListenerKind {
    /// Returns true if we're only interested in transactions that are allowed to be propagated.
    #[inline]
    pub const fn is_propagate_only(&self) -> bool {
        match self {
            Self::PropagateOnly => true,
            Self::All => false,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/mod.rs | crates/transaction-pool/src/pool/mod.rs | //! Transaction Pool internals.
//!
//! Incoming transactions are validated before they enter the pool first. The validation outcome can
//! have 3 states:
//!
//! 1. Transaction can _never_ be valid
//! 2. Transaction is _currently_ valid
//! 3. Transaction is _currently_ invalid, but could potentially become valid in the future
//!
//! However, (2.) and (3.) of a transaction can only be determined on the basis of the current
//! state, whereas (1.) holds indefinitely. This means once the state changes (2.) and (3.) the
//! state of a transaction needs to be reevaluated again.
//!
//! The transaction pool is responsible for storing new, valid transactions and providing the next
//! best transactions sorted by their priority. Where priority is determined by the transaction's
//! score ([`TransactionOrdering`]).
//!
//! Furthermore, the following characteristics fall under (3.):
//!
//! a) Nonce of a transaction is higher than the expected nonce for the next transaction of its
//! sender. A distinction is made here whether multiple transactions from the same sender have
//! gapless nonce increments.
//!
//! a)(1) If _no_ transaction is missing in a chain of multiple
//! transactions from the same sender (all nonce in row), all of them can in principle be executed
//! on the current state one after the other.
//!
//! a)(2) If there's a nonce gap, then all
//! transactions after the missing transaction are blocked until the missing transaction arrives.
//!
//! b) Transaction does not meet the dynamic fee cap requirement introduced by EIP-1559: The
//! fee cap of the transaction needs to be no less than the base fee of the block.
//!
//!
//! In essence the transaction pool is made of three separate sub-pools:
//!
//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy (3.
//! a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest
//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has
//! been executed, the next highest transaction from the same sender `n + 1` becomes ready.
//!
//! - Queued Pool: Contains all transactions that are currently blocked by missing transactions:
//! (3. a)(2): _With_ nonce gaps or due to lack of funds.
//!
//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render an
//! EIP-1559 and all subsequent transactions of the sender currently invalid.
//!
//! The classification of transactions is always dependent on the current state that is changed as
//! soon as a new block is mined. Once a new block is mined, the account changeset must be applied
//! to the transaction pool.
//!
//!
//! Depending on the use case, consumers of the [`TransactionPool`](crate::traits::TransactionPool)
//! are interested in (2.) and/or (3.).
//! A generic [`TransactionPool`](crate::traits::TransactionPool) that only handles transactions.
//!
//! This Pool maintains two separate sub-pools for (2.) and (3.)
//!
//! ## Terminology
//!
//! - _Pending_: pending transactions are transactions that fall under (2.). These transactions can
//! currently be executed and are stored in the pending sub-pool
//! - _Queued_: queued transactions are transactions that fall under category (3.). Those
//! transactions are _currently_ waiting for state changes that eventually move them into
//! category (2.) and become pending.
use crate::{
blobstore::BlobStore,
error::{PoolError, PoolErrorKind, PoolResult},
identifier::{SenderId, SenderIdentifiers, TransactionId},
metrics::BlobStoreMetrics,
pool::{
listener::{
BlobTransactionSidecarListener, PendingTransactionHashListener, PoolEventBroadcast,
TransactionListener,
},
state::SubPool,
txpool::{SenderInfo, TxPool},
update::UpdateOutcome,
},
traits::{
AllPoolTransactions, BestTransactionsAttributes, BlockInfo, GetPooledTransactionLimit,
NewBlobSidecar, PoolSize, PoolTransaction, PropagatedTransactions, TransactionOrigin,
},
validate::{TransactionValidationOutcome, ValidPoolTransaction, ValidTransaction},
CanonicalStateUpdate, EthPoolTransaction, PoolConfig, TransactionOrdering,
TransactionValidator,
};
use alloy_primitives::{Address, TxHash, B256};
use best::BestTransactions;
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use reth_eth_wire_types::HandleMempoolData;
use reth_execution_types::ChangedAccount;
use alloy_eips::{eip7594::BlobTransactionSidecarVariant, Typed2718};
use reth_primitives_traits::Recovered;
use rustc_hash::FxHashMap;
use std::{collections::HashSet, fmt, sync::Arc, time::Instant};
use tokio::sync::mpsc;
use tracing::{debug, trace, warn};
mod events;
pub use best::{BestTransactionFilter, BestTransactionsWithPrioritizedSenders};
pub use blob::{blob_tx_priority, fee_delta, BlobOrd, BlobTransactions};
pub use events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent};
pub use listener::{AllTransactionsEvents, TransactionEvents, TransactionListenerKind};
pub use parked::{BasefeeOrd, ParkedOrd, ParkedPool, QueuedOrd};
pub use pending::PendingPool;
use reth_primitives_traits::Block;
mod best;
mod blob;
mod listener;
mod parked;
pub mod pending;
pub(crate) mod size;
pub(crate) mod state;
pub mod txpool;
mod update;
/// Bound on number of pending transactions from `reth_network::TransactionsManager` to buffer.
pub const PENDING_TX_LISTENER_BUFFER_SIZE: usize = 2048;
/// Bound on number of new transactions from `reth_network::TransactionsManager` to buffer.
pub const NEW_TX_LISTENER_BUFFER_SIZE: usize = 1024;
/// Bound on number of blob sidecar notifications buffered per blob-sidecar listener.
const BLOB_SIDECAR_LISTENER_BUFFER_SIZE: usize = 512;
/// Transaction pool internals.
///
/// Shared state behind the pool handle: the validated transaction set, the validator,
/// blob storage, and all listener/event channels.
pub struct PoolInner<V, T, S>
where
    T: TransactionOrdering,
{
    /// Internal mapping of addresses to plain ints.
    identifiers: RwLock<SenderIdentifiers>,
    /// Transaction validator.
    validator: V,
    /// Storage for blob transactions
    blob_store: S,
    /// The internal pool that manages all transactions.
    pool: RwLock<TxPool<T>>,
    /// Pool settings.
    config: PoolConfig,
    /// Manages listeners for transaction state change events.
    event_listener: RwLock<PoolEventBroadcast<T::Transaction>>,
    /// Listeners for new _full_ pending transactions.
    pending_transaction_listener: Mutex<Vec<PendingTransactionHashListener>>,
    /// Listeners for new transactions added to the pool.
    transaction_listener: Mutex<Vec<TransactionListener<T::Transaction>>>,
    /// Listener for new blob transaction sidecars added to the pool.
    blob_transaction_sidecar_listener: Mutex<Vec<BlobTransactionSidecarListener>>,
    /// Metrics for the blob store
    blob_store_metrics: BlobStoreMetrics,
}
// === impl PoolInner ===
impl<V, T, S> PoolInner<V, T, S>
where
V: TransactionValidator,
T: TransactionOrdering<Transaction = <V as TransactionValidator>::Transaction>,
S: BlobStore,
{
/// Create a new transaction pool instance.
pub fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self {
Self {
identifiers: Default::default(),
validator,
event_listener: Default::default(),
pool: RwLock::new(TxPool::new(ordering, config.clone())),
pending_transaction_listener: Default::default(),
transaction_listener: Default::default(),
blob_transaction_sidecar_listener: Default::default(),
config,
blob_store,
blob_store_metrics: Default::default(),
}
}
    /// Returns the configured blob store.
    pub const fn blob_store(&self) -> &S {
        &self.blob_store
    }

    /// Returns stats about the size of the pool.
    pub fn size(&self) -> PoolSize {
        self.get_pool_data().size()
    }

    /// Returns the currently tracked block
    pub fn block_info(&self) -> BlockInfo {
        self.get_pool_data().block_info()
    }

    /// Sets the currently tracked block
    pub fn set_block_info(&self, info: BlockInfo) {
        // Takes the pool's write lock for the duration of the update.
        self.pool.write().set_block_info(info)
    }

    /// Returns the internal [`SenderId`] for this address
    pub fn get_sender_id(&self, addr: Address) -> SenderId {
        // Lazily assigns a fresh id if this address was not seen before.
        self.identifiers.write().sender_id_or_create(addr)
    }

    /// Returns the internal [`SenderId`]s for the given addresses.
    pub fn get_sender_ids(&self, addrs: impl IntoIterator<Item = Address>) -> Vec<SenderId> {
        self.identifiers.write().sender_ids_or_create(addrs)
    }

    /// Returns all senders in the pool
    pub fn unique_senders(&self) -> HashSet<Address> {
        self.get_pool_data().unique_senders()
    }
/// Converts the changed accounts to a map of sender ids to sender info (internal identifier
/// used for accounts)
fn changed_senders(
&self,
accs: impl Iterator<Item = ChangedAccount>,
) -> FxHashMap<SenderId, SenderInfo> {
let mut identifiers = self.identifiers.write();
accs.into_iter()
.map(|acc| {
let ChangedAccount { address, nonce, balance } = acc;
let sender_id = identifiers.sender_id_or_create(address);
(sender_id, SenderInfo { state_nonce: nonce, balance })
})
.collect()
}
    /// Get the config the pool was configured with.
    pub const fn config(&self) -> &PoolConfig {
        &self.config
    }

    /// Get the validator reference.
    pub const fn validator(&self) -> &V {
        &self.validator
    }
/// Adds a new transaction listener to the pool that gets notified about every new _pending_
/// transaction inserted into the pool
pub fn add_pending_listener(&self, kind: TransactionListenerKind) -> mpsc::Receiver<TxHash> {
let (sender, rx) = mpsc::channel(self.config.pending_tx_listener_buffer_size);
let listener = PendingTransactionHashListener { sender, kind };
self.pending_transaction_listener.lock().push(listener);
rx
}
/// Adds a new transaction listener to the pool that gets notified about every new transaction.
pub fn add_new_transaction_listener(
&self,
kind: TransactionListenerKind,
) -> mpsc::Receiver<NewTransactionEvent<T::Transaction>> {
let (sender, rx) = mpsc::channel(self.config.new_tx_listener_buffer_size);
let listener = TransactionListener { sender, kind };
self.transaction_listener.lock().push(listener);
rx
}
/// Adds a new blob sidecar listener to the pool that gets notified about every new
/// eip4844 transaction's blob sidecar.
pub fn add_blob_sidecar_listener(&self) -> mpsc::Receiver<NewBlobSidecar> {
let (sender, rx) = mpsc::channel(BLOB_SIDECAR_LISTENER_BUFFER_SIZE);
let listener = BlobTransactionSidecarListener { sender };
self.blob_transaction_sidecar_listener.lock().push(listener);
rx
}
/// If the pool contains the transaction, this adds a new listener that gets notified about
/// transaction events.
pub fn add_transaction_event_listener(&self, tx_hash: TxHash) -> Option<TransactionEvents> {
self.get_pool_data()
.contains(&tx_hash)
.then(|| self.event_listener.write().subscribe(tx_hash))
}
/// Adds a listener for all transaction events.
pub fn add_all_transactions_event_listener(&self) -> AllTransactionsEvents<T::Transaction> {
self.event_listener.write().subscribe_all()
}
/// Returns a read lock to the pool's data.
pub fn get_pool_data(&self) -> RwLockReadGuard<'_, TxPool<T>> {
self.pool.read()
}
/// Returns hashes of transactions in the pool that can be propagated.
pub fn pooled_transactions_hashes(&self) -> Vec<TxHash> {
self.get_pool_data()
.all()
.transactions_iter()
.filter(|tx| tx.propagate)
.map(|tx| *tx.hash())
.collect()
}
/// Returns transactions in the pool that can be propagated
pub fn pooled_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned().collect()
}
/// Returns only the first `max` transactions in the pool that can be propagated.
pub fn pooled_transactions_max(
&self,
max: usize,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.get_pool_data()
.all()
.transactions_iter()
.filter(|tx| tx.propagate)
.take(max)
.cloned()
.collect()
}
/// Converts the internally tracked transaction to the pooled format.
///
/// If the transaction is an EIP-4844 transaction, the blob sidecar is fetched from the blob
/// store and attached to the transaction.
fn to_pooled_transaction(
&self,
transaction: Arc<ValidPoolTransaction<T::Transaction>>,
) -> Option<Recovered<<<V as TransactionValidator>::Transaction as PoolTransaction>::Pooled>>
where
<V as TransactionValidator>::Transaction: EthPoolTransaction,
{
if transaction.is_eip4844() {
let sidecar = self.blob_store.get(*transaction.hash()).ok()??;
transaction.transaction.clone().try_into_pooled_eip4844(sidecar)
} else {
transaction
.transaction
.clone()
.try_into_pooled()
.inspect_err(|err| {
debug!(
target: "txpool", %err,
"failed to convert transaction to pooled element; skipping",
);
})
.ok()
}
}
/// Returns pooled transactions for the given transaction hashes that are allowed to be
/// propagated.
pub fn get_pooled_transaction_elements(
&self,
tx_hashes: Vec<TxHash>,
limit: GetPooledTransactionLimit,
) -> Vec<<<V as TransactionValidator>::Transaction as PoolTransaction>::Pooled>
where
<V as TransactionValidator>::Transaction: EthPoolTransaction,
{
let transactions = self.get_all_propagatable(tx_hashes);
let mut elements = Vec::with_capacity(transactions.len());
let mut size = 0;
for transaction in transactions {
let encoded_len = transaction.encoded_length();
let Some(pooled) = self.to_pooled_transaction(transaction) else {
continue;
};
size += encoded_len;
elements.push(pooled.into_inner());
if limit.exceeds(size) {
break
}
}
elements
}
/// Returns converted pooled transaction for the given transaction hash.
pub fn get_pooled_transaction_element(
&self,
tx_hash: TxHash,
) -> Option<Recovered<<<V as TransactionValidator>::Transaction as PoolTransaction>::Pooled>>
where
<V as TransactionValidator>::Transaction: EthPoolTransaction,
{
self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx))
}
    /// Updates the entire pool after a new block was executed.
    ///
    /// Ordering matters here: the validator is informed of the new head first, then the
    /// internal pool is updated under the write lock, and only afterwards are blob
    /// sidecars deleted and listeners notified.
    pub fn on_canonical_state_change<B>(&self, update: CanonicalStateUpdate<'_, B>)
    where
        B: Block,
    {
        trace!(target: "txpool", ?update, "updating pool on canonical state change");
        let block_info = update.block_info();
        let CanonicalStateUpdate {
            new_tip, changed_accounts, mined_transactions, update_kind, ..
        } = update;
        // Let the validator observe the new head before the pool is updated.
        self.validator.on_new_head_block(new_tip);
        let changed_senders = self.changed_senders(changed_accounts.into_iter());
        // update the pool
        let outcome = self.pool.write().on_canonical_state_change(
            block_info,
            mined_transactions,
            changed_senders,
            update_kind,
        );
        // This will discard outdated transactions based on the account's nonce
        self.delete_discarded_blobs(outcome.discarded.iter());
        // notify listeners about updates
        self.notify_on_new_state(outcome);
    }
    /// Performs account updates on the pool.
    ///
    /// This will either promote or discard transactions based on the new account state.
    ///
    /// This should be invoked when the pool drifted and accounts are updated manually
    pub fn update_accounts(&self, accounts: Vec<ChangedAccount>) {
        let changed_senders = self.changed_senders(accounts.into_iter());
        let UpdateOutcome { promoted, discarded } =
            self.pool.write().update_accounts(changed_senders);
        // Notify about promoted pending transactions (similar to notify_on_new_state)
        if !promoted.is_empty() {
            // Hash-only listeners first; propagate-only listeners skip transactions
            // whose `propagate` flag is unset.
            self.pending_transaction_listener.lock().retain_mut(|listener| {
                let promoted_hashes = promoted.iter().filter_map(|tx| {
                    if listener.kind.is_propagate_only() && !tx.propagate {
                        None
                    } else {
                        Some(*tx.hash())
                    }
                });
                listener.send_all(promoted_hashes)
            });
            // in this case we should also emit promoted transactions in full
            self.transaction_listener.lock().retain_mut(|listener| {
                let promoted_txs = promoted.iter().filter_map(|tx| {
                    if listener.kind.is_propagate_only() && !tx.propagate {
                        None
                    } else {
                        Some(NewTransactionEvent::pending(tx.clone()))
                    }
                });
                listener.send_all(promoted_txs)
            });
        }
        {
            // Per-transaction event listeners; skipped entirely when none are installed.
            let mut listener = self.event_listener.write();
            if !listener.is_empty() {
                for tx in &promoted {
                    listener.pending(tx.hash(), None);
                }
                for tx in &discarded {
                    listener.discarded(tx.hash());
                }
            }
        }
        // This deletes outdated blob txs from the blob store, based on the account's nonce. This is
        // called during txpool maintenance when the pool drifted.
        self.delete_discarded_blobs(discarded.iter());
    }
    /// Add a single validated transaction into the pool.
    ///
    /// Note: this is only used internally by [`Self::add_transactions()`], all new transaction(s)
    /// come in through that function, either as a batch or `std::iter::once`.
    ///
    /// The caller already holds the pool write lock (`pool`), so the insertion and any
    /// follow-up listener notifications happen atomically with respect to other writers.
    fn add_transaction(
        &self,
        pool: &mut RwLockWriteGuard<'_, TxPool<T>>,
        origin: TransactionOrigin,
        tx: TransactionValidationOutcome<T::Transaction>,
    ) -> PoolResult<AddedTransactionOutcome> {
        match tx {
            TransactionValidationOutcome::Valid {
                balance,
                state_nonce,
                transaction,
                propagate,
                bytecode_hash,
                authorities,
            } => {
                let sender_id = self.get_sender_id(transaction.sender());
                let transaction_id = TransactionId::new(sender_id, transaction.nonce());
                // split the valid transaction and the blob sidecar if it has any
                let (transaction, maybe_sidecar) = match transaction {
                    ValidTransaction::Valid(tx) => (tx, None),
                    ValidTransaction::ValidWithSidecar { transaction, sidecar } => {
                        debug_assert!(
                            transaction.is_eip4844(),
                            "validator returned sidecar for non EIP-4844 transaction"
                        );
                        (transaction, Some(sidecar))
                    }
                };
                let tx = ValidPoolTransaction {
                    transaction,
                    transaction_id,
                    propagate,
                    timestamp: Instant::now(),
                    origin,
                    authority_ids: authorities.map(|auths| self.get_sender_ids(auths)),
                };
                // An insertion failure propagates as a `PoolError` via `?`.
                let added = pool.add_transaction(tx, balance, state_nonce, bytecode_hash)?;
                let hash = *added.hash();
                let state = match added.subpool() {
                    SubPool::Pending => AddedTransactionState::Pending,
                    _ => AddedTransactionState::Queued,
                };
                // transaction was successfully inserted into the pool
                if let Some(sidecar) = maybe_sidecar {
                    // notify blob sidecar listeners
                    self.on_new_blob_sidecar(&hash, &sidecar);
                    // store the sidecar in the blob store
                    self.insert_blob(hash, sidecar);
                }
                if let Some(replaced) = added.replaced_blob_transaction() {
                    debug!(target: "txpool", "[{:?}] delete replaced blob sidecar", replaced);
                    // delete the replaced transaction from the blob store
                    self.delete_blob(replaced);
                }
                // Notify about new pending transactions
                if let Some(pending) = added.as_pending() {
                    self.on_new_pending_transaction(pending);
                }
                // Notify tx event listeners
                self.notify_event_listeners(&added);
                if let Some(discarded) = added.discarded_transactions() {
                    self.delete_discarded_blobs(discarded.iter());
                }
                // Notify listeners for _all_ transactions
                self.on_new_transaction(added.into_new_transaction_event());
                Ok(AddedTransactionOutcome { hash, state })
            }
            TransactionValidationOutcome::Invalid(tx, err) => {
                // Invalid transactions still get an event so hash subscribers learn the outcome.
                let mut listener = self.event_listener.write();
                listener.invalid(tx.hash());
                Err(PoolError::new(*tx.hash(), err))
            }
            TransactionValidationOutcome::Error(tx_hash, err) => {
                // Validation errors are surfaced as a discard event for subscribers.
                let mut listener = self.event_listener.write();
                listener.discarded(&tx_hash);
                Err(PoolError::other(tx_hash, err))
            }
        }
    }
/// Adds a transaction and returns the event stream.
pub fn add_transaction_and_subscribe(
&self,
origin: TransactionOrigin,
tx: TransactionValidationOutcome<T::Transaction>,
) -> PoolResult<TransactionEvents> {
let listener = {
let mut listener = self.event_listener.write();
listener.subscribe(tx.tx_hash())
};
let mut results = self.add_transactions(origin, std::iter::once(tx));
results.pop().expect("result length is the same as the input")?;
Ok(listener)
}
    /// Adds all transactions in the iterator to the pool, each with its individual origin,
    /// returning a list of results.
    ///
    /// Note: A large batch may lock the pool for a long time that blocks important operations
    /// like updating the pool on canonical state changes. The caller should consider having
    /// a max batch size to balance transaction insertions with other updates.
    pub fn add_transactions_with_origins(
        &self,
        transactions: impl IntoIterator<
            Item = (TransactionOrigin, TransactionValidationOutcome<T::Transaction>),
        >,
    ) -> Vec<PoolResult<AddedTransactionOutcome>> {
        // Process all transactions in one write lock, maintaining individual origins
        let (mut added, discarded) = {
            let mut pool = self.pool.write();
            let added = transactions
                .into_iter()
                .map(|(origin, tx)| self.add_transaction(&mut pool, origin, tx))
                .collect::<Vec<_>>();
            // Enforce the pool size limits if at least one transaction was added successfully
            let discarded = if added.iter().any(Result::is_ok) {
                pool.discard_worst()
            } else {
                Default::default()
            };
            (added, discarded)
        };
        // The write lock is released before listeners and the blob store are touched.
        if !discarded.is_empty() {
            // Delete any blobs associated with discarded blob transactions
            self.delete_discarded_blobs(discarded.iter());
            self.event_listener.write().discarded_many(&discarded);
            let discarded_hashes =
                discarded.into_iter().map(|tx| *tx.hash()).collect::<HashSet<_>>();
            // A newly added transaction may be immediately discarded, so we need to
            // adjust the result here
            for res in &mut added {
                if let Ok(AddedTransactionOutcome { hash, .. }) = res {
                    if discarded_hashes.contains(hash) {
                        *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert))
                    }
                }
            }
        }
        added
    }
/// Adds all transactions in the iterator to the pool, returning a list of results.
///
/// Note: A large batch may lock the pool for a long time that blocks important operations
/// like updating the pool on canonical state changes. The caller should consider having
/// a max batch size to balance transaction insertions with other updates.
pub fn add_transactions(
&self,
origin: TransactionOrigin,
transactions: impl IntoIterator<Item = TransactionValidationOutcome<T::Transaction>>,
) -> Vec<PoolResult<AddedTransactionOutcome>> {
self.add_transactions_with_origins(transactions.into_iter().map(|tx| (origin, tx)))
}
/// Notify all listeners about a new pending transaction.
fn on_new_pending_transaction(&self, pending: &AddedPendingTransaction<T::Transaction>) {
let propagate_allowed = pending.is_propagate_allowed();
let mut transaction_listeners = self.pending_transaction_listener.lock();
transaction_listeners.retain_mut(|listener| {
if listener.kind.is_propagate_only() && !propagate_allowed {
// only emit this hash to listeners that are only allowed to receive propagate only
// transactions, such as network
return !listener.sender.is_closed()
}
// broadcast all pending transactions to the listener
listener.send_all(pending.pending_transactions(listener.kind))
});
}
/// Notify all listeners about a newly inserted pending transaction.
fn on_new_transaction(&self, event: NewTransactionEvent<T::Transaction>) {
let mut transaction_listeners = self.transaction_listener.lock();
transaction_listeners.retain_mut(|listener| {
if listener.kind.is_propagate_only() && !event.transaction.propagate {
// only emit this hash to listeners that are only allowed to receive propagate only
// transactions, such as network
return !listener.sender.is_closed()
}
listener.send(event.clone())
});
}
    /// Notify all listeners about a blob sidecar for a newly inserted blob (eip4844) transaction.
    ///
    /// The sidecar is cloned into an [`Arc`] exactly once (and only if there is at least one
    /// listener), so all listeners share the same allocation.
    fn on_new_blob_sidecar(&self, tx_hash: &TxHash, sidecar: &BlobTransactionSidecarVariant) {
        let mut sidecar_listeners = self.blob_transaction_sidecar_listener.lock();
        if sidecar_listeners.is_empty() {
            // Nobody is listening: avoid cloning the (potentially large) sidecar below.
            return
        }
        let sidecar = Arc::new(sidecar.clone());
        sidecar_listeners.retain_mut(|listener| {
            let new_blob_event = NewBlobSidecar { tx_hash: *tx_hash, sidecar: sidecar.clone() };
            match listener.sender.try_send(new_blob_event) {
                Ok(()) => true,
                Err(err) => {
                    if matches!(err, mpsc::error::TrySendError::Full(_)) {
                        // A full channel is transient back-pressure: log and keep the listener.
                        // NOTE(review): this logs the whole sidecar rather than `tx_hash`, which
                        // can be extremely verbose — confirm that is intended.
                        debug!(
                            target: "txpool",
                            "[{:?}] failed to send blob sidecar; channel full",
                            sidecar,
                        );
                        true
                    } else {
                        // Channel closed: the receiver is gone, drop the listener.
                        false
                    }
                }
            }
        })
    }
    /// Notifies transaction listeners about changes once a block was processed.
    ///
    /// Three listener groups are served in order: hash-only pending listeners, full-transaction
    /// listeners, and per-transaction event listeners (mined/pending/discarded).
    fn notify_on_new_state(&self, outcome: OnNewCanonicalStateOutcome<T::Transaction>) {
        trace!(target: "txpool", promoted=outcome.promoted.len(), discarded= outcome.discarded.len() ,"notifying listeners on state change");
        // notify about promoted pending transactions
        // emit hashes
        self.pending_transaction_listener
            .lock()
            .retain_mut(|listener| listener.send_all(outcome.pending_transactions(listener.kind)));
        // emit full transactions
        self.transaction_listener.lock().retain_mut(|listener| {
            listener.send_all(outcome.full_pending_transactions(listener.kind))
        });
        // Destructure after the sends above, which only borrow `outcome`.
        let OnNewCanonicalStateOutcome { mined, promoted, discarded, block_hash } = outcome;
        // broadcast specific transaction events
        let mut listener = self.event_listener.write();
        if !listener.is_empty() {
            for tx in &mined {
                listener.mined(tx, block_hash);
            }
            for tx in &promoted {
                listener.pending(tx.hash(), None);
            }
            for tx in &discarded {
                listener.discarded(tx.hash());
            }
        }
    }
    /// Fire events for the newly added transaction if there are any.
    ///
    /// A pending insertion emits a `pending` event for the transaction itself plus events for
    /// every transaction it promoted or discarded; a parked insertion emits `queued` (and
    /// `replaced` when it displaced an existing transaction).
    fn notify_event_listeners(&self, tx: &AddedTransaction<T::Transaction>) {
        let mut listener = self.event_listener.write();
        if listener.is_empty() {
            // nothing to notify
            return
        }
        match tx {
            AddedTransaction::Pending(tx) => {
                let AddedPendingTransaction { transaction, promoted, discarded, replaced } = tx;
                listener.pending(transaction.hash(), replaced.clone());
                for tx in promoted {
                    listener.pending(tx.hash(), None);
                }
                for tx in discarded {
                    listener.discarded(tx.hash());
                }
            }
            AddedTransaction::Parked { transaction, replaced, .. } => {
                listener.queued(transaction.hash());
                if let Some(replaced) = replaced {
                    listener.replaced(replaced.clone(), *transaction.hash());
                }
            }
        }
    }
/// Returns an iterator that yields transactions that are ready to be included in the block.
pub fn best_transactions(&self) -> BestTransactions<T> {
self.get_pool_data().best_transactions()
}
/// Returns an iterator that yields transactions that are ready to be included in the block with
/// the given base fee and optional blob fee attributes.
pub fn best_transactions_with_attributes(
&self,
best_transactions_attributes: BestTransactionsAttributes,
) -> Box<dyn crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T::Transaction>>>>
{
self.get_pool_data().best_transactions_with_attributes(best_transactions_attributes)
}
/// Returns only the first `max` transactions in the pending pool.
pub fn pending_transactions_max(
&self,
max: usize,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.get_pool_data().pending_transactions_iter().take(max).collect()
}
/// Returns all transactions from the pending sub-pool
pub fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.get_pool_data().pending_transactions()
}
/// Returns all transactions from parked pools
pub fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.get_pool_data().queued_transactions()
}
/// Returns all transactions in the pool
pub fn all_transactions(&self) -> AllPoolTransactions<T::Transaction> {
let pool = self.get_pool_data();
AllPoolTransactions {
pending: pool.pending_transactions(),
queued: pool.queued_transactions(),
}
}
/// Returns _all_ transactions in the pool
pub fn all_transaction_hashes(&self) -> Vec<TxHash> {
self.get_pool_data().all().transactions_iter().map(|tx| *tx.hash()).collect()
}
/// Removes and returns all matching transactions from the pool.
///
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
use crate::{
error::{Eip4844PoolTransactionError, InvalidPoolTransactionError},
identifier::{SenderId, TransactionId},
pool::pending::PendingTransaction,
PoolTransaction, Priority, TransactionOrdering, ValidPoolTransaction,
};
use alloy_consensus::Transaction;
use alloy_eips::Typed2718;
use alloy_primitives::Address;
use core::fmt;
use reth_primitives_traits::transaction::error::InvalidTransactionError;
use std::{
collections::{BTreeMap, BTreeSet, HashSet, VecDeque},
sync::Arc,
};
use tokio::sync::broadcast::{error::TryRecvError, Receiver};
use tracing::debug;
/// An iterator that returns transactions that can be executed on the current state (*best*
/// transactions).
///
/// This is a wrapper around [`BestTransactions`] that also enforces a specific basefee.
///
/// This iterator guarantees that all transactions it returns satisfy both the base fee and the
/// blob fee!
pub(crate) struct BestTransactionsWithFees<T: TransactionOrdering> {
    /// The wrapped iterator yielding candidate transactions.
    pub(crate) best: BestTransactions<T>,
    /// Minimum `max_fee_per_gas` a yielded transaction must be able to pay.
    pub(crate) base_fee: u64,
    /// Minimum `max_fee_per_blob_gas` a yielded EIP-4844 transaction must be able to pay.
    pub(crate) base_fee_per_blob_gas: u64,
}
impl<T: TransactionOrdering> crate::traits::BestTransactions for BestTransactionsWithFees<T> {
    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
        // Resolves to the wrapped iterator's inherent `mark_invalid` (inherent methods take
        // precedence over the trait method of the same name).
        self.best.mark_invalid(tx, kind)
    }

    fn no_updates(&mut self) {
        self.best.no_updates()
    }

    fn skip_blobs(&mut self) {
        self.best.set_skip_blobs(true)
    }

    fn set_skip_blobs(&mut self, skip_blobs: bool) {
        self.best.set_skip_blobs(skip_blobs)
    }
}
impl<T: TransactionOrdering> Iterator for BestTransactionsWithFees<T> {
    type Item = Arc<ValidPoolTransaction<T::Transaction>>;

    fn next(&mut self) -> Option<Self::Item> {
        // Keep pulling candidates until one satisfies the fee requirements.
        loop {
            let candidate = Iterator::next(&mut self.best)?;
            // A candidate is acceptable when it can pay the base fee and, for EIP-4844
            // transactions, the blob fee as well (non-blob txs have no blob fee to check).
            let fee_ok = candidate.transaction.max_fee_per_gas() >= self.base_fee as u128;
            let blob_fee_ok = candidate
                .transaction
                .max_fee_per_blob_gas()
                .is_none_or(|fee| fee >= self.base_fee_per_blob_gas as u128);
            if fee_ok && blob_fee_ok {
                return Some(candidate);
            }
            // Underpriced: invalidate it so its descendants are not yielded either.
            crate::traits::BestTransactions::mark_invalid(
                self,
                &candidate,
                InvalidPoolTransactionError::Underpriced,
            );
        }
    }
}
/// An iterator that returns transactions that can be executed on the current state (*best*
/// transactions).
///
/// The [`PendingPool`](crate::pool::pending::PendingPool) contains transactions that *could* all
/// be executed on the current state, but only yields transactions that are ready to be executed
/// now. While it contains all gapless transactions of a sender, it _always_ only returns the
/// transaction with the current on chain nonce.
#[derive(Debug)]
pub struct BestTransactions<T: TransactionOrdering> {
    /// Contains a copy of _all_ transactions of the pending pool at the point in time this
    /// iterator was created.
    pub(crate) all: BTreeMap<TransactionId, PendingTransaction<T>>,
    /// Transactions that can be executed right away: these have the expected nonce.
    ///
    /// Once an `independent` transaction with the nonce `N` is returned, it unlocks `N+1`, which
    /// then can be moved from the `all` set to the `independent` set.
    pub(crate) independent: BTreeSet<PendingTransaction<T>>,
    /// Senders of transactions that were marked invalid; all of their transactions are skipped.
    pub(crate) invalid: HashSet<SenderId>,
    /// Used to receive any new pending transactions that have been added to the pool after this
    /// iterator was created.
    ///
    /// These new pending transactions are inserted into this iterator's pool before yielding the
    /// next value.
    pub(crate) new_transaction_receiver: Option<Receiver<PendingTransaction<T>>>,
    /// The priority value of the most recently yielded transaction.
    ///
    /// This is required when new pending transactions are fed in while the iterator is yielding
    /// values: arrivals with a higher priority than this came too late to be ordered correctly.
    pub(crate) last_priority: Option<Priority<T::PriorityValue>>,
    /// Flag to control whether to skip blob transactions (EIP4844).
    pub(crate) skip_blobs: bool,
}
impl<T: TransactionOrdering> BestTransactions<T> {
    /// Mark the transaction and its descendants as invalid.
    ///
    /// This bans the transaction's sender, so every later transaction of that sender is skipped
    /// as well. The concrete error `kind` is unused here.
    pub(crate) fn mark_invalid(
        &mut self,
        tx: &Arc<ValidPoolTransaction<T::Transaction>>,
        _kind: InvalidPoolTransactionError,
    ) {
        self.invalid.insert(tx.sender_id());
    }
    /// Returns the ancestor of the given transaction, the transaction with `nonce - 1`.
    ///
    /// Note: for a transaction with nonce higher than the current on chain nonce this will always
    /// return an ancestor since all transaction in this pool are gapless.
    pub(crate) fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction<T>> {
        self.all.get(&id.unchecked_ancestor()?)
    }
    /// Non-blocking read on the new pending transactions subscription channel.
    ///
    /// Returns `None` when there is no subscription, when the channel is empty/closed, or when
    /// the received transaction has a higher priority than the last yielded one (it arrived too
    /// late to be ordered correctly).
    fn try_recv(&mut self) -> Option<PendingTransaction<T>> {
        loop {
            match self.new_transaction_receiver.as_mut()?.try_recv() {
                Ok(tx) => {
                    if let Some(last_priority) = &self.last_priority {
                        if &tx.priority > last_priority {
                            // we skip transactions if we already yielded a transaction with lower
                            // priority
                            return None
                        }
                    }
                    return Some(tx)
                }
                // note TryRecvError::Lagged can be returned here, which is an error that attempts
                // to correct itself on consecutive try_recv() attempts
                // the cost of ignoring this error is allowing old transactions to get
                // overwritten after the chan buffer size is met
                Err(TryRecvError::Lagged(_)) => {
                    // Handle the case where the receiver lagged too far behind.
                    // `num_skipped` indicates the number of messages that were skipped.
                }
                // this case is still better than the existing iterator behavior where no new
                // pending txs are surfaced to consumers
                Err(_) => return None,
            }
        }
    }
    /// Removes the currently best independent transaction from the independent set and the total
    /// set.
    fn pop_best(&mut self) -> Option<PendingTransaction<T>> {
        // `pop_last` returns the highest-priority entry of the ordered set.
        self.independent.pop_last().inspect(|best| {
            self.all.remove(best.transaction.id());
        })
    }
    /// Checks for new transactions that have come into the `PendingPool` after this iterator was
    /// created and inserts them.
    fn add_new_transactions(&mut self) {
        while let Some(pending_tx) = self.try_recv() {
            // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked
            let tx_id = *pending_tx.transaction.id();
            if self.ancestor(&tx_id).is_none() {
                // No in-pool ancestor: the transaction is executable right away.
                self.independent.insert(pending_tx.clone());
            }
            self.all.insert(tx_id, pending_tx);
        }
    }
}
impl<T: TransactionOrdering> crate::traits::BestTransactions for BestTransactions<T> {
    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
        // Delegates to the inherent implementation, which bans the sender.
        Self::mark_invalid(self, tx, kind)
    }

    fn no_updates(&mut self) {
        // Dropping the receiver detaches this iterator from the live pool feed; clearing
        // `last_priority` disables the priority-based filtering of late arrivals.
        self.new_transaction_receiver = None;
        self.last_priority = None;
    }

    fn skip_blobs(&mut self) {
        self.set_skip_blobs(true);
    }

    fn set_skip_blobs(&mut self, skip_blobs: bool) {
        self.skip_blobs = skip_blobs;
    }
}
impl<T: TransactionOrdering> Iterator for BestTransactions<T> {
    type Item = Arc<ValidPoolTransaction<T::Transaction>>;
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // Pull in any transactions that entered the pool after this iterator was created.
            self.add_new_transactions();
            // Remove the next independent tx with the highest priority
            let best = self.pop_best()?;
            let sender_id = best.transaction.sender_id();
            // skip transactions for which sender was marked as invalid
            if self.invalid.contains(&sender_id) {
                debug!(
                    target: "txpool",
                    "[{:?}] skipping invalid transaction",
                    best.transaction.hash()
                );
                continue
            }
            // Insert transactions that just got unlocked.
            if let Some(unlocked) = self.all.get(&best.unlocks()) {
                self.independent.insert(unlocked.clone());
            }
            if self.skip_blobs && best.transaction.transaction.is_eip4844() {
                // blobs should be skipped, marking them as invalid will ensure that no dependent
                // transactions are returned
                self.mark_invalid(
                    &best.transaction,
                    InvalidPoolTransactionError::Eip4844(
                        Eip4844PoolTransactionError::NoEip4844Blobs,
                    ),
                )
            } else {
                if self.new_transaction_receiver.is_some() {
                    // Remember the yielded priority so that late-arriving, higher-priority
                    // transactions can be rejected in `try_recv`.
                    self.last_priority = Some(best.priority.clone())
                }
                return Some(best.transaction)
            }
        }
    }
}
/// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the
/// transactions of iter with predicate.
///
/// Transactions rejected by the predicate are marked as invalid via
/// [`BestTransactions::mark_invalid`](crate::traits::BestTransactions::mark_invalid).
pub struct BestTransactionFilter<I, P> {
    /// The wrapped transaction iterator.
    pub(crate) best: I,
    /// Returns `true` for transactions that should be yielded.
    pub(crate) predicate: P,
}
impl<I, P> BestTransactionFilter<I, P> {
    /// Wraps `best` so that only items accepted by `predicate` are yielded.
    pub const fn new(best: I, predicate: P) -> Self {
        Self { predicate, best }
    }
}
impl<I, P> Iterator for BestTransactionFilter<I, P>
where
    I: crate::traits::BestTransactions,
    P: FnMut(&<I as Iterator>::Item) -> bool,
{
    type Item = <I as Iterator>::Item;

    fn next(&mut self) -> Option<Self::Item> {
        // Keep drawing from the inner iterator until the predicate accepts a transaction.
        loop {
            let candidate = self.best.next()?;
            if !(self.predicate)(&candidate) {
                // Rejected: mark it invalid so dependent transactions are not yielded either.
                self.best.mark_invalid(
                    &candidate,
                    InvalidPoolTransactionError::Consensus(
                        InvalidTransactionError::TxTypeNotSupported,
                    ),
                );
                continue;
            }
            return Some(candidate);
        }
    }
}
impl<I, P> crate::traits::BestTransactions for BestTransactionFilter<I, P>
where
    I: crate::traits::BestTransactions,
    P: FnMut(&<I as Iterator>::Item) -> bool + Send,
{
    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
        // Forward straight to the wrapped iterator.
        self.best.mark_invalid(tx, kind)
    }

    fn no_updates(&mut self) {
        self.best.no_updates()
    }

    fn skip_blobs(&mut self) {
        self.best.set_skip_blobs(true)
    }

    fn set_skip_blobs(&mut self, skip_blobs: bool) {
        self.best.set_skip_blobs(skip_blobs)
    }
}
// The predicate `P` is a closure without a `Debug` impl, so only the inner iterator is shown.
impl<I: fmt::Debug, P> fmt::Debug for BestTransactionFilter<I, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("BestTransactionFilter");
        dbg.field("best", &self.best);
        dbg.finish()
    }
}
/// Wrapper over [`crate::traits::BestTransactions`] that prioritizes transactions of certain
/// senders capping total gas used by such transactions.
#[derive(Debug)]
pub struct BestTransactionsWithPrioritizedSenders<I: Iterator> {
    /// Inner iterator
    inner: I,
    /// A set of senders whose transactions should be prioritized
    prioritized_senders: HashSet<Address>,
    /// Maximum total gas limit of prioritized transactions
    max_prioritized_gas: u64,
    /// Buffer with transactions that are not being prioritized. Those will be the first to be
    /// included after the prioritized transactions
    buffer: VecDeque<I::Item>,
    /// Tracker of total gas limit of prioritized transactions. Once it reaches
    /// `max_prioritized_gas` no more transactions will be prioritized
    prioritized_gas: u64,
}
impl<I: Iterator> BestTransactionsWithPrioritizedSenders<I> {
/// Constructs a new [`BestTransactionsWithPrioritizedSenders`].
pub fn new(prioritized_senders: HashSet<Address>, max_prioritized_gas: u64, inner: I) -> Self {
Self {
inner,
prioritized_senders,
max_prioritized_gas,
buffer: Default::default(),
prioritized_gas: Default::default(),
}
}
}
impl<I, T> Iterator for BestTransactionsWithPrioritizedSenders<I>
where
    I: crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T>>>,
    T: PoolTransaction,
{
    type Item = <I as Iterator>::Item;
    fn next(&mut self) -> Option<Self::Item> {
        // If we have space, try prioritizing transactions
        if self.prioritized_gas < self.max_prioritized_gas {
            for item in &mut self.inner {
                if self.prioritized_senders.contains(&item.transaction.sender()) &&
                    self.prioritized_gas + item.transaction.gas_limit() <=
                        self.max_prioritized_gas
                {
                    // Prioritized sender within the remaining gas budget: yield immediately.
                    self.prioritized_gas += item.transaction.gas_limit();
                    return Some(item)
                }
                // Not prioritized (or over budget): park the item. Buffered items are replayed
                // FIFO below before anything further is pulled from the inner iterator.
                self.buffer.push_back(item);
            }
        }
        if let Some(item) = self.buffer.pop_front() {
            Some(item)
        } else {
            self.inner.next()
        }
    }
}
impl<I, T> crate::traits::BestTransactions for BestTransactionsWithPrioritizedSenders<I>
where
    I: crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T>>>,
    T: PoolTransaction,
{
    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
        self.inner.mark_invalid(tx, kind)
    }
    fn no_updates(&mut self) {
        self.inner.no_updates()
    }
    fn set_skip_blobs(&mut self, skip_blobs: bool) {
        // Also purge blob transactions that were already parked in the buffer — otherwise they
        // would still be yielded from the buffer after blobs were disabled.
        if skip_blobs {
            self.buffer.retain(|tx| !tx.transaction.is_eip4844())
        }
        self.inner.set_skip_blobs(skip_blobs)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
pool::pending::PendingPool,
test_utils::{MockOrdering, MockTransaction, MockTransactionFactory},
BestTransactions, Priority,
};
use alloy_primitives::U256;
#[test]
fn test_best_iter() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 10;
// insert 10 gapless tx
let tx = MockTransaction::eip1559();
for nonce in 0..num_tx {
let tx = tx.clone().rng_hash().with_nonce(nonce);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best();
assert_eq!(best.all.len(), num_tx as usize);
assert_eq!(best.independent.len(), 1);
// check tx are returned in order
for nonce in 0..num_tx {
assert_eq!(best.independent.len(), 1);
let tx = best.next().unwrap();
assert_eq!(tx.nonce(), nonce);
}
}
#[test]
fn test_best_iter_invalid() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 10;
// insert 10 gapless tx
let tx = MockTransaction::eip1559();
for nonce in 0..num_tx {
let tx = tx.clone().rng_hash().with_nonce(nonce);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best();
// mark the first tx as invalid
let invalid = best.independent.iter().next().unwrap();
best.mark_invalid(
&invalid.transaction.clone(),
InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported),
);
// iterator is empty
assert!(best.next().is_none());
}
#[test]
fn test_best_transactions_iter_invalid() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 10;
// insert 10 gapless tx
let tx = MockTransaction::eip1559();
for nonce in 0..num_tx {
let tx = tx.clone().rng_hash().with_nonce(nonce);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best: Box<
dyn crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<MockTransaction>>>,
> = Box::new(pool.best());
let tx = Iterator::next(&mut best).unwrap();
crate::traits::BestTransactions::mark_invalid(
&mut *best,
&tx,
InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported),
);
assert!(Iterator::next(&mut best).is_none());
}
#[test]
fn test_best_with_fees_iter_base_fee_satisfied() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 5;
let base_fee: u64 = 10;
let base_fee_per_blob_gas: u64 = 15;
// Insert transactions with a max_fee_per_gas greater than or equal to the base fee
// Without blob fee
for nonce in 0..num_tx {
let tx = MockTransaction::eip1559()
.rng_hash()
.with_nonce(nonce)
.with_max_fee(base_fee as u128 + 5);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas);
for nonce in 0..num_tx {
let tx = best.next().expect("Transaction should be returned");
assert_eq!(tx.nonce(), nonce);
assert!(tx.transaction.max_fee_per_gas() >= base_fee as u128);
}
}
#[test]
fn test_best_with_fees_iter_base_fee_violated() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 5;
let base_fee: u64 = 20;
let base_fee_per_blob_gas: u64 = 15;
// Insert transactions with a max_fee_per_gas less than the base fee
for nonce in 0..num_tx {
let tx = MockTransaction::eip1559()
.rng_hash()
.with_nonce(nonce)
.with_max_fee(base_fee as u128 - 5);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas);
// No transaction should be returned since all violate the base fee
assert!(best.next().is_none());
}
#[test]
fn test_best_with_fees_iter_blob_fee_satisfied() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 5;
let base_fee: u64 = 10;
let base_fee_per_blob_gas: u64 = 20;
// Insert transactions with a max_fee_per_blob_gas greater than or equal to the base fee per
// blob gas
for nonce in 0..num_tx {
let tx = MockTransaction::eip4844()
.rng_hash()
.with_nonce(nonce)
.with_max_fee(base_fee as u128 + 5)
.with_blob_fee(base_fee_per_blob_gas as u128 + 5);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas);
// All transactions should be returned in order since they satisfy both base fee and blob
// fee
for nonce in 0..num_tx {
let tx = best.next().expect("Transaction should be returned");
assert_eq!(tx.nonce(), nonce);
assert!(tx.transaction.max_fee_per_gas() >= base_fee as u128);
assert!(
tx.transaction.max_fee_per_blob_gas().unwrap() >= base_fee_per_blob_gas as u128
);
}
// No more transactions should be returned
assert!(best.next().is_none());
}
#[test]
fn test_best_with_fees_iter_blob_fee_violated() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let num_tx = 5;
let base_fee: u64 = 10;
let base_fee_per_blob_gas: u64 = 20;
// Insert transactions with a max_fee_per_blob_gas less than the base fee per blob gas
for nonce in 0..num_tx {
let tx = MockTransaction::eip4844()
.rng_hash()
.with_nonce(nonce)
.with_max_fee(base_fee as u128 + 5)
.with_blob_fee(base_fee_per_blob_gas as u128 - 5);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas);
// No transaction should be returned since all violate the blob fee
assert!(best.next().is_none());
}
#[test]
fn test_best_with_fees_iter_mixed_fees() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let base_fee: u64 = 10;
let base_fee_per_blob_gas: u64 = 20;
// Insert transactions with varying max_fee_per_gas and max_fee_per_blob_gas
let tx1 =
MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5);
let tx2 = MockTransaction::eip4844()
.rng_hash()
.with_nonce(1)
.with_max_fee(base_fee as u128 + 5)
.with_blob_fee(base_fee_per_blob_gas as u128 + 5);
let tx3 = MockTransaction::eip4844()
.rng_hash()
.with_nonce(2)
.with_max_fee(base_fee as u128 + 5)
.with_blob_fee(base_fee_per_blob_gas as u128 - 5);
let tx4 =
MockTransaction::eip1559().rng_hash().with_nonce(3).with_max_fee(base_fee as u128 - 5);
pool.add_transaction(Arc::new(f.validated(tx1.clone())), 0);
pool.add_transaction(Arc::new(f.validated(tx2.clone())), 0);
pool.add_transaction(Arc::new(f.validated(tx3)), 0);
pool.add_transaction(Arc::new(f.validated(tx4)), 0);
let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas);
let expected_order = vec![tx1, tx2];
for expected_tx in expected_order {
let tx = best.next().expect("Transaction should be returned");
assert_eq!(tx.transaction, expected_tx);
}
// No more transactions should be returned
assert!(best.next().is_none());
}
#[test]
fn test_best_add_transaction_with_next_nonce() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
// Add 5 transactions with increasing nonces to the pool
let num_tx = 5;
let tx = MockTransaction::eip1559();
for nonce in 0..num_tx {
let tx = tx.clone().rng_hash().with_nonce(nonce);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
// Create a BestTransactions iterator from the pool
let mut best = pool.best();
// Use a broadcast channel for transaction updates
let (tx_sender, tx_receiver) =
tokio::sync::broadcast::channel::<PendingTransaction<MockOrdering>>(1000);
best.new_transaction_receiver = Some(tx_receiver);
// Create a new transaction with nonce 5 and validate it
let new_tx = MockTransaction::eip1559().rng_hash().with_nonce(5);
let valid_new_tx = f.validated(new_tx);
// Send the new transaction through the broadcast channel
let pending_tx = PendingTransaction {
submission_id: 10,
transaction: Arc::new(valid_new_tx.clone()),
priority: Priority::Value(U256::from(1000)),
};
tx_sender.send(pending_tx.clone()).unwrap();
// Add new transactions to the iterator
best.add_new_transactions();
// Verify that the new transaction has been added to the 'all' map
assert_eq!(best.all.len(), 6);
assert!(best.all.contains_key(valid_new_tx.id()));
// Verify that the new transaction has been added to the 'independent' set
assert_eq!(best.independent.len(), 2);
assert!(best.independent.contains(&pending_tx));
}
#[test]
fn test_best_add_transaction_with_ancestor() {
// Initialize a new PendingPool with default MockOrdering and MockTransactionFactory
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
// Add 5 transactions with increasing nonces to the pool
let num_tx = 5;
let tx = MockTransaction::eip1559();
for nonce in 0..num_tx {
let tx = tx.clone().rng_hash().with_nonce(nonce);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
// Create a BestTransactions iterator from the pool
let mut best = pool.best();
// Use a broadcast channel for transaction updates
let (tx_sender, tx_receiver) =
tokio::sync::broadcast::channel::<PendingTransaction<MockOrdering>>(1000);
best.new_transaction_receiver = Some(tx_receiver);
// Create a new transaction with nonce 5 and validate it
let base_tx1 = MockTransaction::eip1559().rng_hash().with_nonce(5);
let valid_new_tx1 = f.validated(base_tx1.clone());
// Send the new transaction through the broadcast channel
let pending_tx1 = PendingTransaction {
submission_id: 10,
transaction: Arc::new(valid_new_tx1.clone()),
priority: Priority::Value(U256::from(1000)),
};
tx_sender.send(pending_tx1.clone()).unwrap();
// Add new transactions to the iterator
best.add_new_transactions();
// Verify that the new transaction has been added to the 'all' map
assert_eq!(best.all.len(), 6);
assert!(best.all.contains_key(valid_new_tx1.id()));
// Verify that the new transaction has been added to the 'independent' set
assert_eq!(best.independent.len(), 2);
assert!(best.independent.contains(&pending_tx1));
// Attempt to add a new transaction with a different nonce (not a duplicate)
let base_tx2 = base_tx1.with_nonce(6);
let valid_new_tx2 = f.validated(base_tx2);
// Send the new transaction through the broadcast channel
let pending_tx2 = PendingTransaction {
submission_id: 11, // Different submission ID
transaction: Arc::new(valid_new_tx2.clone()),
priority: Priority::Value(U256::from(1000)),
};
tx_sender.send(pending_tx2.clone()).unwrap();
// Add new transactions to the iterator
best.add_new_transactions();
// Verify that the new transaction has been added to 'all'
assert_eq!(best.all.len(), 7);
assert!(best.all.contains_key(valid_new_tx2.id()));
// Verify that the new transaction has not been added to the 'independent' set
assert_eq!(best.independent.len(), 2);
assert!(!best.independent.contains(&pending_tx2));
}
#[test]
fn test_best_transactions_filter_trait_object() {
// Initialize a new PendingPool with default MockOrdering and MockTransactionFactory
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
// Add 5 transactions with increasing nonces to the pool
let num_tx = 5;
let tx = MockTransaction::eip1559();
for nonce in 0..num_tx {
let tx = tx.clone().rng_hash().with_nonce(nonce);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
// Create a trait object of BestTransactions iterator from the pool
let best: Box<dyn crate::traits::BestTransactions<Item = _>> = Box::new(pool.best());
// Create a filter that only returns transactions with even nonces
let filter =
BestTransactionFilter::new(best, |tx: &Arc<ValidPoolTransaction<MockTransaction>>| {
tx.nonce().is_multiple_of(2)
});
// Verify that the filter only returns transactions with even nonces
for tx in filter {
assert_eq!(tx.nonce() % 2, 0);
}
}
#[test]
fn test_best_transactions_prioritized_senders() {
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
// Add 5 plain transactions from different senders with increasing gas price
for gas_price in 0..5 {
let tx = MockTransaction::eip1559().with_gas_price((gas_price + 1) * 10);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
// Add another transaction with 5 gas price that's going to be prioritized by sender
let prioritized_tx = MockTransaction::eip1559().with_gas_price(5).with_gas_limit(200);
let valid_prioritized_tx = f.validated(prioritized_tx.clone());
pool.add_transaction(Arc::new(valid_prioritized_tx), 0);
// Add another transaction with 3 gas price that should not be prioritized by sender because
// of gas limit.
let prioritized_tx2 = MockTransaction::eip1559().with_gas_price(3);
let valid_prioritized_tx2 = f.validated(prioritized_tx2.clone());
pool.add_transaction(Arc::new(valid_prioritized_tx2), 0);
let prioritized_senders =
HashSet::from([prioritized_tx.sender(), prioritized_tx2.sender()]);
let best =
BestTransactionsWithPrioritizedSenders::new(prioritized_senders, 200, pool.best());
// Verify that the prioritized transaction is returned first
// and the rest are returned in the reverse order of gas price
let mut iter = best.into_iter();
let top_of_block_tx = iter.next().unwrap();
assert_eq!(top_of_block_tx.max_fee_per_gas(), 5);
assert_eq!(top_of_block_tx.sender(), prioritized_tx.sender());
for gas_price in (0..5).rev() {
assert_eq!(iter.next().unwrap().max_fee_per_gas(), (gas_price + 1) * 10);
}
// Due to the gas limit, the transaction from second prioritized sender was not
// prioritized.
let top_of_block_tx2 = iter.next().unwrap();
assert_eq!(top_of_block_tx2.max_fee_per_gas(), 3);
assert_eq!(top_of_block_tx2.sender(), prioritized_tx2.sender());
}
#[test]
fn test_best_with_fees_iter_no_blob_fee_required() {
// Tests transactions without blob fees where base fees are checked.
let mut pool = PendingPool::new(MockOrdering::default());
let mut f = MockTransactionFactory::default();
let base_fee: u64 = 10;
let base_fee_per_blob_gas: u64 = 0; // No blob fee requirement
// Insert transactions with max_fee_per_gas above the base fee
for nonce in 0..5 {
let tx = MockTransaction::eip1559()
.rng_hash()
.with_nonce(nonce)
.with_max_fee(base_fee as u128 + 5);
let valid_tx = f.validated(tx);
pool.add_transaction(Arc::new(valid_tx), 0);
}
let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas);
// All transactions should be returned as no blob fee requirement is imposed
for nonce in 0..5 {
let tx = best.next().expect("Transaction should be returned");
assert_eq!(tx.nonce(), nonce);
}
// Ensure no more transactions are left
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
use crate::{traits::PropagateKind, PoolTransaction, SubPool, ValidPoolTransaction};
use alloy_primitives::{TxHash, B256};
use std::sync::Arc;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// An event that happened to a transaction and contains its full body where possible.
///
/// Most variants only carry the transaction hash; [`Self::Replaced`] keeps the full replaced
/// transaction so consumers can inspect what was evicted.
#[derive(Debug)]
pub enum FullTransactionEvent<T: PoolTransaction> {
    /// Transaction has been added to the pending pool.
    Pending(TxHash),
    /// Transaction has been added to the queued pool.
    Queued(TxHash),
    /// Transaction has been included in the block belonging to this hash.
    Mined {
        /// The hash of the mined transaction.
        tx_hash: TxHash,
        /// The hash of the mined block that contains the transaction.
        block_hash: B256,
    },
    /// Transaction has been replaced by the transaction belonging to the hash.
    ///
    /// E.g. same (sender + nonce) pair
    Replaced {
        /// The transaction that was replaced.
        transaction: Arc<ValidPoolTransaction<T>>,
        /// The transaction that replaced the event subject.
        replaced_by: TxHash,
    },
    /// Transaction was dropped due to configured limits.
    Discarded(TxHash),
    /// Transaction became invalid indefinitely.
    Invalid(TxHash),
    /// Transaction was propagated to peers.
    Propagated(Arc<Vec<PropagateKind>>),
}
impl<T: PoolTransaction> Clone for FullTransactionEvent<T> {
fn clone(&self) -> Self {
match self {
Self::Pending(hash) => Self::Pending(*hash),
Self::Queued(hash) => Self::Queued(*hash),
Self::Mined { tx_hash, block_hash } => {
Self::Mined { tx_hash: *tx_hash, block_hash: *block_hash }
}
Self::Replaced { transaction, replaced_by } => {
Self::Replaced { transaction: Arc::clone(transaction), replaced_by: *replaced_by }
}
Self::Discarded(hash) => Self::Discarded(*hash),
Self::Invalid(hash) => Self::Invalid(*hash),
Self::Propagated(propagated) => Self::Propagated(Arc::clone(propagated)),
}
}
}
/// Various events that describe status changes of a transaction.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum TransactionEvent {
/// Transaction has been added to the pending pool.
Pending,
/// Transaction has been added to the queued pool.
Queued,
/// Transaction has been included in the block belonging to this hash.
Mined(B256),
/// Transaction has been replaced by the transaction belonging to the hash.
///
/// E.g. same (sender + nonce) pair
Replaced(TxHash),
/// Transaction was dropped due to configured limits.
Discarded,
/// Transaction became invalid indefinitely.
Invalid,
/// Transaction was propagated to peers.
Propagated(Arc<Vec<PropagateKind>>),
}
impl TransactionEvent {
/// Returns `true` if the event is final and no more events are expected for this transaction
/// hash.
pub const fn is_final(&self) -> bool {
matches!(self, Self::Replaced(_) | Self::Mined(_) | Self::Discarded)
}
}
/// Represents a new transaction
#[derive(Debug)]
pub struct NewTransactionEvent<T: PoolTransaction> {
/// The pool which the transaction was moved to.
pub subpool: SubPool,
/// Actual transaction
pub transaction: Arc<ValidPoolTransaction<T>>,
}
impl<T: PoolTransaction> NewTransactionEvent<T> {
/// Creates a new event for a pending transaction.
pub const fn pending(transaction: Arc<ValidPoolTransaction<T>>) -> Self {
Self { subpool: SubPool::Pending, transaction }
}
}
impl<T: PoolTransaction> Clone for NewTransactionEvent<T> {
fn clone(&self) -> Self {
Self { subpool: self.subpool, transaction: self.transaction.clone() }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/txpool.rs | crates/transaction-pool/src/pool/txpool.rs | //! The internal transaction pool implementation.
use crate::{
config::{LocalTransactionConfig, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER},
error::{
Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError,
PoolError, PoolErrorKind,
},
identifier::{SenderId, TransactionId},
metrics::{AllTransactionsMetrics, TxPoolMetrics},
pool::{
best::BestTransactions,
blob::BlobTransactions,
parked::{BasefeeOrd, ParkedPool, QueuedOrd},
pending::PendingPool,
state::{SubPool, TxState},
update::{Destination, PoolUpdate, UpdateOutcome},
AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome,
},
traits::{BestTransactionsAttributes, BlockInfo, PoolSize},
PoolConfig, PoolResult, PoolTransaction, PoolUpdateKind, PriceBumpConfig, TransactionOrdering,
ValidPoolTransaction, U256,
};
use alloy_consensus::constants::{
EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, KECCAK_EMPTY,
LEGACY_TX_TYPE_ID,
};
use alloy_eips::{
eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE},
eip4844::BLOB_TX_MIN_BLOB_GASPRICE,
Typed2718,
};
use alloy_primitives::{Address, TxHash, B256};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use std::{
cmp::Ordering,
collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet},
fmt,
ops::Bound::{Excluded, Unbounded},
sync::Arc,
};
use tracing::{trace, warn};
#[cfg_attr(doc, aquamarine::aquamarine)]
// TODO: Inlined diagram due to a bug in aquamarine library, should become an include when it's
// fixed. See https://github.com/mersinvald/aquamarine/issues/50
// include_mmd!("docs/mermaid/txpool.mmd")
/// A pool that manages transactions.
///
/// This pool maintains the state of all transactions and stores them accordingly.
///
/// ```mermaid
/// graph TB
/// subgraph TxPool
/// direction TB
/// pool[(All Transactions)]
/// subgraph Subpools
/// direction TB
/// B3[(Queued)]
/// B1[(Pending)]
/// B2[(Basefee)]
/// B4[(Blob)]
/// end
/// end
/// discard([discard])
/// production([Block Production])
/// new([New Block])
/// A[Incoming Tx] --> B[Validation] -->|ins
/// pool --> |if ready + blobfee too low| B4
/// pool --> |if ready| B1
/// pool --> |if ready + basfee too low| B2
/// pool --> |nonce gap or lack of funds| B3
/// pool --> |update| pool
/// B1 --> |best| production
/// B2 --> |worst| discard
/// B3 --> |worst| discard
/// B4 --> |worst| discard
/// B1 --> |increased blob fee| B4
/// B4 --> |decreased blob fee| B1
/// B1 --> |increased base fee| B2
/// B2 --> |decreased base fee| B1
/// B3 --> |promote| B1
/// B3 --> |promote| B2
/// new --> |apply state changes| pool
/// ```
pub struct TxPool<T: TransactionOrdering> {
/// Contains the currently known information about the senders.
sender_info: FxHashMap<SenderId, SenderInfo>,
/// pending subpool
///
/// Holds transactions that are ready to be executed on the current state.
pending_pool: PendingPool<T>,
/// Pool settings to enforce limits etc.
config: PoolConfig,
/// queued subpool
///
/// Holds all parked transactions that depend on external changes from the sender:
///
/// - blocked by missing ancestor transaction (has nonce gaps)
/// - sender lacks funds to pay for this transaction.
queued_pool: ParkedPool<QueuedOrd<T::Transaction>>,
/// base fee subpool
///
/// Holds all parked transactions that currently violate the dynamic fee requirement but could
/// be moved to pending if the base fee changes in their favor (decreases) in future blocks.
basefee_pool: ParkedPool<BasefeeOrd<T::Transaction>>,
/// Blob transactions in the pool that are __not pending__.
///
/// This means they either do not satisfy the dynamic fee requirement or the blob fee
/// requirement. These transactions can be moved to pending if the base fee or blob fee changes
/// in their favor (decreases) in future blocks. The transaction may need both the base fee and
/// blob fee to decrease to become executable.
blob_pool: BlobTransactions<T::Transaction>,
/// All transactions in the pool.
all_transactions: AllTransactions<T::Transaction>,
/// Transaction pool metrics
metrics: TxPoolMetrics,
/// The last update kind that was applied to the pool.
latest_update_kind: Option<PoolUpdateKind>,
}
// === impl TxPool ===
impl<T: TransactionOrdering> TxPool<T> {
/// Create a new graph pool instance.
pub fn new(ordering: T, config: PoolConfig) -> Self {
Self {
sender_info: Default::default(),
pending_pool: PendingPool::with_buffer(
ordering,
config.max_new_pending_txs_notifications,
),
queued_pool: Default::default(),
basefee_pool: Default::default(),
blob_pool: Default::default(),
all_transactions: AllTransactions::new(&config),
config,
metrics: Default::default(),
latest_update_kind: None,
}
}
/// Retrieves the highest nonce for a specific sender from the transaction pool.
pub fn get_highest_nonce_by_sender(&self, sender: SenderId) -> Option<u64> {
self.all().txs_iter(sender).last().map(|(_, tx)| tx.transaction.nonce())
}
/// Retrieves the highest transaction (wrapped in an `Arc`) for a specific sender from the
/// transaction pool.
pub fn get_highest_transaction_by_sender(
&self,
sender: SenderId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
self.all().txs_iter(sender).last().map(|(_, tx)| Arc::clone(&tx.transaction))
}
/// Returns the transaction with the highest nonce that is executable given the on chain nonce.
///
/// If the pool already tracks a higher nonce for the given sender, then this nonce is used
/// instead.
///
/// Note: The next pending pooled transaction must have the on chain nonce.
pub(crate) fn get_highest_consecutive_transaction_by_sender(
&self,
mut on_chain: TransactionId,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
let mut last_consecutive_tx = None;
// ensure this operates on the most recent
if let Some(current) = self.sender_info.get(&on_chain.sender) {
on_chain.nonce = on_chain.nonce.max(current.state_nonce);
}
let mut next_expected_nonce = on_chain.nonce;
for (id, tx) in self.all().descendant_txs_inclusive(&on_chain) {
if next_expected_nonce != id.nonce {
break
}
next_expected_nonce = id.next_nonce();
last_consecutive_tx = Some(tx);
}
last_consecutive_tx.map(|tx| Arc::clone(&tx.transaction))
}
/// Returns access to the [`AllTransactions`] container.
pub(crate) const fn all(&self) -> &AllTransactions<T::Transaction> {
&self.all_transactions
}
/// Returns all senders in the pool
pub(crate) fn unique_senders(&self) -> HashSet<Address> {
self.all_transactions.txs.values().map(|tx| tx.transaction.sender()).collect()
}
/// Returns stats about the size of pool.
pub fn size(&self) -> PoolSize {
PoolSize {
pending: self.pending_pool.len(),
pending_size: self.pending_pool.size(),
basefee: self.basefee_pool.len(),
basefee_size: self.basefee_pool.size(),
queued: self.queued_pool.len(),
queued_size: self.queued_pool.size(),
blob: self.blob_pool.len(),
blob_size: self.blob_pool.size(),
total: self.all_transactions.len(),
}
}
/// Returns the currently tracked block values
pub const fn block_info(&self) -> BlockInfo {
BlockInfo {
block_gas_limit: self.all_transactions.block_gas_limit,
last_seen_block_hash: self.all_transactions.last_seen_block_hash,
last_seen_block_number: self.all_transactions.last_seen_block_number,
pending_basefee: self.all_transactions.pending_fees.base_fee,
pending_blob_fee: Some(self.all_transactions.pending_fees.blob_fee),
}
}
/// Updates the tracked blob fee
fn update_blob_fee(&mut self, mut pending_blob_fee: u128, base_fee_update: Ordering) {
std::mem::swap(&mut self.all_transactions.pending_fees.blob_fee, &mut pending_blob_fee);
match (self.all_transactions.pending_fees.blob_fee.cmp(&pending_blob_fee), base_fee_update)
{
(Ordering::Equal, Ordering::Equal | Ordering::Greater) => {
// fee unchanged, nothing to update
}
(Ordering::Greater, Ordering::Equal | Ordering::Greater) => {
// increased blob fee: recheck pending pool and remove all that are no longer valid
let removed =
self.pending_pool.update_blob_fee(self.all_transactions.pending_fees.blob_fee);
for tx in removed {
let to = {
let tx =
self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
// the blob fee is too high now, unset the blob fee cap block flag
tx.state.remove(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
tx.subpool = tx.state.into();
tx.subpool
};
self.add_transaction_to_subpool(to, tx);
}
}
(Ordering::Less, _) | (_, Ordering::Less) => {
// decreased blob/base fee: recheck blob pool and promote all that are now valid
let removed =
self.blob_pool.enforce_pending_fees(&self.all_transactions.pending_fees);
for tx in removed {
let to = {
let tx =
self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
tx.state.insert(TxState::ENOUGH_BLOB_FEE_CAP_BLOCK);
tx.state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
tx.subpool = tx.state.into();
tx.subpool
};
self.add_transaction_to_subpool(to, tx);
}
}
}
}
/// Updates the tracked basefee
///
/// Depending on the change in direction of the basefee, this will promote or demote
/// transactions from the basefee pool.
fn update_basefee(&mut self, mut pending_basefee: u64) -> Ordering {
std::mem::swap(&mut self.all_transactions.pending_fees.base_fee, &mut pending_basefee);
match self.all_transactions.pending_fees.base_fee.cmp(&pending_basefee) {
Ordering::Equal => {
// fee unchanged, nothing to update
Ordering::Equal
}
Ordering::Greater => {
// increased base fee: recheck pending pool and remove all that are no longer valid
let removed =
self.pending_pool.update_base_fee(self.all_transactions.pending_fees.base_fee);
for tx in removed {
let to = {
let tx =
self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
tx.state.remove(TxState::ENOUGH_FEE_CAP_BLOCK);
tx.subpool = tx.state.into();
tx.subpool
};
self.add_transaction_to_subpool(to, tx);
}
Ordering::Greater
}
Ordering::Less => {
// Base fee decreased: recheck BaseFee and promote.
// Invariants:
// - BaseFee contains only non-blob txs (blob txs live in Blob) and they already
// have ENOUGH_BLOB_FEE_CAP_BLOCK.
// - PENDING_POOL_BITS = BASE_FEE_POOL_BITS | ENOUGH_FEE_CAP_BLOCK |
// ENOUGH_BLOB_FEE_CAP_BLOCK.
// With the lower base fee they gain ENOUGH_FEE_CAP_BLOCK, so we can set the bit and
// insert directly into Pending (skip generic routing).
self.basefee_pool.enforce_basefee_with(
self.all_transactions.pending_fees.base_fee,
|tx| {
// Update transaction state β guaranteed Pending by the invariants above
let meta =
self.all_transactions.txs.get_mut(tx.id()).expect("tx exists in set");
meta.state.insert(TxState::ENOUGH_FEE_CAP_BLOCK);
meta.subpool = meta.state.into();
trace!(target: "txpool", hash=%tx.transaction.hash(), pool=?meta.subpool, "Adding transaction to a subpool");
match meta.subpool {
SubPool::Queued => self.queued_pool.add_transaction(tx),
SubPool::Pending => {
self.pending_pool.add_transaction(tx, self.all_transactions.pending_fees.base_fee);
}
SubPool::Blob => {
self.blob_pool.add_transaction(tx);
}
SubPool::BaseFee => {
// This should be unreachable as transactions from BaseFee pool with
// decreased basefee are guaranteed to become Pending
warn!( target: "txpool", "BaseFee transactions should become Pending after basefee decrease");
}
}
},
);
Ordering::Less
}
}
}
/// Sets the current block info for the pool.
///
/// This will also apply updates to the pool based on the new base fee and blob fee
pub fn set_block_info(&mut self, info: BlockInfo) {
// first update the subpools based on the new values
let basefee_ordering = self.update_basefee(info.pending_basefee);
if let Some(blob_fee) = info.pending_blob_fee {
self.update_blob_fee(blob_fee, basefee_ordering)
}
// then update tracked values
self.all_transactions.set_block_info(info);
}
/// Returns an iterator that yields transactions that are ready to be included in the block with
/// the tracked fees.
pub(crate) fn best_transactions(&self) -> BestTransactions<T> {
self.pending_pool.best()
}
/// Returns an iterator that yields transactions that are ready to be included in the block with
/// the given base fee and optional blob fee.
///
/// If the provided attributes differ from the currently tracked fees, this will also include
/// transactions that are unlocked by the new fees, or exclude transactions that are no longer
/// valid with the new fees.
pub(crate) fn best_transactions_with_attributes(
&self,
best_transactions_attributes: BestTransactionsAttributes,
) -> Box<dyn crate::traits::BestTransactions<Item = Arc<ValidPoolTransaction<T::Transaction>>>>
{
// First we need to check if the given base fee is different than what's currently being
// tracked
match best_transactions_attributes.basefee.cmp(&self.all_transactions.pending_fees.base_fee)
{
Ordering::Equal => {
// for EIP-4844 transactions we also need to check if the blob fee is now lower than
// what's currently being tracked, if so we need to include transactions from the
// blob pool that are valid with the lower blob fee
let new_blob_fee = best_transactions_attributes.blob_fee.unwrap_or_default();
match new_blob_fee.cmp(&(self.all_transactions.pending_fees.blob_fee as u64)) {
Ordering::Less => {
// it's possible that this swing unlocked more blob transactions
let unlocked =
self.blob_pool.satisfy_attributes(best_transactions_attributes);
Box::new(self.pending_pool.best_with_unlocked_and_attributes(
unlocked,
best_transactions_attributes.basefee,
new_blob_fee,
))
}
Ordering::Equal => Box::new(self.pending_pool.best()),
Ordering::Greater => {
// no additional transactions unlocked
Box::new(self.pending_pool.best_with_basefee_and_blobfee(
best_transactions_attributes.basefee,
best_transactions_attributes.blob_fee.unwrap_or_default(),
))
}
}
}
Ordering::Greater => {
// base fee increased, we need to check how the blob fee moved
let new_blob_fee = best_transactions_attributes.blob_fee.unwrap_or_default();
match new_blob_fee.cmp(&(self.all_transactions.pending_fees.blob_fee as u64)) {
Ordering::Less => {
// it's possible that this swing unlocked more blob transactions
let unlocked =
self.blob_pool.satisfy_attributes(best_transactions_attributes);
Box::new(self.pending_pool.best_with_unlocked_and_attributes(
unlocked,
best_transactions_attributes.basefee,
new_blob_fee,
))
}
Ordering::Equal | Ordering::Greater => {
// no additional transactions unlocked
Box::new(self.pending_pool.best_with_basefee_and_blobfee(
best_transactions_attributes.basefee,
new_blob_fee,
))
}
}
}
Ordering::Less => {
// base fee decreased, we need to move transactions from the basefee + blob pool to
// the pending pool that might be unlocked by the lower base fee
let mut unlocked = self
.basefee_pool
.satisfy_base_fee_transactions(best_transactions_attributes.basefee);
// also include blob pool transactions that are now unlocked
unlocked.extend(self.blob_pool.satisfy_attributes(best_transactions_attributes));
Box::new(self.pending_pool.best_with_unlocked_and_attributes(
unlocked,
best_transactions_attributes.basefee,
best_transactions_attributes.blob_fee.unwrap_or_default(),
))
}
}
}
/// Returns all transactions from the pending sub-pool
pub(crate) fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.pending_pool.all().collect()
}
/// Returns an iterator over all transactions from the pending sub-pool
pub(crate) fn pending_transactions_iter(
&self,
) -> impl Iterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
self.pending_pool.all()
}
/// Returns the number of transactions from the pending sub-pool
pub(crate) fn pending_transactions_count(&self) -> usize {
self.pending_pool.len()
}
/// Returns all pending transactions filtered by predicate
pub(crate) fn pending_transactions_with_predicate(
&self,
mut predicate: impl FnMut(&ValidPoolTransaction<T::Transaction>) -> bool,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.pending_transactions_iter().filter(|tx| predicate(tx)).collect()
}
/// Returns all pending transactions for the specified sender
pub(crate) fn pending_txs_by_sender(
&self,
sender: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.pending_transactions_iter().filter(|tx| tx.sender_id() == sender).collect()
}
/// Returns all transactions from parked pools
pub(crate) fn queued_transactions(&self) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.basefee_pool.all().chain(self.queued_pool.all()).collect()
}
/// Returns an iterator over all transactions from parked pools
pub(crate) fn queued_transactions_iter(
&self,
) -> impl Iterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
self.basefee_pool.all().chain(self.queued_pool.all())
}
/// Returns the number of transactions in parked pools
pub(crate) fn queued_transactions_count(&self) -> usize {
self.basefee_pool.len() + self.queued_pool.len()
}
/// Returns queued and pending transactions for the specified sender
pub fn queued_and_pending_txs_by_sender(
&self,
sender: SenderId,
) -> (SmallVec<[TransactionId; TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER]>, Vec<TransactionId>) {
(self.queued_pool.get_txs_by_sender(sender), self.pending_pool.get_txs_by_sender(sender))
}
/// Returns all queued transactions for the specified sender
pub(crate) fn queued_txs_by_sender(
&self,
sender: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.queued_transactions_iter().filter(|tx| tx.sender_id() == sender).collect()
}
/// Returns `true` if the transaction with the given hash is already included in this pool.
pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool {
self.all_transactions.contains(tx_hash)
}
/// Returns `true` if the transaction with the given id is already included in the given subpool
#[cfg(test)]
pub(crate) fn subpool_contains(&self, subpool: SubPool, id: &TransactionId) -> bool {
match subpool {
SubPool::Queued => self.queued_pool.contains(id),
SubPool::Pending => self.pending_pool.contains(id),
SubPool::BaseFee => self.basefee_pool.contains(id),
SubPool::Blob => self.blob_pool.contains(id),
}
}
/// Returns `true` if the pool is over its configured limits.
#[inline]
pub(crate) fn is_exceeded(&self) -> bool {
self.config.is_exceeded(self.size())
}
/// Returns the transaction for the given hash.
pub(crate) fn get(
&self,
tx_hash: &TxHash,
) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
self.all_transactions.by_hash.get(tx_hash).cloned()
}
/// Returns transactions for the multiple given hashes, if they exist.
pub(crate) fn get_all(
&self,
txs: Vec<TxHash>,
) -> impl Iterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
txs.into_iter().filter_map(|tx| self.get(&tx))
}
/// Returns all transactions sent from the given sender.
pub(crate) fn get_transactions_by_sender(
&self,
sender: SenderId,
) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
self.all_transactions.txs_iter(sender).map(|(_, tx)| Arc::clone(&tx.transaction)).collect()
}
/// Updates the transactions for the changed senders.
pub(crate) fn update_accounts(
&mut self,
changed_senders: FxHashMap<SenderId, SenderInfo>,
) -> UpdateOutcome<T::Transaction> {
// Apply the state changes to the total set of transactions which triggers sub-pool updates.
let updates = self.all_transactions.update(&changed_senders);
// track changed accounts
self.sender_info.extend(changed_senders);
// Process the sub-pool updates
let update = self.process_updates(updates);
// update the metrics after the update
self.update_size_metrics();
update
}
/// Updates the entire pool after a new block was mined.
///
/// This removes all mined transactions, updates according to the new base fee and blob fee and
/// rechecks sender allowance based on the given changed sender infos.
pub(crate) fn on_canonical_state_change(
&mut self,
block_info: BlockInfo,
mined_transactions: Vec<TxHash>,
changed_senders: FxHashMap<SenderId, SenderInfo>,
update_kind: PoolUpdateKind,
) -> OnNewCanonicalStateOutcome<T::Transaction> {
// update block info
let block_hash = block_info.last_seen_block_hash;
self.set_block_info(block_info);
// Remove all transaction that were included in the block
let mut removed_txs_count = 0;
for tx_hash in &mined_transactions {
if self.prune_transaction_by_hash(tx_hash).is_some() {
removed_txs_count += 1;
}
}
// Update removed transactions metric
self.metrics.removed_transactions.increment(removed_txs_count);
let UpdateOutcome { promoted, discarded } = self.update_accounts(changed_senders);
self.update_transaction_type_metrics();
self.metrics.performed_state_updates.increment(1);
// Update the latest update kind
self.latest_update_kind = Some(update_kind);
OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded }
}
/// Update sub-pools size metrics.
pub(crate) fn update_size_metrics(&self) {
let stats = self.size();
self.metrics.pending_pool_transactions.set(stats.pending as f64);
self.metrics.pending_pool_size_bytes.set(stats.pending_size as f64);
self.metrics.basefee_pool_transactions.set(stats.basefee as f64);
self.metrics.basefee_pool_size_bytes.set(stats.basefee_size as f64);
self.metrics.queued_pool_transactions.set(stats.queued as f64);
self.metrics.queued_pool_size_bytes.set(stats.queued_size as f64);
self.metrics.blob_pool_transactions.set(stats.blob as f64);
self.metrics.blob_pool_size_bytes.set(stats.blob_size as f64);
self.metrics.total_transactions.set(stats.total as f64);
}
/// Updates transaction type metrics for the entire pool.
pub(crate) fn update_transaction_type_metrics(&self) {
let mut legacy_count = 0;
let mut eip2930_count = 0;
let mut eip1559_count = 0;
let mut eip4844_count = 0;
let mut eip7702_count = 0;
for tx in self.all_transactions.transactions_iter() {
match tx.transaction.ty() {
LEGACY_TX_TYPE_ID => legacy_count += 1,
EIP2930_TX_TYPE_ID => eip2930_count += 1,
EIP1559_TX_TYPE_ID => eip1559_count += 1,
EIP4844_TX_TYPE_ID => eip4844_count += 1,
EIP7702_TX_TYPE_ID => eip7702_count += 1,
_ => {} // Ignore other types
}
}
self.metrics.total_legacy_transactions.set(legacy_count as f64);
self.metrics.total_eip2930_transactions.set(eip2930_count as f64);
self.metrics.total_eip1559_transactions.set(eip1559_count as f64);
self.metrics.total_eip4844_transactions.set(eip4844_count as f64);
self.metrics.total_eip7702_transactions.set(eip7702_count as f64);
}
/// Adds the transaction into the pool.
///
/// This pool consists of four sub-pools: `Queued`, `Pending`, `BaseFee`, and `Blob`.
///
/// The `Queued` pool contains transactions with gaps in its dependency tree: It requires
/// additional transactions that are note yet present in the pool. And transactions that the
/// sender can not afford with the current balance.
///
/// The `Pending` pool contains all transactions that have no nonce gaps, and can be afforded by
/// the sender. It only contains transactions that are ready to be included in the pending
/// block. The pending pool contains all transactions that could be listed currently, but not
/// necessarily independently. However, this pool never contains transactions with nonce gaps. A
/// transaction is considered `ready` when it has the lowest nonce of all transactions from the
/// same sender. Which is equals to the chain nonce of the sender in the pending pool.
///
/// The `BaseFee` pool contains transactions that currently can't satisfy the dynamic fee
/// requirement. With EIP-1559, transactions can become executable or not without any changes to
/// the sender's balance or nonce and instead their `feeCap` determines whether the
/// transaction is _currently_ (on the current state) ready or needs to be parked until the
/// `feeCap` satisfies the block's `baseFee`.
///
/// The `Blob` pool contains _blob_ transactions that currently can't satisfy the dynamic fee
/// requirement, or blob fee requirement. Transactions become executable only if the
/// transaction `feeCap` is greater than the block's `baseFee` and the `maxBlobFee` is greater
/// than the block's `blobFee`.
pub(crate) fn add_transaction(
&mut self,
tx: ValidPoolTransaction<T::Transaction>,
on_chain_balance: U256,
on_chain_nonce: u64,
on_chain_code_hash: Option<B256>,
) -> PoolResult<AddedTransaction<T::Transaction>> {
if self.contains(tx.hash()) {
return Err(PoolError::new(*tx.hash(), PoolErrorKind::AlreadyImported))
}
self.validate_auth(&tx, on_chain_nonce, on_chain_code_hash)?;
// Update sender info with balance and nonce
self.sender_info
.entry(tx.sender_id())
.or_default()
.update(on_chain_nonce, on_chain_balance);
match self.all_transactions.insert_tx(tx, on_chain_balance, on_chain_nonce) {
Ok(InsertOk { transaction, move_to, replaced_tx, updates, .. }) => {
// replace the new tx and remove the replaced in the subpool(s)
self.add_new_transaction(transaction.clone(), replaced_tx.clone(), move_to);
// Update inserted transactions metric
self.metrics.inserted_transactions.increment(1);
let UpdateOutcome { promoted, discarded } = self.process_updates(updates);
let replaced = replaced_tx.map(|(tx, _)| tx);
// This transaction was moved to the pending pool.
let res = if move_to.is_pending() {
AddedTransaction::Pending(AddedPendingTransaction {
transaction,
promoted,
discarded,
replaced,
})
} else {
AddedTransaction::Parked { transaction, subpool: move_to, replaced }
};
// Update size metrics after adding and potentially moving transactions.
self.update_size_metrics();
Ok(res)
}
Err(err) => {
// Update invalid transactions metric
self.metrics.invalid_transactions.increment(1);
match err {
InsertErr::Underpriced { existing: _, transaction } => Err(PoolError::new(
*transaction.hash(),
PoolErrorKind::ReplacementUnderpriced,
)),
InsertErr::FeeCapBelowMinimumProtocolFeeCap { transaction, fee_cap } => {
Err(PoolError::new(
*transaction.hash(),
PoolErrorKind::FeeCapBelowMinimumProtocolFeeCap(fee_cap),
))
}
InsertErr::ExceededSenderTransactionsCapacity { transaction } => {
Err(PoolError::new(
*transaction.hash(),
PoolErrorKind::SpammerExceededCapacity(transaction.sender()),
))
}
InsertErr::TxGasLimitMoreThanAvailableBlockGas {
transaction,
block_gas_limit,
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/blob.rs | crates/transaction-pool/src/pool/blob.rs | use super::txpool::PendingFees;
use crate::{
identifier::TransactionId, pool::size::SizeTracker, traits::BestTransactionsAttributes,
PoolTransaction, SubPoolLimit, ValidPoolTransaction,
};
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet},
sync::Arc,
};
/// A set of validated blob transactions in the pool that are __not pending__.
///
/// The purpose of this pool is to keep track of blob transactions that are queued and to evict the
/// worst blob transactions once the sub-pool is full.
///
/// This expects that certain constraints are met:
/// - blob transactions are always gap less
#[derive(Debug, Clone)]
pub struct BlobTransactions<T: PoolTransaction> {
/// Keeps track of transactions inserted in the pool.
///
/// This way we can determine when transactions were submitted to the pool.
submission_id: u64,
/// _All_ Transactions that are currently inside the pool grouped by their identifier.
by_id: BTreeMap<TransactionId, BlobTransaction<T>>,
/// _All_ transactions sorted by blob priority.
all: BTreeSet<BlobTransaction<T>>,
/// Keeps track of the current fees, so transaction priority can be calculated on insertion.
pending_fees: PendingFees,
/// Keeps track of the size of this pool.
///
/// See also [`reth_primitives_traits::InMemorySize::size`].
size_of: SizeTracker,
}
// === impl BlobTransactions ===
impl<T: PoolTransaction> BlobTransactions<T> {
    /// Adds a new transactions to the pending queue.
    ///
    /// # Panics
    ///
    /// - If the transaction is not a blob tx.
    /// - If the transaction is already included.
    pub fn add_transaction(&mut self, tx: Arc<ValidPoolTransaction<T>>) {
        assert!(tx.is_eip4844(), "transaction is not a blob tx");
        let id = *tx.id();
        assert!(!self.contains(&id), "transaction already included {:?}", self.get(&id).unwrap());
        let submission_id = self.next_id();
        // keep track of size
        self.size_of += tx.size();
        // set transaction, which will also calculate priority based on current pending fees
        let transaction = BlobTransaction::new(tx, submission_id, &self.pending_fees);
        // the entry is mirrored in both collections: `by_id` for lookup, `all` for ordering
        self.by_id.insert(id, transaction.clone());
        self.all.insert(transaction);
    }
    /// Returns the next submission id, wrapping around on `u64` overflow.
    const fn next_id(&mut self) -> u64 {
        let id = self.submission_id;
        self.submission_id = self.submission_id.wrapping_add(1);
        id
    }
    /// Removes the transaction from the pool
    pub(crate) fn remove_transaction(
        &mut self,
        id: &TransactionId,
    ) -> Option<Arc<ValidPoolTransaction<T>>> {
        // remove from queues
        let tx = self.by_id.remove(id)?;
        self.all.remove(&tx);
        // keep track of size
        self.size_of -= tx.transaction.size();
        Some(tx.transaction)
    }
    /// Returns all transactions that satisfy the given basefee and blobfee.
    ///
    /// Note: This does not remove any the transactions from the pool.
    pub(crate) fn satisfy_attributes(
        &self,
        best_transactions_attributes: BestTransactionsAttributes,
    ) -> Vec<Arc<ValidPoolTransaction<T>>> {
        let mut transactions = Vec::new();
        {
            // short path if blob_fee is None in provided best transactions attributes
            if let Some(blob_fee_to_satisfy) =
                best_transactions_attributes.blob_fee.map(|fee| fee as u128)
            {
                let mut iter = self.by_id.iter().peekable();
                while let Some((id, tx)) = iter.next() {
                    if tx.transaction.max_fee_per_blob_gas().unwrap_or_default() <
                        blob_fee_to_satisfy ||
                        tx.transaction.max_fee_per_gas() <
                            best_transactions_attributes.basefee as u128
                    {
                        // does not satisfy the blob fee or base fee
                        // still parked in blob pool -> skip descendant transactions
                        // (same-sender ids are contiguous in `by_id`, so the peek loop below
                        // walks past all higher-nonce txs of this sender in one pass)
                        'this: while let Some((peek, _)) = iter.peek() {
                            if peek.sender != id.sender {
                                break 'this
                            }
                            iter.next();
                        }
                    } else {
                        transactions.push(tx.transaction.clone());
                    }
                }
            }
        }
        transactions
    }
    /// Returns true if the pool exceeds the given limit
    #[inline]
    pub(crate) fn exceeds(&self, limit: &SubPoolLimit) -> bool {
        limit.is_exceeded(self.len(), self.size())
    }
    /// The reported size of all transactions in this pool.
    pub(crate) fn size(&self) -> usize {
        self.size_of.into()
    }
    /// Number of transactions in the entire pool
    pub(crate) fn len(&self) -> usize {
        self.by_id.len()
    }
    /// Returns whether the pool is empty
    #[cfg(test)]
    pub(crate) fn is_empty(&self) -> bool {
        self.by_id.is_empty()
    }
    /// Returns all transactions which:
    /// * have a `max_fee_per_blob_gas` greater than or equal to the given `blob_fee`, _and_
    /// * have a `max_fee_per_gas` greater than or equal to the given `base_fee`
    ///
    /// Descendants of a transaction that does not satisfy the fees are skipped, since they
    /// cannot be promoted without their ancestor.
    fn satisfy_pending_fee_ids(&self, pending_fees: &PendingFees) -> Vec<TransactionId> {
        let mut transactions = Vec::new();
        {
            let mut iter = self.by_id.iter().peekable();
            while let Some((id, tx)) = iter.next() {
                if tx.transaction.max_fee_per_blob_gas() < Some(pending_fees.blob_fee) ||
                    tx.transaction.max_fee_per_gas() < pending_fees.base_fee as u128
                {
                    // still parked in blob pool -> skip descendant transactions
                    'this: while let Some((peek, _)) = iter.peek() {
                        if peek.sender != id.sender {
                            break 'this
                        }
                        iter.next();
                    }
                } else {
                    transactions.push(*id);
                }
            }
        }
        transactions
    }
    /// Resorts the transactions in the pool based on the pool's current [`PendingFees`].
    pub(crate) fn reprioritize(&mut self) {
        // mem::take to modify without allocating, then collect to rebuild the BTreeSet
        self.all = std::mem::take(&mut self.all)
            .into_iter()
            .map(|mut tx| {
                tx.update_priority(&self.pending_fees);
                tx
            })
            .collect();
        // we need to update `by_id` as well because removal from `all` can only happen if the
        // `BlobTransaction`s in each struct are consistent
        for tx in self.by_id.values_mut() {
            tx.update_priority(&self.pending_fees);
        }
    }
    /// Removes all transactions which:
    /// * have a `max_fee_per_blob_gas` greater than or equal to the given `blob_fee`, _and_
    /// * have a `max_fee_per_gas` greater than or equal to the given `base_fee`
    ///
    /// This also sets the [`PendingFees`] for the pool, resorting transactions based on their
    /// updated priority.
    ///
    /// Note: the transactions are not returned in a particular order.
    pub(crate) fn enforce_pending_fees(
        &mut self,
        pending_fees: &PendingFees,
    ) -> Vec<Arc<ValidPoolTransaction<T>>> {
        let removed = self
            .satisfy_pending_fee_ids(pending_fees)
            .into_iter()
            .map(|id| self.remove_transaction(&id).expect("transaction exists"))
            .collect();
        // Update pending fees and reprioritize
        self.pending_fees = pending_fees.clone();
        self.reprioritize();
        removed
    }
    /// Removes transactions until the pool satisfies its [`SubPoolLimit`].
    ///
    /// This is done by removing transactions according to their ordering in the pool, defined by
    /// the [`BlobOrd`] struct.
    ///
    /// Removed transactions are returned in the order they were removed.
    pub fn truncate_pool(&mut self, limit: SubPoolLimit) -> Vec<Arc<ValidPoolTransaction<T>>> {
        let mut removed = Vec::new();
        while self.exceeds(&limit) {
            // `all` sorts the worst (most fee jumps away from executable) transaction last
            let tx = self.all.last().expect("pool is not empty");
            let id = *tx.transaction.id();
            removed.push(self.remove_transaction(&id).expect("transaction exists"));
        }
        removed
    }
    /// Returns `true` if the transaction with the given id is already included in this pool.
    pub(crate) fn contains(&self, id: &TransactionId) -> bool {
        self.by_id.contains_key(id)
    }
    /// Retrieves a transaction with the given ID from the pool, if it exists.
    fn get(&self, id: &TransactionId) -> Option<&BlobTransaction<T>> {
        self.by_id.get(id)
    }
    /// Asserts that the bijection between `by_id` and `all` is valid.
    #[cfg(any(test, feature = "test-utils"))]
    pub(crate) fn assert_invariants(&self) {
        assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()");
    }
}
impl<T: PoolTransaction> Default for BlobTransactions<T> {
fn default() -> Self {
Self {
submission_id: 0,
by_id: Default::default(),
all: Default::default(),
size_of: Default::default(),
pending_fees: Default::default(),
}
}
}
/// A transaction that is ready to be included in a block.
///
/// Pairs the validated pool transaction with the [`BlobOrd`] value that positions it inside
/// the sub-pool's ordered set.
#[derive(Debug)]
struct BlobTransaction<T: PoolTransaction> {
    /// Actual blob transaction.
    transaction: Arc<ValidPoolTransaction<T>>,
    /// The value that determines the order of this transaction.
    ord: BlobOrd,
}
impl<T: PoolTransaction> BlobTransaction<T> {
    /// Creates a new blob transaction, based on the pool transaction, submission id, and current
    /// pending fees.
    pub(crate) fn new(
        transaction: Arc<ValidPoolTransaction<T>>,
        submission_id: u64,
        pending_fees: &PendingFees,
    ) -> Self {
        // Start with a neutral priority and delegate to `update_priority` so the priority
        // formula exists in exactly one place (previously duplicated in both methods).
        let mut this = Self { transaction, ord: BlobOrd { priority: 0, submission_id } };
        this.update_priority(pending_fees);
        this
    }
    /// Updates the priority for the transaction based on the current pending fees.
    pub(crate) fn update_priority(&mut self, pending_fees: &PendingFees) {
        self.ord.priority = blob_tx_priority(
            pending_fees.blob_fee,
            self.transaction.max_fee_per_blob_gas().unwrap_or_default(),
            pending_fees.base_fee as u128,
            self.transaction.max_fee_per_gas(),
        );
    }
}
impl<T: PoolTransaction> Clone for BlobTransaction<T> {
fn clone(&self) -> Self {
Self { transaction: self.transaction.clone(), ord: self.ord.clone() }
}
}
impl<T: PoolTransaction> Eq for BlobTransaction<T> {}
impl<T: PoolTransaction> PartialEq<Self> for BlobTransaction<T> {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl<T: PoolTransaction> PartialOrd<Self> for BlobTransaction<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: PoolTransaction> Ord for BlobTransaction<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.ord.cmp(&other.ord)
}
}
/// This is the log base 2 of 1.125; one fee "jump" is a 12.5% change, so dividing a `log2`
/// value by this constant converts it into a number of jumps.
const LOG_2_1_125: f64 = 0.16992500144231237;
/// The blob step function, attempting to compute the delta given the `max_tx_fee`, and
/// `current_fee`.
///
/// The `max_tx_fee` is the maximum fee that the transaction is willing to pay, this
/// would be the priority fee for the EIP1559 component of transaction fees, and the blob fee cap
/// for the blob component of transaction fees.
///
/// The `current_fee` is the current value of the fee, this would be the base fee for the EIP1559
/// component, and the blob fee (computed from the current head) for the blob component.
///
/// This is supposed to get the number of fee jumps required to get from the current fee to the fee
/// cap, or where the transaction would not be executable any more.
///
/// A positive value means that the transaction will remain executable unless the current fee
/// increases.
///
/// A negative value means that the transaction is currently not executable, and requires the
/// current fee to decrease by some number of jumps before the max fee is greater than the current
/// fee.
pub fn fee_delta(max_tx_fee: u128, current_fee: u128) -> i64 {
    if max_tx_fee == current_fee {
        // equal fees means zero jumps are needed
        return 0
    }
    // number of 12.5% jumps from 1 to `fee`; log2 of zero is undefined, so a zero fee
    // contributes zero jumps
    let jumps_of = |fee: u128| if fee == 0 { 0f64 } else { (fee.ilog2() as f64) / LOG_2_1_125 };
    // jumps = log1.125(txfee) - log1.125(basefee)
    let jumps = jumps_of(max_tx_fee) - jumps_of(current_fee);
    // delta = sign(jumps) * log(abs(jumps)); truncation toward zero picks the sign bucket
    let truncated = jumps as i64;
    if truncated == 0 {
        // can't take ilog2 of 0
        0
    } else if truncated > 0 {
        (jumps.ceil() as i64).ilog2() as i64
    } else {
        -((-jumps.floor() as i64).ilog2() as i64)
    }
}
/// Returns the priority for the transaction, based on the "delta" blob fee and priority fee.
///
/// priority = min(delta-basefee, delta-blobfee, 0)
pub fn blob_tx_priority(
    blob_fee_cap: u128,
    blob_fee: u128,
    max_priority_fee: u128,
    base_fee: u128,
) -> i64 {
    let delta_blob_fee = fee_delta(blob_fee_cap, blob_fee);
    let delta_priority_fee = fee_delta(max_priority_fee, base_fee);
    // TODO: this could be u64:
    // * if all are positive, zero is returned
    // * if all are negative, the min negative value is returned
    // * if some are positive and some are negative, the min negative value is returned
    //
    // the BlobOrd could then just be a u64, and higher values represent worse transactions (more
    // jumps for one of the fees until the cap satisfies)
    0i64.min(delta_blob_fee).min(delta_priority_fee)
}
/// A struct used to determine the ordering for a specific blob transaction in the pool. This uses
/// a `priority` value to determine the ordering, and uses the `submission_id` to break ties.
///
/// The `priority` value is calculated using the [`blob_tx_priority`] function, and should be
/// re-calculated on each block.
#[derive(Debug, Clone)]
pub struct BlobOrd {
    /// Identifier that tags when transaction was submitted in the pool.
    pub(crate) submission_id: u64,
    /// The priority for this transaction, calculated using the [`blob_tx_priority`] function,
    /// taking into account both the blob and priority fee.
    ///
    /// Zero or negative; more negative values are further from being executable.
    pub(crate) priority: i64,
}
impl Eq for BlobOrd {}
impl PartialEq for BlobOrd {
    fn eq(&self, other: &Self) -> bool {
        matches!(self.cmp(other), Ordering::Equal)
    }
}
impl PartialOrd for BlobOrd {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for BlobOrd {
    /// Compares two `BlobOrd` instances.
    ///
    /// The comparison is performed in reverse order based on the priority field. This is
    /// because transactions with larger negative values in the priority field will take more fee
    /// jumps, making them take longer to become executable. Therefore, transactions with lower
    /// ordering should return `Greater`, ensuring they are evicted first.
    ///
    /// If the priority values are equal, the submission ID is used to break ties.
    fn cmp(&self, other: &Self) -> Ordering {
        match other.priority.cmp(&self.priority) {
            Ordering::Equal => self.submission_id.cmp(&other.submission_id),
            unequal => unequal,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{MockTransaction, MockTransactionFactory};
    /// Represents the fees for a single transaction, which will be built inside of a test.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct TransactionFees {
        /// The blob fee cap for the transaction.
        max_blob_fee: u128,
        /// The max priority fee for the transaction.
        max_priority_fee_per_gas: u128,
        /// The base fee for the transaction.
        max_fee_per_gas: u128,
    }
    /// Represents an ordering of transactions based on their fees and the current network fees.
    #[derive(Debug, Clone)]
    struct TransactionOrdering {
        /// The transaction fees, in the order that they're expected to be returned
        fees: Vec<TransactionFees>,
        /// The network fees
        network_fees: PendingFees,
    }
    #[test]
    fn test_blob_ordering() {
        // Tests are from:
        // <https://github.com/ethereum/go-ethereum/blob/e91cdb49beb4b2a3872b5f2548bf2d6559e4f561/core/txpool/blobpool/evictheap_test.go>
        let mut factory = MockTransactionFactory::default();
        let vectors = vec![
            // If everything is above basefee and blobfee, order by miner tip
            TransactionOrdering {
                fees: vec![
                    TransactionFees {
                        max_blob_fee: 2,
                        max_priority_fee_per_gas: 0,
                        max_fee_per_gas: 2,
                    },
                    TransactionFees {
                        max_blob_fee: 3,
                        max_priority_fee_per_gas: 1,
                        max_fee_per_gas: 1,
                    },
                    TransactionFees {
                        max_blob_fee: 1,
                        max_priority_fee_per_gas: 2,
                        max_fee_per_gas: 3,
                    },
                ],
                network_fees: PendingFees { base_fee: 0, blob_fee: 0 },
            },
            // If only basefees are used (blob fee matches with network), return the ones the
            // furthest below the current basefee, splitting same ones with the tip. Anything above
            // the basefee should be split by tip.
            TransactionOrdering {
                fees: vec![
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 50,
                        max_fee_per_gas: 500,
                    },
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 100,
                        max_fee_per_gas: 500,
                    },
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 50,
                        max_fee_per_gas: 1000,
                    },
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 100,
                        max_fee_per_gas: 1000,
                    },
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 1,
                        max_fee_per_gas: 2000,
                    },
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 2,
                        max_fee_per_gas: 2000,
                    },
                    TransactionFees {
                        max_blob_fee: 0,
                        max_priority_fee_per_gas: 3,
                        max_fee_per_gas: 2000,
                    },
                ],
                network_fees: PendingFees { base_fee: 1999, blob_fee: 0 },
            },
            // If only blobfees are used (base fee matches with network), return the
            // ones the furthest below the current blobfee, splitting same ones with
            // the tip. Anything above the blobfee should be split by tip.
            TransactionOrdering {
                fees: vec![
                    TransactionFees {
                        max_blob_fee: 500,
                        max_priority_fee_per_gas: 50,
                        max_fee_per_gas: 0,
                    },
                    TransactionFees {
                        max_blob_fee: 500,
                        max_priority_fee_per_gas: 100,
                        max_fee_per_gas: 0,
                    },
                    TransactionFees {
                        max_blob_fee: 1000,
                        max_priority_fee_per_gas: 50,
                        max_fee_per_gas: 0,
                    },
                    TransactionFees {
                        max_blob_fee: 1000,
                        max_priority_fee_per_gas: 100,
                        max_fee_per_gas: 0,
                    },
                    TransactionFees {
                        max_blob_fee: 2000,
                        max_priority_fee_per_gas: 1,
                        max_fee_per_gas: 0,
                    },
                    TransactionFees {
                        max_blob_fee: 2000,
                        max_priority_fee_per_gas: 2,
                        max_fee_per_gas: 0,
                    },
                    TransactionFees {
                        max_blob_fee: 2000,
                        max_priority_fee_per_gas: 3,
                        max_fee_per_gas: 0,
                    },
                ],
                network_fees: PendingFees { base_fee: 0, blob_fee: 1999 },
            },
            // If both basefee and blobfee is specified, sort by the larger distance
            // of the two from the current network conditions, splitting same (loglog)
            // ones via the tip.
            //
            // Basefee: 1000
            // Blobfee: 100
            //
            // Tx #0: (800, 80) - 2 jumps below both => priority -1
            // Tx #1: (630, 63) - 4 jumps below both => priority -2
            // Tx #2: (800, 63) - 2 jumps below basefee, 4 jumps below blobfee => priority -2 (blob
            // penalty dominates) Tx #3: (630, 80) - 4 jumps below basefee, 2 jumps
            // below blobfee => priority -2 (base penalty dominates)
            //
            // Txs 1, 2, 3 share the same priority, split via tip, prefer 0 as the best
            TransactionOrdering {
                fees: vec![
                    TransactionFees {
                        max_blob_fee: 80,
                        max_priority_fee_per_gas: 4,
                        max_fee_per_gas: 630,
                    },
                    TransactionFees {
                        max_blob_fee: 63,
                        max_priority_fee_per_gas: 3,
                        max_fee_per_gas: 800,
                    },
                    TransactionFees {
                        max_blob_fee: 63,
                        max_priority_fee_per_gas: 2,
                        max_fee_per_gas: 630,
                    },
                    TransactionFees {
                        max_blob_fee: 80,
                        max_priority_fee_per_gas: 1,
                        max_fee_per_gas: 800,
                    },
                ],
                network_fees: PendingFees { base_fee: 1000, blob_fee: 100 },
            },
        ];
        for ordering in vectors {
            // create a new pool each time
            let mut pool = BlobTransactions::default();
            // create tx from fees
            let txs = ordering
                .fees
                .iter()
                .map(|fees| {
                    MockTransaction::eip4844()
                        .with_blob_fee(fees.max_blob_fee)
                        .with_priority_fee(fees.max_priority_fee_per_gas)
                        .with_max_fee(fees.max_fee_per_gas)
                })
                .collect::<Vec<_>>();
            for tx in &txs {
                pool.add_transaction(factory.validated_arc(tx.clone()));
            }
            // update fees and resort the pool
            pool.pending_fees = ordering.network_fees.clone();
            pool.reprioritize();
            // now iterate through the pool and make sure they're in the same order as the original
            // fees - map to TransactionFees so it's easier to compare the ordering without having
            // to see irrelevant fields
            let actual_txs = pool
                .all
                .iter()
                .map(|tx| TransactionFees {
                    max_blob_fee: tx.transaction.max_fee_per_blob_gas().unwrap_or_default(),
                    max_priority_fee_per_gas: tx.transaction.priority_fee_or_price(),
                    max_fee_per_gas: tx.transaction.max_fee_per_gas(),
                })
                .collect::<Vec<_>>();
            assert_eq!(
                ordering.fees, actual_txs,
                "ordering mismatch, expected: {:#?}, actual: {:#?}",
                ordering.fees, actual_txs
            );
        }
    }
    #[test]
    fn priority_tests() {
        // Test vectors from:
        // <https://github.com/ethereum/go-ethereum/blob/e91cdb49beb4b2a3872b5f2548bf2d6559e4f561/core/txpool/blobpool/priority_test.go#L27-L49>
        // Tuples are (base_fee, tx_fee, expected delta).
        let vectors = vec![
            (7u128, 10u128, 2i64),
            (17_200_000_000, 17_200_000_000, 0),
            (9_853_941_692, 11_085_092_510, 0),
            (11_544_106_391, 10_356_781_100, 0),
            (17_200_000_000, 7, -7),
            (7, 17_200_000_000, 7),
        ];
        for (base_fee, tx_fee, expected) in vectors {
            let actual = fee_delta(tx_fee, base_fee);
            assert_eq!(
                actual, expected,
                "fee_delta({tx_fee}, {base_fee}) = {actual}, expected: {expected}"
            );
        }
    }
    #[test]
    fn test_empty_pool_operations() {
        let mut pool: BlobTransactions<MockTransaction> = BlobTransactions::default();
        // Ensure pool is empty
        assert!(pool.is_empty());
        assert_eq!(pool.len(), 0);
        assert_eq!(pool.size(), 0);
        // Attempt to remove a non-existent transaction
        let non_existent_id = TransactionId::new(0.into(), 0);
        assert!(pool.remove_transaction(&non_existent_id).is_none());
        // Check contains method on empty pool
        assert!(!pool.contains(&non_existent_id));
    }
    #[test]
    fn test_transaction_removal() {
        let mut factory = MockTransactionFactory::default();
        let mut pool = BlobTransactions::default();
        // Add a transaction
        let tx = factory.validated_arc(MockTransaction::eip4844());
        let tx_id = *tx.id();
        pool.add_transaction(tx);
        // Remove the transaction
        let removed = pool.remove_transaction(&tx_id);
        assert!(removed.is_some());
        assert_eq!(*removed.unwrap().id(), tx_id);
        assert!(pool.is_empty());
    }
    #[test]
    fn test_satisfy_attributes_empty_pool() {
        let pool: BlobTransactions<MockTransaction> = BlobTransactions::default();
        let attributes = BestTransactionsAttributes { blob_fee: Some(100), basefee: 100 };
        // Satisfy attributes on an empty pool should return an empty vector
        let satisfied = pool.satisfy_attributes(attributes);
        assert!(satisfied.is_empty());
    }
    #[test]
    #[should_panic(expected = "transaction is not a blob tx")]
    fn test_add_non_blob_transaction() {
        // Ensure that adding a non-blob transaction causes a panic
        let mut factory = MockTransactionFactory::default();
        let mut pool = BlobTransactions::default();
        let tx = factory.validated_arc(MockTransaction::eip1559()); // Not a blob transaction
        pool.add_transaction(tx);
    }
    #[test]
    #[should_panic(expected = "transaction already included")]
    fn test_add_duplicate_blob_transaction() {
        // Ensure that adding a duplicate blob transaction causes a panic
        let mut factory = MockTransactionFactory::default();
        let mut pool = BlobTransactions::default();
        let tx = factory.validated_arc(MockTransaction::eip4844());
        pool.add_transaction(tx.clone()); // First addition
        pool.add_transaction(tx); // Attempt to add the same transaction again
    }
    #[test]
    fn test_remove_transactions_until_limit() {
        // Test truncating the pool until it satisfies the given size limit
        let mut factory = MockTransactionFactory::default();
        let mut pool = BlobTransactions::default();
        let tx1 = factory.validated_arc(MockTransaction::eip4844().with_size(100));
        let tx2 = factory.validated_arc(MockTransaction::eip4844().with_size(200));
        let tx3 = factory.validated_arc(MockTransaction::eip4844().with_size(300));
        // Add transactions to the pool
        pool.add_transaction(tx1);
        pool.add_transaction(tx2);
        pool.add_transaction(tx3);
        // Set a size limit that requires truncation
        let limit = SubPoolLimit { max_txs: 2, max_size: 300 };
        let removed = pool.truncate_pool(limit);
        // Check that only one transaction was removed to satisfy the limit
        assert_eq!(removed.len(), 1);
        assert_eq!(pool.len(), 2);
        assert!(pool.size() <= limit.max_size);
    }
    #[test]
    fn test_empty_pool_invariants() {
        // Ensure that the invariants hold for an empty pool
        let pool: BlobTransactions<MockTransaction> = BlobTransactions::default();
        pool.assert_invariants();
        assert!(pool.is_empty());
        assert_eq!(pool.size(), 0);
        assert_eq!(pool.len(), 0);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/parked.rs | crates/transaction-pool/src/pool/parked.rs | use crate::{
identifier::{SenderId, TransactionId},
pool::size::SizeTracker,
PoolTransaction, SubPoolLimit, ValidPoolTransaction, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER,
};
use rustc_hash::FxHashMap;
use smallvec::SmallVec;
use std::{
cmp::Ordering,
collections::{hash_map::Entry, BTreeMap, BTreeSet},
ops::{Bound::Unbounded, Deref},
sync::Arc,
};
/// A pool of transactions that are currently parked and are waiting for external changes (e.g.
/// basefee, ancestor transactions, balance) that eventually move the transaction into the pending
/// pool.
///
/// Note: This type is generic over [`ParkedOrd`] which enforces that the underlying transaction
/// type is [`ValidPoolTransaction`] wrapped in an [Arc].
#[derive(Debug, Clone)]
pub struct ParkedPool<T: ParkedOrd> {
    /// Keeps track of transactions inserted in the pool.
    ///
    /// This way we can determine when transactions were submitted to the pool.
    submission_id: u64,
    /// _All_ Transactions that are currently inside the pool grouped by their identifier.
    by_id: BTreeMap<TransactionId, ParkedPoolTransaction<T>>,
    /// Keeps track of last submission id for each sender.
    ///
    /// This are sorted in reverse order, so the last (highest) submission id is first, and the
    /// lowest (oldest) is the last.
    last_sender_submission: BTreeSet<SubmissionSenderId>,
    /// Keeps track of the number of transactions in the pool by the sender and the last submission
    /// id.
    sender_transaction_count: FxHashMap<SenderId, SenderTransactionCount>,
    /// Keeps track of the size of this pool.
    ///
    /// See also [`reth_primitives_traits::InMemorySize::size`].
    size_of: SizeTracker,
}
// === impl ParkedPool ===
impl<T: ParkedOrd> ParkedPool<T> {
    /// Adds a new transactions to the pending queue.
    pub fn add_transaction(&mut self, tx: Arc<ValidPoolTransaction<T::Transaction>>) {
        let id = *tx.id();
        debug_assert!(
            !self.contains(&id),
            "transaction already included {:?}",
            self.get(&id).unwrap().transaction.transaction
        );
        let submission_id = self.next_id();
        // keep track of size
        self.size_of += tx.size();
        // update or create sender entry
        self.add_sender_count(tx.sender_id(), submission_id);
        // `tx.into()` wraps the Arc into the pool's `ParkedOrd` ordering type
        let transaction = ParkedPoolTransaction { submission_id, transaction: tx.into() };
        self.by_id.insert(id, transaction);
    }
    /// Increments the count of transactions for the given sender and updates the tracked submission
    /// id.
    fn add_sender_count(&mut self, sender: SenderId, submission_id: u64) {
        match self.sender_transaction_count.entry(sender) {
            Entry::Occupied(mut entry) => {
                let value = entry.get_mut();
                // remove the __currently__ tracked submission id
                self.last_sender_submission
                    .remove(&SubmissionSenderId::new(sender, value.last_submission_id));
                value.count += 1;
                value.last_submission_id = submission_id;
            }
            Entry::Vacant(entry) => {
                entry
                    .insert(SenderTransactionCount { count: 1, last_submission_id: submission_id });
            }
        }
        // insert a new entry
        self.last_sender_submission.insert(SubmissionSenderId::new(sender, submission_id));
    }
    /// Decrements the count of transactions for the given sender.
    ///
    /// If the count reaches zero, the sender is removed from the map.
    ///
    /// Note: this does not update the tracked submission id for the sender, because we're only
    /// interested in the __last__ submission id when truncating the pool.
    fn remove_sender_count(&mut self, sender_id: SenderId) {
        let removed_sender = match self.sender_transaction_count.entry(sender_id) {
            Entry::Occupied(mut entry) => {
                let value = entry.get_mut();
                value.count -= 1;
                if value.count == 0 {
                    entry.remove()
                } else {
                    return
                }
            }
            Entry::Vacant(_) => {
                // This should never happen because of the bijection between the two maps
                unreachable!("sender count not found {:?}", sender_id);
            }
        };
        // all transactions for this sender have been removed
        assert!(
            self.last_sender_submission
                .remove(&SubmissionSenderId::new(sender_id, removed_sender.last_submission_id)),
            "last sender transaction not found {sender_id:?}"
        );
    }
    /// Returns an iterator over all transactions in the pool
    pub(crate) fn all(
        &self,
    ) -> impl ExactSizeIterator<Item = Arc<ValidPoolTransaction<T::Transaction>>> + '_ {
        self.by_id.values().map(|tx| tx.transaction.clone().into())
    }
    /// Removes the transaction from the pool
    pub(crate) fn remove_transaction(
        &mut self,
        id: &TransactionId,
    ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
        // remove from queues
        let tx = self.by_id.remove(id)?;
        self.remove_sender_count(tx.transaction.sender_id());
        // keep track of size
        self.size_of -= tx.transaction.size();
        Some(tx.transaction.into())
    }
    /// Retrieves transactions by sender, using `SmallVec` to efficiently handle up to
    /// `TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER` transactions.
    pub(crate) fn get_txs_by_sender(
        &self,
        sender: SenderId,
    ) -> SmallVec<[TransactionId; TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER]> {
        // range from the sender's first possible id; stop at the first id of another sender
        self.by_id
            .range((sender.start_bound(), Unbounded))
            .take_while(move |(other, _)| sender == other.sender)
            .map(|(tx_id, _)| *tx_id)
            .collect()
    }
    /// Returns the senders in the pool, ordered from most- to least-recent last submission.
    #[cfg(test)]
    pub(crate) fn get_senders_by_submission_id(
        &self,
    ) -> impl Iterator<Item = SubmissionSenderId> + '_ {
        self.last_sender_submission.iter().copied()
    }
    /// Truncates the pool by removing transactions, until the given [`SubPoolLimit`] has been met.
    ///
    /// This is done by first ordering senders by the last time they have submitted a transaction
    ///
    /// Uses sender ids sorted by each sender's last submission id. Senders with older last
    /// submission ids are first. Note that _last_ submission ids are the newest submission id for
    /// that sender, so this sorts senders by the last time they submitted a transaction in
    /// descending order. Senders that have least recently submitted a transaction are first.
    ///
    /// Then, for each sender, all transactions for that sender are removed, until the pool limits
    /// have been met.
    ///
    /// Any removed transactions are returned.
    pub fn truncate_pool(
        &mut self,
        limit: SubPoolLimit,
    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
        if !self.exceeds(&limit) {
            // if we are below the limits, we don't need to drop anything
            return Vec::new()
        }
        let mut removed = Vec::new();
        while !self.last_sender_submission.is_empty() && limit.is_exceeded(self.len(), self.size())
        {
            // NOTE: This will not panic due to `!last_sender_submission.is_empty()`
            let sender_id = self.last_sender_submission.last().unwrap().sender_id;
            let list = self.get_txs_by_sender(sender_id);
            // Drop transactions from this sender until the pool is under limits
            // (highest nonce first, so no nonce gaps are created for the remaining txs)
            for txid in list.into_iter().rev() {
                if let Some(tx) = self.remove_transaction(&txid) {
                    removed.push(tx);
                }
                if !self.exceeds(&limit) {
                    break
                }
            }
        }
        removed
    }
    /// Returns the next submission id, wrapping around on `u64` overflow.
    const fn next_id(&mut self) -> u64 {
        let id = self.submission_id;
        self.submission_id = self.submission_id.wrapping_add(1);
        id
    }
    /// The reported size of all transactions in this pool.
    pub(crate) fn size(&self) -> usize {
        self.size_of.into()
    }
    /// Number of transactions in the entire pool
    pub(crate) fn len(&self) -> usize {
        self.by_id.len()
    }
    /// Returns true if the pool exceeds the given limit
    #[inline]
    pub(crate) fn exceeds(&self, limit: &SubPoolLimit) -> bool {
        limit.is_exceeded(self.len(), self.size())
    }
    /// Returns whether the pool is empty
    #[cfg(test)]
    pub(crate) fn is_empty(&self) -> bool {
        self.by_id.is_empty()
    }
    /// Returns `true` if the transaction with the given id is already included in this pool.
    pub(crate) fn contains(&self, id: &TransactionId) -> bool {
        self.by_id.contains_key(id)
    }
    /// Retrieves a transaction with the given ID from the pool, if it exists.
    fn get(&self, id: &TransactionId) -> Option<&ParkedPoolTransaction<T>> {
        self.by_id.get(id)
    }
    /// Asserts that all subpool invariants
    #[cfg(any(test, feature = "test-utils"))]
    pub(crate) fn assert_invariants(&self) {
        assert_eq!(
            self.last_sender_submission.len(),
            self.sender_transaction_count.len(),
            "last_sender_transaction.len() != sender_to_last_transaction.len()"
        );
    }
}
impl<T: PoolTransaction> ParkedPool<BasefeeOrd<T>> {
    /// Returns all transactions that satisfy the given basefee.
    ///
    /// Note: this does _not_ remove the transactions
    pub(crate) fn satisfy_base_fee_transactions(
        &self,
        basefee: u64,
    ) -> Vec<Arc<ValidPoolTransaction<T>>> {
        let ids = self.satisfy_base_fee_ids(basefee as u128);
        let mut txs = Vec::with_capacity(ids.len());
        for id in ids {
            txs.push(self.get(&id).expect("transaction exists").transaction.clone().into());
        }
        txs
    }
    /// Returns the ids of all transactions that satisfy the given basefee.
    ///
    /// If a transaction does not satisfy the basefee, all of its same-sender descendants are
    /// skipped as well, since they cannot become pending without their ancestor.
    fn satisfy_base_fee_ids(&self, basefee: u128) -> Vec<TransactionId> {
        let mut transactions = Vec::new();
        {
            let mut iter = self.by_id.iter().peekable();
            while let Some((id, tx)) = iter.next() {
                if tx.transaction.transaction.max_fee_per_gas() < basefee {
                    // still parked -> skip descendant transactions
                    'this: while let Some((peek, _)) = iter.peek() {
                        if peek.sender != id.sender {
                            break 'this
                        }
                        iter.next();
                    }
                } else {
                    transactions.push(*id);
                }
            }
        }
        transactions
    }
    /// Removes all transactions from this subpool that can afford the given basefee,
    /// invoking the provided handler for each transaction as it is removed.
    ///
    /// This method enforces the basefee constraint by identifying transactions that now
    /// satisfy the basefee requirement (typically after a basefee decrease) and processing
    /// them via the provided transaction handler closure.
    ///
    /// Respects per-sender nonce ordering: if the lowest-nonce transaction for a sender
    /// still cannot afford the basefee, higher-nonce transactions from that sender are skipped.
    ///
    /// Note: the transactions are not returned in a particular order.
    pub(crate) fn enforce_basefee_with<F>(&mut self, basefee: u64, mut tx_handler: F)
    where
        F: FnMut(Arc<ValidPoolTransaction<T>>),
    {
        let to_remove = self.satisfy_base_fee_ids(basefee as u128);
        for id in to_remove {
            if let Some(tx) = self.remove_transaction(&id) {
                tx_handler(tx);
            }
        }
    }
    /// Removes all transactions and their dependent transaction from the subpool that no longer
    /// satisfy the given basefee.
    ///
    /// Legacy method maintained for compatibility with read-only queries.
    /// For basefee enforcement, prefer `enforce_basefee_with` for better performance.
    ///
    /// Note: the transactions are not returned in a particular order.
    #[cfg(test)]
    pub(crate) fn enforce_basefee(&mut self, basefee: u64) -> Vec<Arc<ValidPoolTransaction<T>>> {
        let mut removed = Vec::new();
        self.enforce_basefee_with(basefee, |tx| {
            removed.push(tx);
        });
        removed
    }
}
impl<T: ParkedOrd> Default for ParkedPool<T> {
fn default() -> Self {
Self {
submission_id: 0,
by_id: Default::default(),
last_sender_submission: Default::default(),
sender_transaction_count: Default::default(),
size_of: Default::default(),
}
}
}
/// Keeps track of the number of transactions and the latest submission id for each sender.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
struct SenderTransactionCount {
    // number of this sender's transactions currently in the pool
    count: u64,
    // submission id of the sender's most recently added transaction
    last_submission_id: u64,
}
/// Represents a transaction in this pool.
#[derive(Debug)]
struct ParkedPoolTransaction<T: ParkedOrd> {
    /// Identifier that tags when transaction was submitted in the pool.
    submission_id: u64,
    /// Actual transaction.
    transaction: T,
}
// Manual impls so that no extra bounds beyond `ParkedOrd` are required on `T`.
impl<T: ParkedOrd> Clone for ParkedPoolTransaction<T> {
    fn clone(&self) -> Self {
        Self { transaction: self.transaction.clone(), submission_id: self.submission_id }
    }
}
impl<T: ParkedOrd> Eq for ParkedPoolTransaction<T> {}
impl<T: ParkedOrd> PartialEq for ParkedPoolTransaction<T> {
    fn eq(&self, other: &Self) -> bool {
        matches!(self.cmp(other), Ordering::Equal)
    }
}
impl<T: ParkedOrd> PartialOrd for ParkedPoolTransaction<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<T: ParkedOrd> Ord for ParkedPoolTransaction<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        // Primary key: the wrapped transaction's ordering ("better" transactions are Greater).
        // Tie-break: reversed submission id, so earlier submissions rank higher.
        match self.transaction.cmp(&other.transaction) {
            Ordering::Equal => other.submission_id.cmp(&self.submission_id),
            unequal => unequal,
        }
    }
}
/// Includes a [`SenderId`] and `submission_id`. This is used to sort senders by their last
/// submission id.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub(crate) struct SubmissionSenderId {
    /// The sender id
    pub(crate) sender_id: SenderId,
    /// The submission id
    pub(crate) submission_id: u64,
}

impl SubmissionSenderId {
    /// Creates a new [`SubmissionSenderId`] based on the [`SenderId`] and `submission_id`.
    const fn new(sender_id: SenderId, submission_id: u64) -> Self {
        Self { sender_id, submission_id }
    }
}
impl Ord for SubmissionSenderId {
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed on purpose: smaller `submission_id`s compare as `Greater`.
        // NOTE(review): `sender_id` intentionally does not participate in the order,
        // while the derived `PartialEq` does consider it — confirm this asymmetry is
        // relied upon by the ordered-set usage.
        self.submission_id.cmp(&other.submission_id).reverse()
    }
}
impl PartialOrd for SubmissionSenderId {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// Helper trait used for custom `Ord` wrappers around a transaction.
///
/// This is effectively a wrapper for `Arc<ValidPoolTransaction>` with custom `Ord` implementation.
pub trait ParkedOrd:
    Ord
    + Clone
    + From<Arc<ValidPoolTransaction<Self::Transaction>>>
    + Into<Arc<ValidPoolTransaction<Self::Transaction>>>
    + Deref<Target = Arc<ValidPoolTransaction<Self::Transaction>>>
{
    /// The wrapper transaction type.
    type Transaction: PoolTransaction;
}

/// Helper macro to implement necessary conversions for `ParkedOrd` trait
macro_rules! impl_ord_wrapper {
    ($name:ident) => {
        // `PartialEq`/`PartialOrd` delegate to the wrapper's `Ord`, which each
        // call site provides separately, so `eq` and `cmp` always agree.
        impl<T: PoolTransaction> Clone for $name<T> {
            fn clone(&self) -> Self {
                Self(self.0.clone())
            }
        }
        impl<T: PoolTransaction> Eq for $name<T> {}
        impl<T: PoolTransaction> PartialEq<Self> for $name<T> {
            fn eq(&self, other: &Self) -> bool {
                self.cmp(other) == Ordering::Equal
            }
        }
        impl<T: PoolTransaction> PartialOrd<Self> for $name<T> {
            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
                Some(self.cmp(other))
            }
        }
        // Deref to the inner `Arc<ValidPoolTransaction<T>>` for convenient access.
        impl<T: PoolTransaction> Deref for $name<T> {
            type Target = Arc<ValidPoolTransaction<T>>;
            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }
        impl<T: PoolTransaction> ParkedOrd for $name<T> {
            type Transaction = T;
        }
        impl<T: PoolTransaction> From<Arc<ValidPoolTransaction<T>>> for $name<T> {
            fn from(value: Arc<ValidPoolTransaction<T>>) -> Self {
                Self(value)
            }
        }
        impl<T: PoolTransaction> From<$name<T>> for Arc<ValidPoolTransaction<T>> {
            fn from(value: $name<T>) -> Arc<ValidPoolTransaction<T>> {
                value.0
            }
        }
    };
}
/// A new type wrapper for [`ValidPoolTransaction`]
///
/// This sorts transactions by their base fee.
///
/// Caution: This assumes all transaction in the `BaseFee` sub-pool have a fee value.
#[derive(Debug)]
pub struct BasefeeOrd<T: PoolTransaction>(Arc<ValidPoolTransaction<T>>);

impl_ord_wrapper!(BasefeeOrd);

impl<T: PoolTransaction> Ord for BasefeeOrd<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        // Ascending by `max_fee_per_gas`: a higher max fee compares as `Greater`.
        self.0.transaction.max_fee_per_gas().cmp(&other.0.transaction.max_fee_per_gas())
    }
}

/// A new type wrapper for [`ValidPoolTransaction`]
///
/// Ordering used for transactions in the `Queued` sub-pool.
///
/// `Queued` transactions are transactions that are currently blocked by other parked (basefee,
/// queued) or missing transactions.
///
/// The primary order compares `max_fee_per_gas` (higher is better); ties are broken by the
/// timestamp the transaction was added, with older (smaller timestamp) ranking higher.
#[derive(Debug)]
pub struct QueuedOrd<T: PoolTransaction>(Arc<ValidPoolTransaction<T>>);

impl_ord_wrapper!(QueuedOrd);

impl<T: PoolTransaction> Ord for QueuedOrd<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        // Higher fee is better
        self.max_fee_per_gas().cmp(&other.max_fee_per_gas()).then_with(||
            // Lower timestamp is better
            other.timestamp.cmp(&self.timestamp))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{MockTransaction, MockTransactionFactory, MockTransactionSet};
use alloy_consensus::{Transaction, TxType};
use alloy_primitives::address;
use std::collections::HashSet;
#[test]
fn test_enforce_parked_basefee() {
    let mut f = MockTransactionFactory::default();
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    let tx = f.validated_arc(MockTransaction::eip1559().inc_price());
    pool.add_transaction(tx.clone());
    assert!(pool.contains(tx.id()));
    assert_eq!(pool.len(), 1);
    // A basefee above the tx's max fee promotes nothing.
    let removed = pool.enforce_basefee(u64::MAX);
    assert!(removed.is_empty());
    // Once the basefee drops below the tx's max fee, the tx is popped out of the subpool.
    let removed = pool.enforce_basefee((tx.max_fee_per_gas() - 1) as u64);
    assert_eq!(removed.len(), 1);
    assert!(pool.is_empty());
}

#[test]
fn test_enforce_parked_basefee_descendant() {
    let mut f = MockTransactionFactory::default();
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    let t = MockTransaction::eip1559().inc_price_by(10);
    let root_tx = f.validated_arc(t.clone());
    pool.add_transaction(root_tx.clone());
    // Descendant has a higher nonce and a lower max fee than the root.
    let descendant_tx = f.validated_arc(t.inc_nonce().decr_price());
    pool.add_transaction(descendant_tx.clone());
    assert!(pool.contains(root_tx.id()));
    assert!(pool.contains(descendant_tx.id()));
    assert_eq!(pool.len(), 2);
    let removed = pool.enforce_basefee(u64::MAX);
    assert!(removed.is_empty());
    assert_eq!(pool.len(), 2);
    // two dependent tx in the pool with decreasing fee
    {
        // NOTE(review): only the root affords this basefee; the lower-fee descendant
        // stays parked — confirm this per-sender promotion rule is the intended behavior.
        let mut pool2 = pool.clone();
        let removed = pool2.enforce_basefee(root_tx.max_fee_per_gas() as u64);
        assert_eq!(removed.len(), 1);
        assert_eq!(pool2.len(), 1);
        // root got popped - descendant should be skipped
        assert!(!pool2.contains(root_tx.id()));
        assert!(pool2.contains(descendant_tx.id()));
    }
    // remove root transaction via descendant tx fee
    let removed = pool.enforce_basefee(descendant_tx.max_fee_per_gas() as u64);
    assert_eq!(removed.len(), 2);
    assert!(pool.is_empty());
}

#[test]
fn truncate_parked_by_submission_id() {
    // this test ensures that we evict from the pending pool by sender
    let mut f = MockTransactionFactory::default();
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    let a_sender = address!("0x000000000000000000000000000000000000000a");
    let b_sender = address!("0x000000000000000000000000000000000000000b");
    let c_sender = address!("0x000000000000000000000000000000000000000c");
    let d_sender = address!("0x000000000000000000000000000000000000000d");
    // create a chain of transactions by sender A, B, C
    let mut tx_set = MockTransactionSet::dependent(a_sender, 0, 4, TxType::Eip1559);
    let a = tx_set.clone().into_vec();
    let b = MockTransactionSet::dependent(b_sender, 0, 3, TxType::Eip1559).into_vec();
    tx_set.extend(b.clone());
    // C has the same number of txs as B
    let c = MockTransactionSet::dependent(c_sender, 0, 3, TxType::Eip1559).into_vec();
    tx_set.extend(c.clone());
    let d = MockTransactionSet::dependent(d_sender, 0, 1, TxType::Eip1559).into_vec();
    tx_set.extend(d.clone());
    let all_txs = tx_set.into_vec();
    // just construct a list of all txs to add
    // C and D were submitted last, so they are expected to survive truncation.
    let expected_parked = vec![c[0].clone(), c[1].clone(), c[2].clone(), d[0].clone()]
        .into_iter()
        .map(|tx| (tx.sender(), tx.nonce()))
        .collect::<HashSet<_>>();
    // we expect the truncate operation to go through the senders with the most txs, removing
    // txs based on when they were submitted, removing the oldest txs first, until the pool is
    // not over the limit
    let expected_removed = vec![
        a[0].clone(),
        a[1].clone(),
        a[2].clone(),
        a[3].clone(),
        b[0].clone(),
        b[1].clone(),
        b[2].clone(),
    ]
    .into_iter()
    .map(|tx| (tx.sender(), tx.nonce()))
    .collect::<HashSet<_>>();
    // add all the transactions to the pool
    for tx in all_txs {
        pool.add_transaction(f.validated_arc(tx));
    }
    // we should end up with the most recently submitted transactions
    let pool_limit = SubPoolLimit { max_txs: 4, max_size: usize::MAX };
    // truncate the pool
    let removed = pool.truncate_pool(pool_limit);
    assert_eq!(removed.len(), expected_removed.len());
    // get the inner txs from the removed txs
    let removed =
        removed.into_iter().map(|tx| (tx.sender(), tx.nonce())).collect::<HashSet<_>>();
    assert_eq!(removed, expected_removed);
    // get the parked pool
    let parked = pool.all().collect::<Vec<_>>();
    assert_eq!(parked.len(), expected_parked.len());
    // get the inner txs from the parked txs
    let parked = parked.into_iter().map(|tx| (tx.sender(), tx.nonce())).collect::<HashSet<_>>();
    assert_eq!(parked, expected_parked);
}

#[test]
fn test_truncate_parked_with_large_tx() {
    let mut f = MockTransactionFactory::default();
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    let default_limits = SubPoolLimit::default();
    // create a chain of transactions by sender A
    // make sure they are all one over half the limit
    let a_sender = address!("0x000000000000000000000000000000000000000a");
    // 2 txs, that should put the pool over the size limit but not max txs
    let a_txs = MockTransactionSet::dependent(a_sender, 0, 2, TxType::Eip1559)
        .into_iter()
        .map(|mut tx| {
            tx.set_size(default_limits.max_size / 2 + 1);
            tx
        })
        .collect::<Vec<_>>();
    // add all the transactions to the pool
    for tx in a_txs {
        pool.add_transaction(f.validated_arc(tx));
    }
    // truncate the pool, it should remove at least one transaction
    let removed = pool.truncate_pool(default_limits);
    assert_eq!(removed.len(), 1);
}

#[test]
fn test_senders_by_submission_id() {
    // this test ensures that we evict from the pending pool by sender
    let mut f = MockTransactionFactory::default();
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    let a_sender = address!("0x000000000000000000000000000000000000000a");
    let b_sender = address!("0x000000000000000000000000000000000000000b");
    let c_sender = address!("0x000000000000000000000000000000000000000c");
    let d_sender = address!("0x000000000000000000000000000000000000000d");
    // create a chain of transactions by sender A, B, C
    let mut tx_set = MockTransactionSet::dependent(a_sender, 0, 4, TxType::Eip1559);
    let a = tx_set.clone().into_vec();
    let b = MockTransactionSet::dependent(b_sender, 0, 3, TxType::Eip1559).into_vec();
    tx_set.extend(b.clone());
    // C has the same number of txs as B
    let c = MockTransactionSet::dependent(c_sender, 0, 3, TxType::Eip1559).into_vec();
    tx_set.extend(c.clone());
    let d = MockTransactionSet::dependent(d_sender, 0, 1, TxType::Eip1559).into_vec();
    tx_set.extend(d.clone());
    let all_txs = tx_set.into_vec();
    // add all the transactions to the pool
    for tx in all_txs {
        pool.add_transaction(f.validated_arc(tx));
    }
    // get senders by submission id - a4, b3, c3, d1, reversed
    // (most recently submitting sender first)
    let senders = pool.get_senders_by_submission_id().map(|s| s.sender_id).collect::<Vec<_>>();
    assert_eq!(senders.len(), 4);
    let expected_senders = vec![d_sender, c_sender, b_sender, a_sender]
        .into_iter()
        .map(|s| f.ids.sender_id(&s).unwrap())
        .collect::<Vec<_>>();
    assert_eq!(senders, expected_senders);
    // manually order the txs
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    let all_txs = vec![d[0].clone(), b[0].clone(), c[0].clone(), a[0].clone()];
    // add all the transactions to the pool
    for tx in all_txs {
        pool.add_transaction(f.validated_arc(tx));
    }
    let senders = pool.get_senders_by_submission_id().map(|s| s.sender_id).collect::<Vec<_>>();
    assert_eq!(senders.len(), 4);
    let expected_senders = vec![a_sender, c_sender, b_sender, d_sender]
        .into_iter()
        .map(|s| f.ids.sender_id(&s).unwrap())
        .collect::<Vec<_>>();
    assert_eq!(senders, expected_senders);
}

#[test]
fn test_add_sender_count_new_sender() {
    // Initialize a mock transaction factory
    let mut f = MockTransactionFactory::default();
    // Create an empty transaction pool
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    // Generate a validated transaction and add it to the pool
    let tx = f.validated_arc(MockTransaction::eip1559().inc_price());
    pool.add_transaction(tx);
    // Define a new sender ID and submission ID
    let sender: SenderId = 11.into();
    let submission_id = 1;
    // Add the sender count to the pool
    pool.add_sender_count(sender, submission_id);
    // Assert that the sender transaction count is updated correctly
    // (one entry from the added tx, one from the manual call above)
    assert_eq!(pool.sender_transaction_count.len(), 2);
    let sender_info = pool.sender_transaction_count.get(&sender).unwrap();
    assert_eq!(sender_info.count, 1);
    assert_eq!(sender_info.last_submission_id, submission_id);
    // Assert that the last sender submission is updated correctly
    assert_eq!(pool.last_sender_submission.len(), 2);
    let submission_info = pool.last_sender_submission.iter().next().unwrap();
    assert_eq!(submission_info.sender_id, sender);
    assert_eq!(submission_info.submission_id, submission_id);
}

#[test]
fn test_add_sender_count_existing_sender() {
    // Initialize a mock transaction factory
    let mut f = MockTransactionFactory::default();
    // Create an empty transaction pool
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    // Generate a validated transaction and add it to the pool
    let tx = f.validated_arc(MockTransaction::eip1559().inc_price());
    pool.add_transaction(tx);
    // Define a sender ID and initial submission ID
    let sender: SenderId = 11.into();
    let initial_submission_id = 1;
    // Add the sender count to the pool with the initial submission ID
    pool.add_sender_count(sender, initial_submission_id);
    // Define a new submission ID
    let new_submission_id = 2;
    // Add the sender count to the pool with the new submission ID
    pool.add_sender_count(sender, new_submission_id);
    // Assert that the sender transaction count is updated correctly:
    // the count increments and the last submission id is replaced.
    assert_eq!(pool.sender_transaction_count.len(), 2);
    let sender_info = pool.sender_transaction_count.get(&sender).unwrap();
    assert_eq!(sender_info.count, 2);
    assert_eq!(sender_info.last_submission_id, new_submission_id);
    // Assert that the last sender submission is updated correctly
    assert_eq!(pool.last_sender_submission.len(), 2);
    let submission_info = pool.last_sender_submission.iter().next().unwrap();
    assert_eq!(submission_info.sender_id, sender);
    assert_eq!(submission_info.submission_id, new_submission_id);
}

#[test]
fn test_add_sender_count_multiple_senders() {
    // Initialize a mock transaction factory
    let mut f = MockTransactionFactory::default();
    // Create an empty transaction pool
    let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
    // Generate two validated transactions and add them to the pool
    let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price());
    let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price());
    pool.add_transaction(tx1);
    pool.add_transaction(tx2);
    // Define two different sender IDs and their corresponding submission IDs
    let sender1: SenderId = 11.into();
    let sender2: SenderId = 22.into();
    // Add the sender counts to the pool
    pool.add_sender_count(sender1, 1);
    pool.add_sender_count(sender2, 2);
    // Assert that the sender transaction counts are updated correctly
    assert_eq!(pool.sender_transaction_count.len(), 4);
    let sender1_info = pool.sender_transaction_count.get(&sender1).unwrap();
    assert_eq!(sender1_info.count, 1);
    assert_eq!(sender1_info.last_submission_id, 1);
    let sender2_info = pool.sender_transaction_count.get(&sender2).unwrap();
    assert_eq!(sender2_info.count, 1);
    assert_eq!(sender2_info.last_submission_id, 2);
    // Assert that the last sender submission is updated correctly
    assert_eq!(pool.last_sender_submission.len(), 3);
    // Verify that sender 1 is not in the last sender submission
    let submission_info1 =
        pool.last_sender_submission.iter().find(|info| info.sender_id == sender1);
    assert!(submission_info1.is_none());
    // Verify that sender 2 is in the last sender submission
    let submission_info2 =
        pool.last_sender_submission.iter().find(|info| info.sender_id == sender2).unwrap();
    assert_eq!(submission_info2.sender_id, sender2);
    assert_eq!(submission_info2.submission_id, 2);
}
#[test]
fn test_remove_sender_count() {
// Initialize a mock transaction factory
let mut f = MockTransactionFactory::default();
// Create an empty transaction pool
let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
// Generate two validated transactions and add them to the pool
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/pool/size.rs | crates/transaction-pool/src/pool/size.rs | //! Tracks a size value.
use std::ops::{AddAssign, SubAssign};
/// Keeps track of accumulated size in bytes.
///
/// Note: We do not assume that size tracking is always exact. Depending on the bookkeeping of the
/// additions and subtractions the total size might be slightly off. Therefore, the underlying value
/// is an `isize`, so that the value does not wrap.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct SizeTracker(isize);

impl SizeTracker {
    /// Reset the size tracker.
    pub const fn reset(&mut self) {
        self.0 = 0;
    }
}

impl AddAssign<usize> for SizeTracker {
    /// Accounts for `amount` additional bytes.
    fn add_assign(&mut self, amount: usize) {
        self.0 += amount as isize;
    }
}

impl SubAssign<usize> for SizeTracker {
    /// Releases `amount` bytes; imprecise bookkeeping may drive the balance negative,
    /// which the signed representation tolerates without wrapping.
    fn sub_assign(&mut self, amount: usize) {
        self.0 -= amount as isize;
    }
}

impl From<SizeTracker> for usize {
    fn from(tracker: SizeTracker) -> Self {
        // Plain cast of the signed balance; see the type-level note on precision.
        tracker.0 as usize
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/blobstore/noop.rs | crates/transaction-pool/src/blobstore/noop.rs | use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError};
use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::B256;
use std::sync::Arc;
/// A blobstore implementation that does nothing
#[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Eq, Default)]
#[non_exhaustive]
pub struct NoopBlobStore;

impl BlobStore for NoopBlobStore {
    fn insert(
        &self,
        _tx: B256,
        _data: BlobTransactionSidecarVariant,
    ) -> Result<(), BlobStoreError> {
        // Sidecars are discarded.
        Ok(())
    }

    fn insert_all(
        &self,
        _txs: Vec<(B256, BlobTransactionSidecarVariant)>,
    ) -> Result<(), BlobStoreError> {
        Ok(())
    }

    fn delete(&self, _tx: B256) -> Result<(), BlobStoreError> {
        Ok(())
    }

    fn delete_all(&self, _txs: Vec<B256>) -> Result<(), BlobStoreError> {
        Ok(())
    }

    fn cleanup(&self) -> BlobStoreCleanupStat {
        BlobStoreCleanupStat::default()
    }

    fn get(&self, _tx: B256) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        // Nothing is ever stored, so nothing can ever be found.
        Ok(None)
    }

    fn contains(&self, _tx: B256) -> Result<bool, BlobStoreError> {
        Ok(false)
    }

    fn get_all(
        &self,
        _txs: Vec<B256>,
    ) -> Result<Vec<(B256, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
        Ok(Vec::new())
    }

    fn get_exact(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        // `get_exact` is all-or-nothing: any requested hash is necessarily missing
        // here, so report the first one. An empty request is trivially satisfied.
        match txs.first() {
            None => Ok(Vec::new()),
            Some(first) => Err(BlobStoreError::MissingSidecar(*first)),
        }
    }

    fn get_by_versioned_hashes_v1(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
        // One `None` per requested hash, preserving request length.
        Ok(versioned_hashes.iter().map(|_| None).collect())
    }

    fn get_by_versioned_hashes_v2(
        &self,
        _versioned_hashes: &[B256],
    ) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
        // V2 requires *all* hashes to be present; none ever are.
        Ok(None)
    }

    fn data_size_hint(&self) -> Option<usize> {
        Some(0)
    }

    fn blobs_len(&self) -> usize {
        0
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/blobstore/tracker.rs | crates/transaction-pool/src/blobstore/tracker.rs | //! Support for maintaining the blob pool.
use alloy_consensus::Typed2718;
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{BlockNumber, B256};
use reth_execution_types::ChainBlocks;
use reth_primitives_traits::{Block, BlockBody, SignedTransaction};
use std::collections::BTreeMap;
/// The type that is used to track canonical blob transactions.
#[derive(Debug, Default, Eq, PartialEq)]
pub struct BlobStoreCanonTracker {
    /// Keeps track of the blob transactions included in blocks.
    // Ordered by block number so that the finalized prefix can be drained cheaply
    // from the front in `on_finalized_block`.
    blob_txs_in_blocks: BTreeMap<BlockNumber, Vec<B256>>,
}
impl BlobStoreCanonTracker {
    /// Adds a block to the blob store maintenance.
    pub fn add_block(
        &mut self,
        block_number: BlockNumber,
        blob_txs: impl IntoIterator<Item = B256>,
    ) {
        let hashes: Vec<B256> = blob_txs.into_iter().collect();
        self.blob_txs_in_blocks.insert(block_number, hashes);
    }

    /// Adds all blocks to the tracked list of blocks.
    ///
    /// Replaces any previously tracked blocks with the set of transactions.
    pub fn add_blocks(
        &mut self,
        blocks: impl IntoIterator<Item = (BlockNumber, impl IntoIterator<Item = B256>)>,
    ) {
        for (number, txs) in blocks {
            self.add_block(number, txs);
        }
    }

    /// Adds all blob transactions from the given chain to the tracker.
    ///
    /// Note: In case this is a chain that's part of a reorg, this replaces previously tracked
    /// blocks.
    pub fn add_new_chain_blocks<B>(&mut self, blocks: &ChainBlocks<'_, B>)
    where
        B: Block<Body: BlockBody<Transaction: SignedTransaction>>,
    {
        for (num, block) in blocks.iter() {
            // Only EIP-4844 transactions carry blob sidecars worth tracking.
            let hashes = block
                .body()
                .transactions()
                .iter()
                .filter(|tx| tx.is_eip4844())
                .map(|tx| tx.trie_hash());
            self.add_block(*num, hashes);
        }
    }

    /// Invoked when a block is finalized.
    ///
    /// This returns all blob transactions that were included in blocks that are now finalized.
    pub fn on_finalized_block(&mut self, finalized_block: BlockNumber) -> BlobStoreUpdates {
        let mut freed = Vec::new();
        // Drain tracked blocks from the front (lowest block number) up to and
        // including the finalized height.
        while let Some(entry) = self.blob_txs_in_blocks.first_entry() {
            if *entry.key() > finalized_block {
                break
            }
            freed.extend(entry.remove_entry().1);
        }
        if freed.is_empty() {
            BlobStoreUpdates::None
        } else {
            BlobStoreUpdates::Finalized(freed)
        }
    }
}
/// Updates that should be applied to the blob store.
#[derive(Debug, Eq, PartialEq)]
pub enum BlobStoreUpdates {
    /// No updates.
    None,
    /// Delete the given finalized transactions from the blob store.
    Finalized(Vec<B256>),
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::{Header, Signed};
    use alloy_primitives::Signature;
    use reth_ethereum_primitives::Transaction;
    use reth_execution_types::Chain;
    use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader};

    #[test]
    fn test_finalized_tracker() {
        let mut tracker = BlobStoreCanonTracker::default();
        let block1 = vec![B256::random()];
        let block2 = vec![B256::random()];
        let block3 = vec![B256::random()];
        tracker.add_block(1, block1.clone());
        tracker.add_block(2, block2.clone());
        tracker.add_block(3, block3.clone());
        // Nothing at or below height 0 is tracked.
        assert_eq!(tracker.on_finalized_block(0), BlobStoreUpdates::None);
        assert_eq!(tracker.on_finalized_block(1), BlobStoreUpdates::Finalized(block1));
        // Finalizing height 3 drains both remaining blocks in block-number order.
        assert_eq!(
            tracker.on_finalized_block(3),
            BlobStoreUpdates::Finalized(block2.into_iter().chain(block3).collect::<Vec<_>>())
        );
    }

    #[test]
    fn test_add_new_chain_blocks() {
        let mut tracker = BlobStoreCanonTracker::default();
        // Create sample transactions
        let tx1_signed = Signed::new_unhashed(
            Transaction::Eip4844(Default::default()),
            Signature::test_signature(),
        ); // EIP-4844 transaction
        let tx2_signed = Signed::new_unhashed(
            Transaction::Eip4844(Default::default()),
            Signature::test_signature(),
        ); // EIP-4844 transaction
        let tx1_hash = *tx1_signed.hash();
        let tx2_hash = *tx2_signed.hash();
        // Creating a first block with EIP-4844 transactions
        let block1 = RecoveredBlock::new_sealed(
            SealedBlock::from_sealed_parts(
                SealedHeader::new(Header { number: 10, ..Default::default() }, B256::random()),
                alloy_consensus::BlockBody {
                    transactions: vec![
                        tx1_signed.into(),
                        tx2_signed.into(),
                        // Another transaction that is not EIP-4844
                        Signed::new_unhashed(
                            Transaction::Eip7702(Default::default()),
                            Signature::test_signature(),
                        )
                        .into(),
                    ],
                    ..Default::default()
                },
            ),
            Default::default(),
        );
        // Creating a second block with EIP-1559 and EIP-2930 transactions
        // Note: This block does not contain any EIP-4844 transactions
        let block2 = RecoveredBlock::new_sealed(
            SealedBlock::from_sealed_parts(
                SealedHeader::new(Header { number: 11, ..Default::default() }, B256::random()),
                alloy_consensus::BlockBody {
                    transactions: vec![
                        Signed::new_unhashed(
                            Transaction::Eip1559(Default::default()),
                            Signature::test_signature(),
                        )
                        .into(),
                        Signed::new_unhashed(
                            Transaction::Eip2930(Default::default()),
                            Signature::test_signature(),
                        )
                        .into(),
                    ],
                    ..Default::default()
                },
            ),
            Default::default(),
        );
        // Extract blocks from the chain
        let chain: Chain = Chain::new(vec![block1, block2], Default::default(), None);
        let blocks = chain.into_inner().0;
        // Add new chain blocks to the tracker
        tracker.add_new_chain_blocks(&blocks);
        // Tx1 and tx2 should be in the block containing EIP-4844 transactions
        assert_eq!(tracker.blob_txs_in_blocks.get(&10).unwrap(), &vec![tx1_hash, tx2_hash]);
        // No transactions should be in the block containing non-EIP-4844 transactions
        assert!(tracker.blob_txs_in_blocks.get(&11).unwrap().is_empty());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/blobstore/mod.rs | crates/transaction-pool/src/blobstore/mod.rs | //! Storage for blob data of EIP4844 transactions.
use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::B256;
pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore};
pub use mem::InMemoryBlobStore;
pub use noop::NoopBlobStore;
use std::{
fmt,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates};
pub mod disk;
mod mem;
mod noop;
mod tracker;
/// A blob store that can be used to store blob data of EIP4844 transactions.
///
/// This type is responsible for keeping track of blob data until it is no longer needed (after
/// finalization).
///
/// Note: this is Clone because it is expected to be wrapped in an Arc.
pub trait BlobStore: fmt::Debug + Send + Sync + 'static {
    /// Inserts the blob sidecar into the store
    fn insert(&self, tx: B256, data: BlobTransactionSidecarVariant) -> Result<(), BlobStoreError>;

    /// Inserts multiple blob sidecars into the store
    fn insert_all(
        &self,
        txs: Vec<(B256, BlobTransactionSidecarVariant)>,
    ) -> Result<(), BlobStoreError>;

    /// Deletes the blob sidecar from the store
    fn delete(&self, tx: B256) -> Result<(), BlobStoreError>;

    /// Deletes multiple blob sidecars from the store
    fn delete_all(&self, txs: Vec<B256>) -> Result<(), BlobStoreError>;

    /// A maintenance function that can be called periodically to clean up the blob store, returns
    /// the number of successfully deleted blobs and the number of failed deletions.
    ///
    /// This is intended to be called in the background to clean up any old or unused data, in case
    /// the store uses deferred cleanup: [`DiskFileBlobStore`]
    fn cleanup(&self) -> BlobStoreCleanupStat;

    /// Retrieves the decoded blob data for the given transaction hash.
    fn get(&self, tx: B256) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError>;

    /// Checks if the given transaction hash is in the blob store.
    fn contains(&self, tx: B256) -> Result<bool, BlobStoreError>;

    /// Retrieves all decoded blob data for the given transaction hashes.
    ///
    /// This only returns the blobs that were found in the store.
    /// If there's no blob it will not be returned.
    ///
    /// Note: this is not guaranteed to return the blobs in the same order as the input.
    fn get_all(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<(B256, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError>;

    /// Returns the exact [`BlobTransactionSidecarVariant`] for the given transaction hashes in the
    /// exact order they were requested.
    ///
    /// Returns an error if any of the blobs are not found in the blob store.
    fn get_exact(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError>;

    /// Return the [`BlobAndProofV1`]s for a list of blob versioned hashes.
    fn get_by_versioned_hashes_v1(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError>;

    /// Return the [`BlobAndProofV2`]s for a list of blob versioned hashes.
    /// Blobs and proofs are returned only if they are present for _all_ requested
    /// versioned hashes.
    ///
    /// This differs from [`BlobStore::get_by_versioned_hashes_v1`] in that it also returns all the
    /// cell proofs in [`BlobAndProofV2`] supported by the EIP-7594 blob sidecar variant.
    ///
    /// The response also differs from [`BlobStore::get_by_versioned_hashes_v1`] in that this
    /// returns `None` if any of the requested versioned hashes are not present in the blob store:
    /// e.g. where v1 would return `[A, None, C]` v2 would return `None`. See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2>
    fn get_by_versioned_hashes_v2(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError>;

    /// Data size of all transactions in the blob store.
    ///
    /// Returns `None` if the implementation cannot provide an estimate.
    fn data_size_hint(&self) -> Option<usize>;

    /// How many blobs are in the blob store.
    fn blobs_len(&self) -> usize;
}
/// Error variants that can occur when interacting with a blob store.
#[derive(Debug, thiserror::Error)]
pub enum BlobStoreError {
    /// Thrown if the blob sidecar is not found for a given transaction hash but was required.
    #[error("blob sidecar not found for transaction {0:?}")]
    MissingSidecar(B256),
    /// Failed to decode the stored blob data.
    #[error("failed to decode blob data: {0}")]
    DecodeError(#[from] alloy_rlp::Error),
    /// Other implementation specific error.
    #[error(transparent)]
    Other(Box<dyn core::error::Error + Send + Sync>),
}

/// Keeps track of the size of the blob store.
#[derive(Debug, Default)]
pub(crate) struct BlobStoreSize {
    // Total payload size of all tracked blobs, in bytes.
    data_size: AtomicUsize,
    // Number of blob sidecars currently tracked.
    num_blobs: AtomicUsize,
}

impl BlobStoreSize {
    /// Adds `add` bytes to the tracked data size.
    #[inline]
    pub(crate) fn add_size(&self, add: usize) {
        self.data_size.fetch_add(add, Ordering::Relaxed);
    }

    /// Subtracts `sub` bytes from the tracked data size, saturating at zero.
    #[inline]
    pub(crate) fn sub_size(&self, sub: usize) {
        // The closure never returns `None`, so the ignored `Result` is always `Ok`;
        // `fetch_update` is used for the CAS loop that makes the saturation atomic.
        let _ = self.data_size.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |current| {
            Some(current.saturating_sub(sub))
        });
    }

    /// Overwrites the tracked number of blobs.
    #[inline]
    pub(crate) fn update_len(&self, len: usize) {
        self.num_blobs.store(len, Ordering::Relaxed);
    }

    /// Increments the tracked number of blobs by `add`.
    #[inline]
    pub(crate) fn inc_len(&self, add: usize) {
        self.num_blobs.fetch_add(add, Ordering::Relaxed);
    }

    /// Decrements the tracked number of blobs by `sub`, saturating at zero.
    #[inline]
    pub(crate) fn sub_len(&self, sub: usize) {
        let _ = self.num_blobs.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |current| {
            Some(current.saturating_sub(sub))
        });
    }

    /// Currently tracked data size in bytes.
    #[inline]
    pub(crate) fn data_size(&self) -> usize {
        self.data_size.load(Ordering::Relaxed)
    }

    /// Currently tracked number of blobs.
    #[inline]
    pub(crate) fn blobs_len(&self) -> usize {
        self.num_blobs.load(Ordering::Relaxed)
    }
}

impl PartialEq for BlobStoreSize {
    fn eq(&self, other: &Self) -> bool {
        // NOTE(review): two independent relaxed loads per side — this is not an
        // atomic snapshot of both counters; assumed acceptable for equality checks.
        self.data_size.load(Ordering::Relaxed) == other.data_size.load(Ordering::Relaxed) &&
            self.num_blobs.load(Ordering::Relaxed) == other.num_blobs.load(Ordering::Relaxed)
    }
}

/// Statistics for the cleanup operation.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct BlobStoreCleanupStat {
    /// the number of successfully deleted blobs
    pub delete_succeed: usize,
    /// the number of failed deletions
    pub delete_failed: usize,
}

#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check that `BlobStore` can be used as a trait object
    // (i.e. it is dyn-compatible); the struct itself is never instantiated.
    #[expect(dead_code)]
    struct DynStore {
        store: Box<dyn BlobStore>,
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/blobstore/mem.rs | crates/transaction-pool/src/blobstore/mem.rs | use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize};
use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::B256;
use parking_lot::RwLock;
use std::{collections::HashMap, sync::Arc};
/// An in-memory blob store.
///
/// Cloning is cheap: clones share the same underlying storage via `Arc`.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct InMemoryBlobStore {
    inner: Arc<InMemoryBlobStoreInner>,
}
#[derive(Debug, Default)]
struct InMemoryBlobStoreInner {
    /// Storage for all blob data.
    store: RwLock<HashMap<B256, Arc<BlobTransactionSidecarVariant>>>,
    /// Running totals (byte size and count) derived from `store`.
    size_tracker: BlobStoreSize,
}
impl PartialEq for InMemoryBlobStoreInner {
    // Equality compares only the stored blobs; `size_tracker` is derived metadata.
    fn eq(&self, other: &Self) -> bool {
        self.store.read().eq(&other.store.read())
    }
}
impl BlobStore for InMemoryBlobStore {
    fn insert(&self, tx: B256, data: BlobTransactionSidecarVariant) -> Result<(), BlobStoreError> {
        let mut store = self.inner.store.write();
        // Account for the inserted bytes, then refresh the blob count from the map itself.
        self.inner.size_tracker.add_size(insert_size(&mut store, tx, data));
        self.inner.size_tracker.update_len(store.len());
        Ok(())
    }
    fn insert_all(
        &self,
        txs: Vec<(B256, BlobTransactionSidecarVariant)>,
    ) -> Result<(), BlobStoreError> {
        if txs.is_empty() {
            return Ok(())
        }
        let mut store = self.inner.store.write();
        // Accumulate the added bytes so the tracker is updated once, under one write lock.
        let mut total_add = 0;
        for (tx, data) in txs {
            let add = insert_size(&mut store, tx, data);
            total_add += add;
        }
        self.inner.size_tracker.add_size(total_add);
        self.inner.size_tracker.update_len(store.len());
        Ok(())
    }
    fn delete(&self, tx: B256) -> Result<(), BlobStoreError> {
        let mut store = self.inner.store.write();
        let sub = remove_size(&mut store, &tx);
        self.inner.size_tracker.sub_size(sub);
        self.inner.size_tracker.update_len(store.len());
        Ok(())
    }
    fn delete_all(&self, txs: Vec<B256>) -> Result<(), BlobStoreError> {
        if txs.is_empty() {
            return Ok(())
        }
        let mut store = self.inner.store.write();
        let mut total_sub = 0;
        for tx in txs {
            total_sub += remove_size(&mut store, &tx);
        }
        self.inner.size_tracker.sub_size(total_sub);
        self.inner.size_tracker.update_len(store.len());
        Ok(())
    }
    // In-memory deletions take effect immediately, so there is never anything to clean up.
    fn cleanup(&self) -> BlobStoreCleanupStat {
        BlobStoreCleanupStat::default()
    }
    // Retrieves the decoded blob data for the given transaction hash.
    fn get(&self, tx: B256) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        Ok(self.inner.store.read().get(&tx).cloned())
    }
    fn contains(&self, tx: B256) -> Result<bool, BlobStoreError> {
        Ok(self.inner.store.read().contains_key(&tx))
    }
    fn get_all(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<(B256, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
        let store = self.inner.store.read();
        // Misses are silently skipped: the result may be a subset of the request.
        Ok(txs.into_iter().filter_map(|tx| store.get(&tx).map(|item| (tx, item.clone()))).collect())
    }
    fn get_exact(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        let store = self.inner.store.read();
        // NOTE(review): despite the `_exact` name, missing entries are silently dropped here,
        // whereas the disk store returns `BlobStoreError::MissingSidecar` — confirm whether
        // this divergence from the disk implementation is intentional.
        Ok(txs.into_iter().filter_map(|tx| store.get(&tx).cloned()).collect())
    }
    fn get_by_versioned_hashes_v1(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
        // The response must have the same length as the request; misses stay `None`.
        let mut result = vec![None; versioned_hashes.len()];
        for (_tx_hash, blob_sidecar) in self.inner.store.read().iter() {
            // V1 only matches legacy EIP-4844 sidecars.
            if let Some(blob_sidecar) = blob_sidecar.as_eip4844() {
                for (hash_idx, match_result) in
                    blob_sidecar.match_versioned_hashes(versioned_hashes)
                {
                    result[hash_idx] = Some(match_result);
                }
            }
            // Return early if all blobs are found.
            if result.iter().all(|blob| blob.is_some()) {
                break;
            }
        }
        Ok(result)
    }
    fn get_by_versioned_hashes_v2(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
        let mut result = vec![None; versioned_hashes.len()];
        for (_tx_hash, blob_sidecar) in self.inner.store.read().iter() {
            // V2 only matches EIP-7594 sidecars.
            if let Some(blob_sidecar) = blob_sidecar.as_eip7594() {
                for (hash_idx, match_result) in
                    blob_sidecar.match_versioned_hashes(versioned_hashes)
                {
                    result[hash_idx] = Some(match_result);
                }
            }
            if result.iter().all(|blob| blob.is_some()) {
                break;
            }
        }
        // V2 is all-or-nothing: only return the blobs if every requested hash was matched.
        if result.iter().all(|blob| blob.is_some()) {
            Ok(Some(result.into_iter().map(Option::unwrap).collect()))
        } else {
            Ok(None)
        }
    }
    fn data_size_hint(&self) -> Option<usize> {
        Some(self.inner.size_tracker.data_size())
    }
    fn blobs_len(&self) -> usize {
        self.inner.size_tracker.blobs_len()
    }
}
/// Removes the sidecar stored under `tx` and returns how many bytes were freed.
///
/// Returns zero when no entry existed for the hash.
#[inline]
fn remove_size(store: &mut HashMap<B256, Arc<BlobTransactionSidecarVariant>>, tx: &B256) -> usize {
    match store.remove(tx) {
        Some(removed) => removed.size(),
        None => 0,
    }
}
/// Stores `blob` under `tx` and returns the number of bytes that were added.
///
/// Replacement sizes are not reconciled here: transaction hashes are unique, so an existing
/// entry for `tx` is not expected.
#[inline]
fn insert_size(
    store: &mut HashMap<B256, Arc<BlobTransactionSidecarVariant>>,
    tx: B256,
    blob: BlobTransactionSidecarVariant,
) -> usize {
    // Measure before moving the blob into the map.
    let bytes = blob.size();
    store.insert(tx, Arc::new(blob));
    bytes
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/blobstore/disk.rs | crates/transaction-pool/src/blobstore/disk.rs | //! A simple diskstore for blobs
use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize};
use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7594::BlobTransactionSidecarVariant,
};
use alloy_primitives::{TxHash, B256};
use parking_lot::{Mutex, RwLock};
use schnellru::{ByLength, LruMap};
use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc};
use tracing::{debug, trace};
/// How many [`BlobTransactionSidecarVariant`] to cache in memory.
pub const DEFAULT_MAX_CACHED_BLOBS: u32 = 100;
/// A blob store that stores blob data on disk.
///
/// The type uses deferred deletion, meaning that blobs are not immediately deleted from disk, but
/// it's expected that the maintenance task will call [`BlobStore::cleanup`] to remove the deleted
/// blobs from disk.
///
/// Cloning is cheap: clones share the same directory, cache, and size tracker via `Arc`.
#[derive(Clone, Debug)]
pub struct DiskFileBlobStore {
    inner: Arc<DiskFileBlobStoreInner>,
}
impl DiskFileBlobStore {
    /// Opens and initializes a new disk file blob store according to the given options.
    ///
    /// NOTE(review): only `max_cached_entries` from `opts` is honored; the `open` mode is
    /// discarded by the destructuring below and the blob directory is always wiped, so
    /// `OpenDiskFileBlobStore::ReIndex` currently behaves like `Clear` — confirm intended.
    pub fn open(
        blob_dir: impl Into<PathBuf>,
        opts: DiskFileBlobStoreConfig,
    ) -> Result<Self, DiskFileBlobStoreError> {
        let blob_dir = blob_dir.into();
        let DiskFileBlobStoreConfig { max_cached_entries, .. } = opts;
        let inner = DiskFileBlobStoreInner::new(blob_dir, max_cached_entries);
        // initialize the blob store: remove any leftover blobs, then (re)create the directory
        inner.delete_all()?;
        inner.create_blob_dir()?;
        Ok(Self { inner: Arc::new(inner) })
    }
    /// Returns whether the blob for `tx` is currently held in the in-memory LRU cache.
    #[cfg(test)]
    fn is_cached(&self, tx: &B256) -> bool {
        self.inner.blob_cache.lock().get(tx).is_some()
    }
    /// Drops all entries from the in-memory LRU cache (disk files are untouched).
    #[cfg(test)]
    fn clear_cache(&self) {
        self.inner.blob_cache.lock().clear()
    }
}
impl BlobStore for DiskFileBlobStore {
    fn insert(&self, tx: B256, data: BlobTransactionSidecarVariant) -> Result<(), BlobStoreError> {
        self.inner.insert_one(tx, data)
    }
    fn insert_all(
        &self,
        txs: Vec<(B256, BlobTransactionSidecarVariant)>,
    ) -> Result<(), BlobStoreError> {
        if txs.is_empty() {
            return Ok(())
        }
        self.inner.insert_many(txs)
    }
    // Deletion is deferred: the hash is only queued here; the file is removed in `cleanup`.
    fn delete(&self, tx: B256) -> Result<(), BlobStoreError> {
        if self.inner.contains(tx)? {
            self.inner.txs_to_delete.write().insert(tx);
        }
        Ok(())
    }
    fn delete_all(&self, txs: Vec<B256>) -> Result<(), BlobStoreError> {
        // Only queue hashes that actually exist so cleanup stats don't count phantom deletes.
        let txs = self.inner.retain_existing(txs)?;
        self.inner.txs_to_delete.write().extend(txs);
        Ok(())
    }
    fn cleanup(&self) -> BlobStoreCleanupStat {
        // Atomically take ownership of the pending-delete set, leaving an empty one behind.
        let txs_to_delete = std::mem::take(&mut *self.inner.txs_to_delete.write());
        let mut stat = BlobStoreCleanupStat::default();
        let mut subsize = 0;
        debug!(target:"txpool::blob", num_blobs=%txs_to_delete.len(), "Removing blobs from disk");
        for tx in txs_to_delete {
            let path = self.inner.blob_disk_file(tx);
            // Capture the file size before removal so the size tracker can be decremented.
            let filesize = fs::metadata(&path).map_or(0, |meta| meta.len());
            match fs::remove_file(&path) {
                Ok(_) => {
                    stat.delete_succeed += 1;
                    subsize += filesize;
                }
                Err(e) => {
                    // Failures are logged and counted but not retried.
                    stat.delete_failed += 1;
                    let err = DiskFileBlobStoreError::DeleteFile(tx, path, e);
                    debug!(target:"txpool::blob", %err);
                }
            };
        }
        self.inner.size_tracker.sub_size(subsize as usize);
        self.inner.size_tracker.sub_len(stat.delete_succeed);
        stat
    }
    fn get(&self, tx: B256) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        self.inner.get_one(tx)
    }
    fn contains(&self, tx: B256) -> Result<bool, BlobStoreError> {
        self.inner.contains(tx)
    }
    fn get_all(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<(B256, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
        if txs.is_empty() {
            return Ok(Vec::new())
        }
        self.inner.get_all(txs)
    }
    fn get_exact(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        if txs.is_empty() {
            return Ok(Vec::new())
        }
        self.inner.get_exact(txs)
    }
    fn get_by_versioned_hashes_v1(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
        // the response must always be the same len as the request, misses must be None
        let mut result = vec![None; versioned_hashes.len()];
        // first scan all cached full sidecars
        for (_tx_hash, blob_sidecar) in self.inner.blob_cache.lock().iter() {
            if let Some(blob_sidecar) = blob_sidecar.as_eip4844() {
                for (hash_idx, match_result) in
                    blob_sidecar.match_versioned_hashes(versioned_hashes)
                {
                    result[hash_idx] = Some(match_result);
                }
            }
            // return early if all blobs are found.
            if result.iter().all(|blob| blob.is_some()) {
                return Ok(result);
            }
        }
        // not all versioned hashes were found, try to look up a matching tx
        let mut missing_tx_hashes = Vec::new();
        {
            let mut versioned_to_txhashes = self.inner.versioned_hashes_to_txhash.lock();
            for (idx, _) in
                result.iter().enumerate().filter(|(_, blob_and_proof)| blob_and_proof.is_none())
            {
                // this is safe because the result vec has the same len
                let versioned_hash = versioned_hashes[idx];
                if let Some(tx_hash) = versioned_to_txhashes.get(&versioned_hash).copied() {
                    missing_tx_hashes.push(tx_hash);
                }
            }
        }
        // if we have missing blobs, try to read them from disk and try again
        if !missing_tx_hashes.is_empty() {
            let blobs_from_disk = self.inner.read_many_decoded(missing_tx_hashes);
            for (_, blob_sidecar) in blobs_from_disk {
                if let Some(blob_sidecar) = blob_sidecar.as_eip4844() {
                    for (hash_idx, match_result) in
                        blob_sidecar.match_versioned_hashes(versioned_hashes)
                    {
                        // only fill slots still missing; cache hits take precedence
                        if result[hash_idx].is_none() {
                            result[hash_idx] = Some(match_result);
                        }
                    }
                }
            }
        }
        Ok(result)
    }
    fn get_by_versioned_hashes_v2(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Option<Vec<BlobAndProofV2>>, BlobStoreError> {
        // we must return the blobs in order but we don't necessarily find them in the requested
        // order
        let mut result = vec![None; versioned_hashes.len()];
        // first scan all cached full sidecars
        for (_tx_hash, blob_sidecar) in self.inner.blob_cache.lock().iter() {
            if let Some(blob_sidecar) = blob_sidecar.as_eip7594() {
                for (hash_idx, match_result) in
                    blob_sidecar.match_versioned_hashes(versioned_hashes)
                {
                    result[hash_idx] = Some(match_result);
                }
            }
            // return early if all blobs are found.
            if result.iter().all(|blob| blob.is_some()) {
                // got all blobs, can return early
                return Ok(Some(result.into_iter().map(Option::unwrap).collect()))
            }
        }
        // not all versioned hashes were found, try to look up a matching tx
        let mut missing_tx_hashes = Vec::new();
        {
            let mut versioned_to_txhashes = self.inner.versioned_hashes_to_txhash.lock();
            for (idx, _) in
                result.iter().enumerate().filter(|(_, blob_and_proof)| blob_and_proof.is_none())
            {
                // this is safe because the result vec has the same len
                let versioned_hash = versioned_hashes[idx];
                if let Some(tx_hash) = versioned_to_txhashes.get(&versioned_hash).copied() {
                    missing_tx_hashes.push(tx_hash);
                }
            }
        }
        // if we have missing blobs, try to read them from disk and try again
        if !missing_tx_hashes.is_empty() {
            let blobs_from_disk = self.inner.read_many_decoded(missing_tx_hashes);
            for (_, blob_sidecar) in blobs_from_disk {
                if let Some(blob_sidecar) = blob_sidecar.as_eip7594() {
                    for (hash_idx, match_result) in
                        blob_sidecar.match_versioned_hashes(versioned_hashes)
                    {
                        if result[hash_idx].is_none() {
                            result[hash_idx] = Some(match_result);
                        }
                    }
                }
            }
        }
        // only return the blobs if we found all requested versioned hashes
        if result.iter().all(|blob| blob.is_some()) {
            Ok(Some(result.into_iter().map(Option::unwrap).collect()))
        } else {
            Ok(None)
        }
    }
    fn data_size_hint(&self) -> Option<usize> {
        Some(self.inner.size_tracker.data_size())
    }
    fn blobs_len(&self) -> usize {
        self.inner.size_tracker.blobs_len()
    }
}
struct DiskFileBlobStoreInner {
    /// Directory where blob files live, one file per transaction hash.
    blob_dir: PathBuf,
    /// LRU cache of recently used sidecars, keyed by transaction hash.
    blob_cache: Mutex<LruMap<TxHash, Arc<BlobTransactionSidecarVariant>, ByLength>>,
    /// Tracks on-disk byte size and blob count.
    size_tracker: BlobStoreSize,
    /// Coordinates file-system access: writes take the write lock, reads the read lock.
    file_lock: RwLock<()>,
    /// Hashes queued for deferred deletion; drained by [`BlobStore::cleanup`].
    txs_to_delete: RwLock<HashSet<B256>>,
    /// Tracks of known versioned hashes and a transaction they exist in
    ///
    /// Note: It is possible that one blob can appear in multiple transactions but this only tracks
    /// the most recent one.
    versioned_hashes_to_txhash: Mutex<LruMap<B256, B256>>,
}
impl DiskFileBlobStoreInner {
    /// Creates a new empty disk file blob store with the given maximum length of the blob cache.
    fn new(blob_dir: PathBuf, max_length: u32) -> Self {
        Self {
            blob_dir,
            blob_cache: Mutex::new(LruMap::new(ByLength::new(max_length))),
            size_tracker: Default::default(),
            file_lock: Default::default(),
            txs_to_delete: Default::default(),
            // Sized at 6x the blob cache — presumably to cover up to 6 versioned hashes per
            // cached transaction; TODO confirm the intended ratio.
            versioned_hashes_to_txhash: Mutex::new(LruMap::new(ByLength::new(max_length * 6))),
        }
    }
    /// Creates the directory where blobs will be stored on disk.
    fn create_blob_dir(&self) -> Result<(), DiskFileBlobStoreError> {
        debug!(target:"txpool::blob", blob_dir = ?self.blob_dir, "Creating blob store");
        fs::create_dir_all(&self.blob_dir)
            .map_err(|e| DiskFileBlobStoreError::Open(self.blob_dir.clone(), e))
    }
    /// Deletes the entire blob store.
    fn delete_all(&self) -> Result<(), DiskFileBlobStoreError> {
        match fs::remove_dir_all(&self.blob_dir) {
            Ok(_) => {
                debug!(target:"txpool::blob", blob_dir = ?self.blob_dir, "Removed blob store directory");
            }
            // A missing directory is fine — there was simply nothing to delete.
            Err(err) if err.kind() == io::ErrorKind::NotFound => {}
            Err(err) => return Err(DiskFileBlobStoreError::Open(self.blob_dir.clone(), err)),
        }
        Ok(())
    }
    /// Ensures blob is in the blob cache and written to the disk.
    ///
    /// NOTE(review): the blob count is incremented unconditionally here even when the file
    /// already existed on disk (in which case `write_one_encoded` returns 0 bytes), whereas
    /// `insert_many` only counts newly written files — confirm whether the asymmetry matters.
    fn insert_one(
        &self,
        tx: B256,
        data: BlobTransactionSidecarVariant,
    ) -> Result<(), BlobStoreError> {
        let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length());
        data.rlp_encode_fields(&mut buf);
        {
            // cache the versioned hashes to tx hash
            let mut map = self.versioned_hashes_to_txhash.lock();
            data.versioned_hashes().for_each(|hash| {
                map.insert(hash, tx);
            });
        }
        self.blob_cache.lock().insert(tx, Arc::new(data));
        let size = self.write_one_encoded(tx, &buf)?;
        self.size_tracker.add_size(size);
        self.size_tracker.inc_len(1);
        Ok(())
    }
    /// Ensures blobs are in the blob cache and written to the disk.
    fn insert_many(
        &self,
        txs: Vec<(B256, BlobTransactionSidecarVariant)>,
    ) -> Result<(), BlobStoreError> {
        // Pre-encode everything before taking any locks.
        let raw = txs
            .iter()
            .map(|(tx, data)| {
                let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length());
                data.rlp_encode_fields(&mut buf);
                (self.blob_disk_file(*tx), buf)
            })
            .collect::<Vec<_>>();
        {
            // cache versioned hashes to tx hash
            let mut map = self.versioned_hashes_to_txhash.lock();
            for (tx, data) in &txs {
                data.versioned_hashes().for_each(|hash| {
                    map.insert(hash, *tx);
                });
            }
        }
        {
            // cache blobs
            let mut cache = self.blob_cache.lock();
            for (tx, data) in txs {
                cache.insert(tx, Arc::new(data));
            }
        }
        let mut add = 0;
        let mut num = 0;
        {
            // Single write lock for the whole batch of file writes.
            let _lock = self.file_lock.write();
            for (path, data) in raw {
                if path.exists() {
                    // Already on disk — don't rewrite and don't double-count its size.
                    debug!(target:"txpool::blob", ?path, "Blob already exists");
                } else if let Err(err) = fs::write(&path, &data) {
                    // Write failures are logged but not propagated; the blob stays cached.
                    debug!(target:"txpool::blob", %err, ?path, "Failed to write blob file");
                } else {
                    add += data.len();
                    num += 1;
                }
            }
        }
        self.size_tracker.add_size(add);
        self.size_tracker.inc_len(num);
        Ok(())
    }
    /// Returns true if the blob for the given transaction hash is in the blob cache or on disk.
    fn contains(&self, tx: B256) -> Result<bool, BlobStoreError> {
        if self.blob_cache.lock().get(&tx).is_some() {
            return Ok(true)
        }
        // we only check if the file exists and assume it's valid
        Ok(self.blob_disk_file(tx).is_file())
    }
    /// Returns all the blob transactions which are in the cache or on the disk.
    fn retain_existing(&self, txs: Vec<B256>) -> Result<Vec<B256>, BlobStoreError> {
        // Split into cache hits and candidates that still need a disk check.
        let (in_cache, not_in_cache): (Vec<B256>, Vec<B256>) = {
            let mut cache = self.blob_cache.lock();
            txs.into_iter().partition(|tx| cache.get(tx).is_some())
        };
        let mut existing = in_cache;
        for tx in not_in_cache {
            if self.blob_disk_file(tx).is_file() {
                existing.push(tx);
            }
        }
        Ok(existing)
    }
    /// Retrieves the blob for the given transaction hash from the blob cache or disk.
    fn get_one(
        &self,
        tx: B256,
    ) -> Result<Option<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        if let Some(blob) = self.blob_cache.lock().get(&tx) {
            return Ok(Some(blob.clone()))
        }
        // Cache miss: fall back to disk and re-populate the cache on success.
        if let Some(blob) = self.read_one(tx)? {
            let blob_arc = Arc::new(blob);
            self.blob_cache.lock().insert(tx, blob_arc.clone());
            return Ok(Some(blob_arc))
        }
        Ok(None)
    }
    /// Returns the path to the blob file for the given transaction hash.
    #[inline]
    fn blob_disk_file(&self, tx: B256) -> PathBuf {
        // File name is the lowercase hex of the tx hash.
        self.blob_dir.join(format!("{tx:x}"))
    }
    /// Retrieves the blob data for the given transaction hash.
    ///
    /// Returns `Ok(None)` when no file exists for the hash; decode failures are errors.
    #[inline]
    fn read_one(&self, tx: B256) -> Result<Option<BlobTransactionSidecarVariant>, BlobStoreError> {
        let path = self.blob_disk_file(tx);
        let data = {
            let _lock = self.file_lock.read();
            match fs::read(&path) {
                Ok(data) => data,
                Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
                Err(e) => {
                    return Err(BlobStoreError::Other(Box::new(DiskFileBlobStoreError::ReadFile(
                        tx, path, e,
                    ))))
                }
            }
        };
        BlobTransactionSidecarVariant::rlp_decode_fields(&mut data.as_slice())
            .map(Some)
            .map_err(BlobStoreError::DecodeError)
    }
    /// Returns decoded blobs read from disk.
    ///
    /// Only returns sidecars that were found and successfully decoded.
    fn read_many_decoded(&self, txs: Vec<TxHash>) -> Vec<(TxHash, BlobTransactionSidecarVariant)> {
        self.read_many_raw(txs)
            .into_iter()
            .filter_map(|(tx, data)| {
                // Silently drop entries that fail to decode.
                BlobTransactionSidecarVariant::rlp_decode_fields(&mut data.as_slice())
                    .map(|sidecar| (tx, sidecar))
                    .ok()
            })
            .collect()
    }
    /// Retrieves the raw blob data for the given transaction hashes.
    ///
    /// Only returns the blobs that were found on file.
    #[inline]
    fn read_many_raw(&self, txs: Vec<TxHash>) -> Vec<(TxHash, Vec<u8>)> {
        let mut res = Vec::with_capacity(txs.len());
        // One read lock for the whole batch.
        let _lock = self.file_lock.read();
        for tx in txs {
            let path = self.blob_disk_file(tx);
            match fs::read(&path) {
                Ok(data) => {
                    res.push((tx, data));
                }
                Err(err) => {
                    debug!(target:"txpool::blob", %err, ?tx, "Failed to read blob file");
                }
            };
        }
        res
    }
    /// Writes the blob data for the given transaction hash to the disk.
    ///
    /// Returns the number of bytes written: 0 when the file already existed (nothing written).
    #[inline]
    fn write_one_encoded(&self, tx: B256, data: &[u8]) -> Result<usize, DiskFileBlobStoreError> {
        trace!(target:"txpool::blob", "[{:?}] writing blob file", tx);
        let mut add = 0;
        let path = self.blob_disk_file(tx);
        {
            let _lock = self.file_lock.write();
            if !path.exists() {
                fs::write(&path, data)
                    .map_err(|e| DiskFileBlobStoreError::WriteFile(tx, path, e))?;
                add = data.len();
            }
        }
        Ok(add)
    }
    /// Retrieves blobs for the given transaction hashes from the blob cache or disk.
    ///
    /// This will not return an error if there are missing blobs. Therefore, the result may be a
    /// subset of the request or an empty vector if none of the blobs were found.
    #[inline]
    fn get_all(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<(B256, Arc<BlobTransactionSidecarVariant>)>, BlobStoreError> {
        let mut res = Vec::with_capacity(txs.len());
        let mut cache_miss = Vec::new();
        {
            // First pass: serve what we can from the LRU cache.
            let mut cache = self.blob_cache.lock();
            for tx in txs {
                if let Some(blob) = cache.get(&tx) {
                    res.push((tx, blob.clone()));
                } else {
                    cache_miss.push(tx)
                }
            }
        }
        if cache_miss.is_empty() {
            return Ok(res)
        }
        // Second pass: read remaining entries from disk.
        let from_disk = self.read_many_decoded(cache_miss);
        if from_disk.is_empty() {
            return Ok(res)
        }
        let from_disk = from_disk
            .into_iter()
            .map(|(tx, data)| {
                let data = Arc::new(data);
                res.push((tx, data.clone()));
                (tx, data)
            })
            .collect::<Vec<_>>();
        // Re-populate the cache with everything read from disk.
        let mut cache = self.blob_cache.lock();
        for (tx, data) in from_disk {
            cache.insert(tx, data);
        }
        Ok(res)
    }
    /// Retrieves blobs for the given transaction hashes from the blob cache or disk.
    ///
    /// Returns an error if there are any missing blobs.
    #[inline]
    fn get_exact(
        &self,
        txs: Vec<B256>,
    ) -> Result<Vec<Arc<BlobTransactionSidecarVariant>>, BlobStoreError> {
        txs.into_iter()
            .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx)))
            .collect()
    }
}
impl fmt::Debug for DiskFileBlobStoreInner {
    // Manual impl printing summary information only (cache length, pending deletions). Uses
    // non-blocking `try_lock`/`try_read` so formatting never blocks on a contended lock.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DiskFileBlobStoreInner")
            .field("blob_dir", &self.blob_dir)
            .field("cached_blobs", &self.blob_cache.try_lock().map(|lock| lock.len()))
            .field("txs_to_delete", &self.txs_to_delete.try_read())
            .finish()
    }
}
/// Errors that can occur when interacting with a disk file blob store.
//
// Doc note: each variant previously carried two redundant doc comments (one before and one
// after the `#[error]` attribute); they are merged into one per variant below.
#[derive(Debug, thiserror::Error)]
pub enum DiskFileBlobStoreError {
    /// Thrown during [`DiskFileBlobStore::open`] if the blob store directory cannot be opened.
    #[error("failed to open blobstore at {0}: {1}")]
    Open(PathBuf, io::Error),
    /// Failure while reading a blob file.
    #[error("[{0}] failed to read blob file at {1}: {2}")]
    ReadFile(TxHash, PathBuf, io::Error),
    /// Failure while writing a blob file.
    #[error("[{0}] failed to write blob file at {1}: {2}")]
    WriteFile(TxHash, PathBuf, io::Error),
    /// Failure while deleting a blob file.
    #[error("[{0}] failed to delete blob file at {1}: {2}")]
    DeleteFile(TxHash, PathBuf, io::Error),
}
/// Wraps any disk-store error into the generic [`BlobStoreError::Other`] variant.
impl From<DiskFileBlobStoreError> for BlobStoreError {
    fn from(value: DiskFileBlobStoreError) -> Self {
        Self::Other(Box::new(value))
    }
}
/// Configuration for a disk file blob store.
#[derive(Debug, Clone)]
pub struct DiskFileBlobStoreConfig {
    /// The maximum number of blobs to keep in the in memory blob cache.
    pub max_cached_entries: u32,
    /// How to open the blob store.
    ///
    /// NOTE(review): this field is currently ignored by [`DiskFileBlobStore::open`], which
    /// destructures only `max_cached_entries` — see the note on `open`.
    pub open: OpenDiskFileBlobStore,
}
impl Default for DiskFileBlobStoreConfig {
    /// Defaults to [`DEFAULT_MAX_CACHED_BLOBS`] cached entries and `Clear` open mode.
    fn default() -> Self {
        Self { max_cached_entries: DEFAULT_MAX_CACHED_BLOBS, open: Default::default() }
    }
}
impl DiskFileBlobStoreConfig {
    /// Set maximum number of blobs to keep in the in memory blob cache.
    pub const fn with_max_cached_entries(mut self, max_cached_entries: u32) -> Self {
        self.max_cached_entries = max_cached_entries;
        self
    }
}
/// How to open a disk file blob store.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum OpenDiskFileBlobStore {
    /// Clear everything in the blob store.
    #[default]
    Clear,
    /// Keep the existing blob store and index.
    ReIndex,
}
#[cfg(test)]
mod tests {
    use alloy_consensus::BlobTransactionSidecar;
    use alloy_eips::eip7594::BlobTransactionSidecarVariant;
    use super::*;
    use std::sync::atomic::Ordering;
    /// Creates a store backed by a fresh temp dir. The dir must be kept alive by the caller.
    fn tmp_store() -> (DiskFileBlobStore, tempfile::TempDir) {
        let dir = tempfile::tempdir().unwrap();
        let store = DiskFileBlobStore::open(dir.path(), Default::default()).unwrap();
        (store, dir)
    }
    /// Generates `num` (random tx hash, empty EIP-4844 sidecar) pairs. All sidecars are
    /// identical (empty); only the tx hashes differ.
    fn rng_blobs(num: usize) -> Vec<(TxHash, BlobTransactionSidecarVariant)> {
        let mut rng = rand::rng();
        (0..num)
            .map(|_| {
                let tx = TxHash::random_with(&mut rng);
                let blob = BlobTransactionSidecarVariant::Eip4844(BlobTransactionSidecar {
                    blobs: vec![],
                    commitments: vec![],
                    proofs: vec![],
                });
                (tx, blob)
            })
            .collect()
    }
    // Full lifecycle: insert, cached reads, batch reads, deferred delete, cleanup, and
    // size-tracker bookkeeping returning to zero.
    #[test]
    fn disk_insert_all_get_all() {
        let (store, _dir) = tmp_store();
        let blobs = rng_blobs(10);
        let all_hashes = blobs.iter().map(|(tx, _)| *tx).collect::<Vec<_>>();
        store.insert_all(blobs.clone()).unwrap();
        // all cached
        for (tx, blob) in &blobs {
            assert!(store.is_cached(tx));
            let b = store.get(*tx).unwrap().map(Arc::unwrap_or_clone).unwrap();
            assert_eq!(b, *blob);
        }
        let all = store.get_all(all_hashes.clone()).unwrap();
        for (tx, blob) in all {
            assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob))), "missing blob {tx:?}");
        }
        assert!(store.contains(all_hashes[0]).unwrap());
        store.delete_all(all_hashes.clone()).unwrap();
        assert!(store.inner.txs_to_delete.read().contains(&all_hashes[0]));
        // clear the cache so reads must hit disk, then remove the files
        store.clear_cache();
        store.cleanup();
        assert!(store.get(blobs[0].0).unwrap().is_none());
        let all = store.get_all(all_hashes.clone()).unwrap();
        assert!(all.is_empty());
        assert!(!store.contains(all_hashes[0]).unwrap());
        assert!(store.get_exact(all_hashes).is_err());
        assert_eq!(store.data_size_hint(), Some(0));
        assert_eq!(store.inner.size_tracker.num_blobs.load(Ordering::Relaxed), 0);
    }
    #[test]
    fn disk_insert_and_retrieve() {
        let (store, _dir) = tmp_store();
        let (tx, blob) = rng_blobs(1).into_iter().next().unwrap();
        store.insert(tx, blob.clone()).unwrap();
        assert!(store.is_cached(&tx));
        let retrieved_blob = store.get(tx).unwrap().map(Arc::unwrap_or_clone).unwrap();
        assert_eq!(retrieved_blob, blob);
    }
    // Documents that cleanup removes only the on-disk file: the LRU cache is not evicted,
    // so `get` still serves the (deleted) blob from cache.
    #[test]
    fn disk_delete_blob() {
        let (store, _dir) = tmp_store();
        let (tx, blob) = rng_blobs(1).into_iter().next().unwrap();
        store.insert(tx, blob).unwrap();
        assert!(store.is_cached(&tx));
        store.delete(tx).unwrap();
        assert!(store.inner.txs_to_delete.read().contains(&tx));
        store.cleanup();
        let result = store.get(tx).unwrap();
        assert_eq!(
            result,
            Some(Arc::new(BlobTransactionSidecarVariant::Eip4844(BlobTransactionSidecar {
                blobs: vec![],
                commitments: vec![],
                proofs: vec![]
            })))
        );
    }
    // Same cache-retention behavior as above, but across a whole batch.
    #[test]
    fn disk_insert_all_and_delete_all() {
        let (store, _dir) = tmp_store();
        let blobs = rng_blobs(5);
        let txs = blobs.iter().map(|(tx, _)| *tx).collect::<Vec<_>>();
        store.insert_all(blobs.clone()).unwrap();
        for (tx, _) in &blobs {
            assert!(store.is_cached(tx));
        }
        store.delete_all(txs.clone()).unwrap();
        store.cleanup();
        for tx in txs {
            let result = store.get(tx).unwrap();
            assert_eq!(
                result,
                Some(Arc::new(BlobTransactionSidecarVariant::Eip4844(BlobTransactionSidecar {
                    blobs: vec![],
                    commitments: vec![],
                    proofs: vec![]
                })))
            );
        }
    }
    #[test]
    fn disk_get_all_blobs() {
        let (store, _dir) = tmp_store();
        let blobs = rng_blobs(3);
        let txs = blobs.iter().map(|(tx, _)| *tx).collect::<Vec<_>>();
        store.insert_all(blobs.clone()).unwrap();
        let retrieved_blobs = store.get_all(txs.clone()).unwrap();
        for (tx, blob) in retrieved_blobs {
            assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob))));
        }
        store.delete_all(txs).unwrap();
        store.cleanup();
    }
    #[test]
    fn disk_get_exact_blobs_success() {
        let (store, _dir) = tmp_store();
        let blobs = rng_blobs(3);
        let txs = blobs.iter().map(|(tx, _)| *tx).collect::<Vec<_>>();
        store.insert_all(blobs.clone()).unwrap();
        let retrieved_blobs = store.get_exact(txs).unwrap();
        for (retrieved_blob, (_, original_blob)) in retrieved_blobs.into_iter().zip(blobs) {
            assert_eq!(Arc::unwrap_or_clone(retrieved_blob), original_blob);
        }
    }
    // `get_exact` must fail when any requested blob is missing.
    #[test]
    fn disk_get_exact_blobs_failure() {
        let (store, _dir) = tmp_store();
        let blobs = rng_blobs(2);
        let txs = blobs.iter().map(|(tx, _)| *tx).collect::<Vec<_>>();
        store.insert_all(blobs).unwrap();
        // Try to get a blob that was never inserted
        let missing_tx = TxHash::random();
        let result = store.get_exact(vec![txs[0], missing_tx]);
        assert!(result.is_err());
    }
    #[test]
    fn disk_data_size_hint() {
        let (store, _dir) = tmp_store();
        assert_eq!(store.data_size_hint(), Some(0));
        let blobs = rng_blobs(2);
        store.insert_all(blobs).unwrap();
        assert!(store.data_size_hint().unwrap() > 0);
    }
    #[test]
    fn disk_cleanup_stat() {
        let (store, _dir) = tmp_store();
        let blobs = rng_blobs(3);
        let txs = blobs.iter().map(|(tx, _)| *tx).collect::<Vec<_>>();
        store.insert_all(blobs).unwrap();
        store.delete_all(txs).unwrap();
        let stat = store.cleanup();
        assert_eq!(stat.delete_succeed, 3);
        assert_eq!(stat.delete_failed, 0);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/validate/eth.rs | crates/transaction-pool/src/validate/eth.rs | //! Ethereum transaction validator.
use super::constants::DEFAULT_MAX_TX_INPUT_BYTES;
use crate::{
blobstore::BlobStore,
error::{
Eip4844PoolTransactionError, Eip7702PoolTransactionError, InvalidPoolTransactionError,
},
metrics::TxPoolValidationMetrics,
traits::TransactionOrigin,
validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE},
Address, BlobTransactionSidecarVariant, EthBlobTransactionSidecar, EthPoolTransaction,
LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor,
TransactionValidator,
};
use alloy_consensus::{
constants::{
EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID,
LEGACY_TX_TYPE_ID,
},
BlockHeader,
};
use alloy_eips::{
eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M, eip4844::env_settings::EnvKzgSettings,
eip7840::BlobParams,
};
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_primitives_traits::{
constants::MAX_TX_GAS_LIMIT_OSAKA, transaction::error::InvalidTransactionError, Account, Block,
GotExpected, SealedBlock,
};
use reth_storage_api::{AccountInfoReader, BytecodeReader, StateProviderFactory};
use reth_tasks::TaskSpawner;
use seismic_alloy_consensus::SEISMIC_TX_TYPE_ID;
use std::{
marker::PhantomData,
sync::{
atomic::{AtomicBool, AtomicU64},
Arc,
},
time::Instant,
};
use tokio::sync::Mutex;
/// A [`TransactionValidator`] implementation that validates ethereum transactions.
///
/// It supports all known ethereum transaction types:
/// - Legacy
/// - EIP-2718
/// - EIP-1559
/// - EIP-4844
/// - EIP-7702
///
/// And enforces additional constraints such as:
/// - Maximum transaction size
/// - Maximum gas limit
///
/// And adheres to the configured [`LocalTransactionConfig`].
#[derive(Debug)]
pub struct EthTransactionValidator<Client, T> {
    /// This type fetches account info from the db
    client: Client,
    /// Blobstore used for fetching re-injected blob transactions.
    blob_store: Box<dyn BlobStore>,
    /// tracks activated forks relevant for transaction validation
    fork_tracker: ForkTracker,
    /// Fork indicator whether we are using EIP-2718 type transactions.
    eip2718: bool,
    /// Fork indicator whether we are using EIP-1559 type transactions.
    eip1559: bool,
    /// Fork indicator whether we are using EIP-4844 blob transactions.
    eip4844: bool,
    /// Fork indicator whether we are using EIP-7702 type transactions.
    eip7702: bool,
    /// The current max gas limit
    block_gas_limit: AtomicU64,
    /// The current tx fee cap limit in wei locally submitted into the pool.
    tx_fee_cap: Option<u128>,
    /// Minimum priority fee to enforce for acceptance into the pool.
    minimum_priority_fee: Option<u128>,
    /// Stores the setup and parameters needed for validating KZG proofs.
    kzg_settings: EnvKzgSettings,
    /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions.
    local_transactions_config: LocalTransactionConfig,
    /// Maximum size in bytes a single transaction can have in order to be accepted into the pool.
    max_tx_input_bytes: usize,
    /// Maximum gas limit for individual transactions
    max_tx_gas_limit: Option<u64>,
    /// Disable balance checks during transaction validation
    disable_balance_check: bool,
    /// Marker for the transaction type
    _marker: PhantomData<T>,
    /// Metrics for tx pool validation
    validation_metrics: TxPoolValidationMetrics,
}
// Accessor methods. These don't require any trait bounds beyond what each method itself needs.
impl<Client, Tx> EthTransactionValidator<Client, Tx> {
    /// Returns the configured chain spec
    pub fn chain_spec(&self) -> Arc<Client::ChainSpec>
    where
        Client: ChainSpecProvider,
    {
        self.client().chain_spec()
    }
    /// Returns the configured chain id
    pub fn chain_id(&self) -> u64
    where
        Client: ChainSpecProvider,
    {
        self.client().chain_spec().chain().id()
    }
    /// Returns the configured client
    pub const fn client(&self) -> &Client {
        &self.client
    }
    /// Returns the tracked activated forks relevant for transaction validation
    pub const fn fork_tracker(&self) -> &ForkTracker {
        &self.fork_tracker
    }
    /// Returns if there are EIP-2718 type transactions
    pub const fn eip2718(&self) -> bool {
        self.eip2718
    }
    /// Returns if there are EIP-1559 type transactions
    pub const fn eip1559(&self) -> bool {
        self.eip1559
    }
    /// Returns if there are EIP-4844 blob transactions
    pub const fn eip4844(&self) -> bool {
        self.eip4844
    }
    /// Returns if there are EIP-7702 type transactions
    pub const fn eip7702(&self) -> bool {
        self.eip7702
    }
    /// Returns the current tx fee cap limit in wei locally submitted into the pool.
    ///
    /// NOTE(review): returns `&Option<u128>`; `Option<u128>` by value would be more idiomatic
    /// for a `Copy` inner type, but changing it would alter the public signature.
    pub const fn tx_fee_cap(&self) -> &Option<u128> {
        &self.tx_fee_cap
    }
    /// Returns the minimum priority fee to enforce for acceptance into the pool
    pub const fn minimum_priority_fee(&self) -> &Option<u128> {
        &self.minimum_priority_fee
    }
    /// Returns the setup and parameters needed for validating KZG proofs.
    pub const fn kzg_settings(&self) -> &EnvKzgSettings {
        &self.kzg_settings
    }
    /// Returns the config to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions.
    pub const fn local_transactions_config(&self) -> &LocalTransactionConfig {
        &self.local_transactions_config
    }
    /// Returns the maximum size in bytes a single transaction can have in order to be accepted into
    /// the pool.
    pub const fn max_tx_input_bytes(&self) -> usize {
        self.max_tx_input_bytes
    }
    /// Returns whether balance checks are disabled for this validator.
    pub const fn disable_balance_check(&self) -> bool {
        self.disable_balance_check
    }
}
impl<Client, Tx> EthTransactionValidator<Client, Tx>
where
    Client: ChainSpecProvider<ChainSpec: EthereumHardforks> + StateProviderFactory,
    Tx: EthPoolTransaction,
{
    /// Returns the current max gas limit (tracked from the latest head block).
    pub fn block_gas_limit(&self) -> u64 {
        self.max_gas_limit()
    }
    /// Validates a single transaction.
    ///
    /// See also [`TransactionValidator::validate_transaction`]
    pub fn validate_one(
        &self,
        origin: TransactionOrigin,
        transaction: Tx,
    ) -> TransactionValidationOutcome<Tx> {
        self.validate_one_with_provider(origin, transaction, &mut None)
    }
    /// Validates a single transaction with the provided state provider.
    ///
    /// This allows reusing the same provider across multiple transaction validations,
    /// which can improve performance when validating many transactions.
    ///
    /// If `state` is `None`, a new state provider will be created.
    pub fn validate_one_with_state(
        &self,
        origin: TransactionOrigin,
        transaction: Tx,
        state: &mut Option<Box<dyn AccountInfoReader>>,
    ) -> TransactionValidationOutcome<Tx> {
        self.validate_one_with_provider(origin, transaction, state)
    }
    /// Validates a single transaction using an optional cached state provider.
    /// If no provider is passed, a new one will be created. This allows reusing
    /// the same provider across multiple txs.
    fn validate_one_with_provider(
        &self,
        origin: TransactionOrigin,
        transaction: Tx,
        maybe_state: &mut Option<Box<dyn AccountInfoReader>>,
    ) -> TransactionValidationOutcome<Tx> {
        match self.validate_one_no_state(origin, transaction) {
            Ok(transaction) => {
                // stateless checks passed, pass transaction down stateful validation pipeline
                // If we don't have a state provider yet, fetch the latest state
                if maybe_state.is_none() {
                    match self.client.latest() {
                        Ok(new_state) => {
                            *maybe_state = Some(Box::new(new_state));
                        }
                        Err(err) => {
                            return TransactionValidationOutcome::Error(
                                *transaction.hash(),
                                Box::new(err),
                            )
                        }
                    }
                }
                let state = maybe_state.as_deref().expect("provider is set");
                self.validate_one_against_state(origin, transaction, state)
            }
            Err(invalid_outcome) => invalid_outcome,
        }
    }
    /// Performs stateless validation on single transaction. Returns unaltered input transaction
    /// if all checks pass, so transaction can continue through to stateful validation as argument
    /// to [`validate_one_against_state`](Self::validate_one_against_state).
    fn validate_one_no_state(
        &self,
        origin: TransactionOrigin,
        transaction: Tx,
    ) -> Result<Tx, TransactionValidationOutcome<Tx>> {
        // Checks for tx_type: reject any type whose fork indicator is not enabled.
        match transaction.ty() {
            LEGACY_TX_TYPE_ID => {
                // Accept legacy transactions
            }
            EIP2930_TX_TYPE_ID => {
                // Accept only legacy transactions until EIP-2718/2930 activates
                if !self.eip2718 {
                    return Err(TransactionValidationOutcome::Invalid(
                        transaction,
                        InvalidTransactionError::Eip2930Disabled.into(),
                    ))
                }
            }
            EIP1559_TX_TYPE_ID => {
                // Reject dynamic fee transactions until EIP-1559 activates.
                if !self.eip1559 {
                    return Err(TransactionValidationOutcome::Invalid(
                        transaction,
                        InvalidTransactionError::Eip1559Disabled.into(),
                    ))
                }
            }
            EIP4844_TX_TYPE_ID => {
                // Reject blob transactions.
                if !self.eip4844 {
                    return Err(TransactionValidationOutcome::Invalid(
                        transaction,
                        InvalidTransactionError::Eip4844Disabled.into(),
                    ))
                }
            }
            EIP7702_TX_TYPE_ID => {
                // Reject EIP-7702 transactions.
                if !self.eip7702 {
                    return Err(TransactionValidationOutcome::Invalid(
                        transaction,
                        InvalidTransactionError::Eip7702Disabled.into(),
                    ))
                }
            }
            SEISMIC_TX_TYPE_ID => {
                // Accept seismic transactions
            }
            _ => {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidTransactionError::TxTypeNotSupported.into(),
                ))
            }
        };
        // Reject transactions with a nonce equal to U64::max according to EIP-2681
        let tx_nonce = transaction.nonce();
        if tx_nonce == u64::MAX {
            return Err(TransactionValidationOutcome::Invalid(
                transaction,
                InvalidPoolTransactionError::Eip2681,
            ))
        }
        // Reject transactions over defined size to prevent DOS attacks
        if transaction.is_eip4844() {
            // Since blob transactions are pulled instead of pushed, and only the consensus data is
            // kept in memory while the sidecar is cached on disk, there is no critical limit that
            // should be enforced. Still, enforcing some cap on the input bytes. blob txs also must
            // be executable right away when they enter the pool.
            let tx_input_len = transaction.input().len();
            if tx_input_len > self.max_tx_input_bytes {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidPoolTransactionError::OversizedData(
                        tx_input_len,
                        self.max_tx_input_bytes,
                    ),
                ))
            }
        } else {
            // ensure the size of the non-blob transaction (full RLP-encoded length)
            let tx_size = transaction.encoded_length();
            if tx_size > self.max_tx_input_bytes {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidPoolTransactionError::OversizedData(tx_size, self.max_tx_input_bytes),
                ))
            }
        }
        // Check whether the init code size has been exceeded (EIP-3860, active since Shanghai).
        if self.fork_tracker.is_shanghai_activated() {
            if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) {
                return Err(TransactionValidationOutcome::Invalid(transaction, err))
            }
        }
        // Checks for gas limit: a tx that can never fit in a block is never valid.
        let transaction_gas_limit = transaction.gas_limit();
        let block_gas_limit = self.max_gas_limit();
        if transaction_gas_limit > block_gas_limit {
            return Err(TransactionValidationOutcome::Invalid(
                transaction,
                InvalidPoolTransactionError::ExceedsGasLimit(
                    transaction_gas_limit,
                    block_gas_limit,
                ),
            ))
        }
        // Check individual transaction gas limit if configured
        if let Some(max_tx_gas_limit) = self.max_tx_gas_limit {
            if transaction_gas_limit > max_tx_gas_limit {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidPoolTransactionError::MaxTxGasLimitExceeded(
                        transaction_gas_limit,
                        max_tx_gas_limit,
                    ),
                ))
            }
        }
        // Ensure max_priority_fee_per_gas (if EIP1559) is less than max_fee_per_gas if any.
        // Note: `None > Some(_)` is false, so legacy txs pass this check.
        if transaction.max_priority_fee_per_gas() > Some(transaction.max_fee_per_gas()) {
            return Err(TransactionValidationOutcome::Invalid(
                transaction,
                InvalidTransactionError::TipAboveFeeCap.into(),
            ))
        }
        // determine whether the transaction should be treated as local
        let is_local = self.local_transactions_config.is_local(origin, transaction.sender_ref());
        // Ensure max possible transaction fee doesn't exceed configured transaction fee cap.
        // Only for transactions locally submitted for acceptance into the pool.
        if is_local {
            match self.tx_fee_cap {
                Some(0) | None => {} // Skip if cap is 0 or None
                Some(tx_fee_cap_wei) => {
                    // max possible tx fee is (gas_price * gas_limit)
                    // (if EIP1559) max possible tx fee is (max_fee_per_gas * gas_limit)
                    let gas_price = transaction.max_fee_per_gas();
                    let max_tx_fee_wei = gas_price.saturating_mul(transaction.gas_limit() as u128);
                    if max_tx_fee_wei > tx_fee_cap_wei {
                        return Err(TransactionValidationOutcome::Invalid(
                            transaction,
                            InvalidPoolTransactionError::ExceedsFeeCap {
                                max_tx_fee_wei,
                                tx_fee_cap_wei,
                            },
                        ))
                    }
                }
            }
        }
        // Drop non-local transactions with a fee lower than the configured fee for acceptance into
        // the pool.
        if !is_local &&
            transaction.is_dynamic_fee() &&
            transaction.max_priority_fee_per_gas() < self.minimum_priority_fee
        {
            return Err(TransactionValidationOutcome::Invalid(
                transaction,
                InvalidPoolTransactionError::PriorityFeeBelowMinimum {
                    // The comparison above is only true when `minimum_priority_fee` is `Some`.
                    minimum_priority_fee: self
                        .minimum_priority_fee
                        .expect("minimum priority fee is expected inside if statement"),
                },
            ))
        }
        // Checks for chainid
        if let Some(chain_id) = transaction.chain_id() {
            if chain_id != self.chain_id() {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidTransactionError::ChainIdMismatch.into(),
                ))
            }
        }
        if transaction.is_eip7702() {
            // Prague fork is required for 7702 txs
            if !self.fork_tracker.is_prague_activated() {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidTransactionError::TxTypeNotSupported.into(),
                ))
            }
            // EIP-7702 mandates a non-empty authorization list.
            if transaction.authorization_list().is_none_or(|l| l.is_empty()) {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    Eip7702PoolTransactionError::MissingEip7702AuthorizationList.into(),
                ))
            }
        }
        if let Err(err) = ensure_intrinsic_gas(&transaction, &self.fork_tracker) {
            return Err(TransactionValidationOutcome::Invalid(transaction, err))
        }
        // light blob tx pre-checks
        if transaction.is_eip4844() {
            // Cancun fork is required for blob txs
            if !self.fork_tracker.is_cancun_activated() {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidTransactionError::TxTypeNotSupported.into(),
                ))
            }
            let blob_count = transaction.blob_count().unwrap_or(0);
            if blob_count == 0 {
                // no blobs
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidPoolTransactionError::Eip4844(
                        Eip4844PoolTransactionError::NoEip4844Blobs,
                    ),
                ))
            }
            let max_blob_count = self.fork_tracker.max_blob_count();
            if blob_count > max_blob_count {
                return Err(TransactionValidationOutcome::Invalid(
                    transaction,
                    InvalidPoolTransactionError::Eip4844(
                        Eip4844PoolTransactionError::TooManyEip4844Blobs {
                            have: blob_count,
                            permitted: max_blob_count,
                        },
                    ),
                ))
            }
        }
        // Osaka validation of max tx gas.
        if self.fork_tracker.is_osaka_activated() &&
            transaction.gas_limit() > MAX_TX_GAS_LIMIT_OSAKA
        {
            return Err(TransactionValidationOutcome::Invalid(
                transaction,
                InvalidTransactionError::GasLimitTooHigh.into(),
            ))
        }
        Ok(transaction)
    }
    /// Validates a single transaction using given state provider.
    fn validate_one_against_state<P>(
        &self,
        origin: TransactionOrigin,
        mut transaction: Tx,
        state: P,
    ) -> TransactionValidationOutcome<Tx>
    where
        P: AccountInfoReader,
    {
        // Use provider to get account info; a missing account defaults to zero
        // balance/nonce/no code.
        let account = match state.basic_account(transaction.sender_ref()) {
            Ok(account) => account.unwrap_or_default(),
            Err(err) => {
                return TransactionValidationOutcome::Error(*transaction.hash(), Box::new(err))
            }
        };
        // check for bytecode (sender must be an EOA, or EIP-7702 delegated post-Prague)
        match self.validate_sender_bytecode(&transaction, &account, &state) {
            Err(outcome) => return outcome,
            Ok(Err(err)) => return TransactionValidationOutcome::Invalid(transaction, err),
            _ => {}
        };
        // Checks for nonce
        if let Err(err) = self.validate_sender_nonce(&transaction, &account) {
            return TransactionValidationOutcome::Invalid(transaction, err)
        }
        // checks for max cost not exceeding account_balance
        if let Err(err) = self.validate_sender_balance(&transaction, &account) {
            return TransactionValidationOutcome::Invalid(transaction, err)
        }
        // heavy blob tx validation (KZG proof verification)
        let maybe_blob_sidecar = match self.validate_eip4844(&mut transaction) {
            Err(err) => return TransactionValidationOutcome::Invalid(transaction, err),
            Ok(sidecar) => sidecar,
        };
        let authorities = self.recover_authorities(&transaction);
        // Return the valid transaction
        TransactionValidationOutcome::Valid {
            balance: account.balance,
            state_nonce: account.nonce,
            bytecode_hash: account.bytecode_hash,
            transaction: ValidTransaction::new(transaction, maybe_blob_sidecar),
            // by this point assume all external transactions should be propagated
            propagate: match origin {
                TransactionOrigin::External => true,
                TransactionOrigin::Local => {
                    self.local_transactions_config.propagate_local_transactions
                }
                TransactionOrigin::Private => false,
            },
            authorities,
        }
    }
    /// Validates that the sender's account has valid or no bytecode.
    pub fn validate_sender_bytecode(
        &self,
        transaction: &Tx,
        sender: &Account,
        state: impl BytecodeReader,
    ) -> Result<Result<(), InvalidPoolTransactionError>, TransactionValidationOutcome<Tx>> {
        // Unless Prague is active, the signer account shouldn't have bytecode.
        //
        // If Prague is active, only EIP-7702 bytecode is allowed for the sender.
        //
        // Any other case means that the account is not an EOA, and should not be able to send
        // transactions.
        if let Some(code_hash) = &sender.bytecode_hash {
            let is_eip7702 = if self.fork_tracker.is_prague_activated() {
                match state.bytecode_by_hash(code_hash) {
                    Ok(bytecode) => bytecode.unwrap_or_default().is_eip7702(),
                    Err(err) => {
                        return Err(TransactionValidationOutcome::Error(
                            *transaction.hash(),
                            Box::new(err),
                        ))
                    }
                }
            } else {
                false
            };
            if !is_eip7702 {
                return Ok(Err(InvalidTransactionError::SignerAccountHasBytecode.into()))
            }
        }
        Ok(Ok(()))
    }
    /// Checks if the transaction nonce is valid.
    ///
    /// A nonce greater than the on-chain nonce is accepted (queued tx); only nonces strictly
    /// below the account nonce are rejected.
    pub fn validate_sender_nonce(
        &self,
        transaction: &Tx,
        sender: &Account,
    ) -> Result<(), InvalidPoolTransactionError> {
        let tx_nonce = transaction.nonce();
        if tx_nonce < sender.nonce {
            return Err(InvalidTransactionError::NonceNotConsistent {
                tx: tx_nonce,
                state: sender.nonce,
            }
            .into())
        }
        Ok(())
    }
    /// Ensures the sender has sufficient account balance.
    ///
    /// Skipped entirely when `disable_balance_check` is set.
    pub fn validate_sender_balance(
        &self,
        transaction: &Tx,
        sender: &Account,
    ) -> Result<(), InvalidPoolTransactionError> {
        let cost = transaction.cost();
        if !self.disable_balance_check && cost > &sender.balance {
            let expected = *cost;
            return Err(InvalidTransactionError::InsufficientFunds(
                GotExpected { got: sender.balance, expected }.into(),
            )
            .into())
        }
        Ok(())
    }
    /// Validates EIP-4844 blob sidecar data and returns the extracted sidecar, if any.
    pub fn validate_eip4844(
        &self,
        transaction: &mut Tx,
    ) -> Result<Option<BlobTransactionSidecarVariant>, InvalidPoolTransactionError> {
        let mut maybe_blob_sidecar = None;
        // heavy blob tx validation
        if transaction.is_eip4844() {
            // extract the blob from the transaction
            match transaction.take_blob() {
                EthBlobTransactionSidecar::None => {
                    // this should not happen
                    return Err(InvalidTransactionError::TxTypeNotSupported.into())
                }
                EthBlobTransactionSidecar::Missing => {
                    // This can happen for re-injected blob transactions (on re-org), since the blob
                    // is stripped from the transaction and not included in a block.
                    // check if the blob is in the store, if it's included we previously validated
                    // it and inserted it
                    if self.blob_store.contains(*transaction.hash()).is_ok_and(|c| c) {
                        // validated transaction is already in the store
                    } else {
                        return Err(InvalidPoolTransactionError::Eip4844(
                            Eip4844PoolTransactionError::MissingEip4844BlobSidecar,
                        ))
                    }
                }
                EthBlobTransactionSidecar::Present(sidecar) => {
                    let now = Instant::now();
                    // Osaka switches the expected sidecar format from EIP-4844 to EIP-7594.
                    if self.fork_tracker.is_osaka_activated() {
                        if sidecar.is_eip4844() {
                            return Err(InvalidPoolTransactionError::Eip4844(
                                Eip4844PoolTransactionError::UnexpectedEip4844SidecarAfterOsaka,
                            ))
                        }
                    } else if sidecar.is_eip7594() {
                        return Err(InvalidPoolTransactionError::Eip4844(
                            Eip4844PoolTransactionError::UnexpectedEip7594SidecarBeforeOsaka,
                        ))
                    }
                    // validate the blob (KZG commitment/proof check)
                    if let Err(err) = transaction.validate_blob(&sidecar, self.kzg_settings.get()) {
                        return Err(InvalidPoolTransactionError::Eip4844(
                            Eip4844PoolTransactionError::InvalidEip4844Blob(err),
                        ))
                    }
                    // Record the duration of successful blob validation as histogram
                    self.validation_metrics.blob_validation_duration.record(now.elapsed());
                    // store the extracted blob
                    maybe_blob_sidecar = Some(sidecar);
                }
            }
        }
        Ok(maybe_blob_sidecar)
    }
    /// Returns the recovered authorities for the given transaction.
    ///
    /// Returns `None` if the transaction carries no authorization list (i.e. it is not
    /// EIP-7702). NOTE(review): authorizations whose recovery fails are silently dropped by
    /// `flat_map` — confirm this is intended.
    fn recover_authorities(&self, transaction: &Tx) -> Option<Vec<Address>> {
        transaction
            .authorization_list()
            .map(|auths| auths.iter().flat_map(|auth| auth.recover_authority()).collect::<Vec<_>>())
    }
    /// Validates all given transactions.
    fn validate_batch(
        &self,
        transactions: Vec<(TransactionOrigin, Tx)>,
    ) -> Vec<TransactionValidationOutcome<Tx>> {
        // One state provider is lazily created and shared across the whole batch.
        let mut provider = None;
        transactions
            .into_iter()
            .map(|(origin, tx)| self.validate_one_with_provider(origin, tx, &mut provider))
            .collect()
    }
    /// Validates all given transactions with origin.
    fn validate_batch_with_origin(
        &self,
        origin: TransactionOrigin,
        transactions: impl IntoIterator<Item = Tx> + Send,
    ) -> Vec<TransactionValidationOutcome<Tx>> {
        // One state provider is lazily created and shared across the whole batch.
        let mut provider = None;
        transactions
            .into_iter()
            .map(|tx| self.validate_one_with_provider(origin, tx, &mut provider))
            .collect()
    }
    /// Updates the tracked fork activation flags, blob params and block gas limit from a new
    /// head block.
    fn on_new_head_block<T: BlockHeader>(&self, new_tip_block: &T) {
        // Without the `timestamp-in-seconds` feature, block timestamps are scaled down by 1000
        // (presumably milliseconds -> seconds — confirm against the chain's header format).
        let timestamp = if cfg!(feature = "timestamp-in-seconds") {
            new_tip_block.timestamp()
        } else {
            new_tip_block.timestamp() / 1000
        };
        // update all forks
        if self.chain_spec().is_shanghai_active_at_timestamp(timestamp) {
            self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed);
        }
        if self.chain_spec().is_cancun_active_at_timestamp(timestamp) {
            self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed);
        }
        if self.chain_spec().is_prague_active_at_timestamp(timestamp) {
            self.fork_tracker.prague.store(true, std::sync::atomic::Ordering::Relaxed);
        }
        if self.chain_spec().is_osaka_active_at_timestamp(timestamp) {
            self.fork_tracker.osaka.store(true, std::sync::atomic::Ordering::Relaxed);
        }
        if let Some(blob_params) = self.chain_spec().blob_params_at_timestamp(timestamp) {
            self.fork_tracker
                .max_blob_count
                .store(blob_params.max_blobs_per_tx, std::sync::atomic::Ordering::Relaxed);
        }
        self.block_gas_limit.store(new_tip_block.gas_limit(), std::sync::atomic::Ordering::Relaxed);
    }
    /// Returns the currently tracked block gas limit.
    fn max_gas_limit(&self) -> u64 {
        self.block_gas_limit.load(std::sync::atomic::Ordering::Relaxed)
    }
}
// The trait impl simply forwards to the inherent (synchronous) validation methods; the async
// signatures exist only to satisfy the trait, no awaiting happens here.
impl<Client, Tx> TransactionValidator for EthTransactionValidator<Client, Tx>
where
    Client: ChainSpecProvider<ChainSpec: EthereumHardforks> + StateProviderFactory,
    Tx: EthPoolTransaction,
{
    type Transaction = Tx;
    async fn validate_transaction(
        &self,
        origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> TransactionValidationOutcome<Self::Transaction> {
        self.validate_one(origin, transaction)
    }
    async fn validate_transactions(
        &self,
        transactions: Vec<(TransactionOrigin, Self::Transaction)>,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        // Batch validation reuses one state provider across all transactions.
        self.validate_batch(transactions)
    }
    async fn validate_transactions_with_origin(
        &self,
        origin: TransactionOrigin,
        transactions: impl IntoIterator<Item = Self::Transaction> + Send,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        self.validate_batch_with_origin(origin, transactions)
    }
    fn on_new_head_block<B>(&self, new_tip_block: &SealedBlock<B>)
    where
        B: Block,
    {
        // Forwards the header to the inherent `on_new_head_block`, which refreshes fork flags
        // and the tracked block gas limit.
        self.on_new_head_block(new_tip_block.header())
    }
}
/// A builder for [`EthTransactionValidator`] and [`TransactionValidationTaskExecutor`]
#[derive(Debug)]
pub struct EthTransactionValidatorBuilder<Client> {
client: Client,
/// Fork indicator whether we are in the Shanghai stage.
shanghai: bool,
/// Fork indicator whether we are in the Cancun hardfork.
cancun: bool,
/// Fork indicator whether we are in the Prague hardfork.
prague: bool,
/// Fork indicator whether we are in the Osaka hardfork.
osaka: bool,
/// Max blob count at the block's timestamp.
max_blob_count: u64,
/// Whether using EIP-2718 type transactions is allowed
eip2718: bool,
/// Whether using EIP-1559 type transactions is allowed
eip1559: bool,
/// Whether using EIP-4844 type transactions is allowed
eip4844: bool,
/// Whether using EIP-7702 type transactions is allowed
eip7702: bool,
/// The current max gas limit
block_gas_limit: AtomicU64,
/// The current tx fee cap limit in wei locally submitted into the pool.
tx_fee_cap: Option<u128>,
/// Minimum priority fee to enforce for acceptance into the pool.
minimum_priority_fee: Option<u128>,
/// Determines how many additional tasks to spawn
///
/// Default is 1
additional_tasks: usize,
/// Stores the setup and parameters needed for validating KZG proofs.
kzg_settings: EnvKzgSettings,
/// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions.
local_transactions_config: LocalTransactionConfig,
/// Max size in bytes of a single transaction allowed
max_tx_input_bytes: usize,
/// Maximum gas limit for individual transactions
max_tx_gas_limit: Option<u64>,
/// Disable balance checks during transaction validation
disable_balance_check: bool,
}
impl<Client> EthTransactionValidatorBuilder<Client> {
/// Creates a new builder for the given client
///
/// By default this assumes the network is on the `Prague` hardfork and the following
/// transactions are allowed:
/// - Legacy
/// - EIP-2718
/// - EIP-1559
/// - EIP-4844
/// - EIP-7702
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/validate/mod.rs | crates/transaction-pool/src/validate/mod.rs | //! Transaction validation abstractions.
use crate::{
error::InvalidPoolTransactionError,
identifier::{SenderId, TransactionId},
traits::{PoolTransaction, TransactionOrigin},
PriceBumpConfig,
};
use alloy_eips::{eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization};
use alloy_primitives::{Address, TxHash, B256, U256};
use futures_util::future::Either;
use reth_primitives_traits::{Recovered, SealedBlock};
use std::{fmt, fmt::Debug, future::Future, time::Instant};
mod constants;
mod eth;
mod task;
pub use eth::*;
pub use task::{TransactionValidationTaskExecutor, ValidationTask};
/// Validation constants.
pub use constants::{
DEFAULT_MAX_TX_INPUT_BYTES, MAX_CODE_BYTE_SIZE, MAX_INIT_CODE_BYTE_SIZE, TX_SLOT_BYTE_SIZE,
};
use reth_primitives_traits::Block;
/// A Result type returned after checking a transaction's validity.
#[derive(Debug)]
pub enum TransactionValidationOutcome<T: PoolTransaction> {
    /// The transaction is considered _currently_ valid and can be inserted into the pool.
    Valid {
        /// Balance of the sender at the current point.
        balance: U256,
        /// Current nonce of the sender.
        state_nonce: u64,
        /// Code hash of the sender.
        bytecode_hash: Option<B256>,
        /// The validated transaction.
        ///
        /// See also [`ValidTransaction`].
        ///
        /// If this is a _new_ EIP-4844 blob transaction, then this must contain the extracted
        /// sidecar.
        transaction: ValidTransaction<T>,
        /// Whether to propagate the transaction to the network.
        propagate: bool,
        /// The recovered authorities of an EIP-7702 transaction, if any.
        authorities: Option<Vec<Address>>,
    },
    /// The transaction is considered invalid indefinitely: It violates constraints that prevent
    /// this transaction from ever becoming valid.
    Invalid(T, InvalidPoolTransactionError),
    /// An error occurred while trying to validate the transaction (e.g. a state provider
    /// failure) — this says nothing about the transaction's own validity.
    Error(TxHash, Box<dyn core::error::Error + Send + Sync>),
}
impl<T: PoolTransaction> TransactionValidationOutcome<T> {
    /// Returns the hash of the transaction this outcome refers to.
    pub fn tx_hash(&self) -> TxHash {
        match self {
            Self::Valid { transaction, .. } => *transaction.hash(),
            Self::Invalid(tx, ..) => *tx.hash(),
            Self::Error(hash, ..) => *hash,
        }
    }
    /// Returns the [`InvalidPoolTransactionError`] if this is an invalid variant.
    pub const fn as_invalid(&self) -> Option<&InvalidPoolTransactionError> {
        if let Self::Invalid(_, err) = self {
            Some(err)
        } else {
            None
        }
    }
    /// Returns true if the transaction is valid.
    pub const fn is_valid(&self) -> bool {
        matches!(self, Self::Valid { .. })
    }
    /// Returns true if the transaction is invalid.
    pub const fn is_invalid(&self) -> bool {
        matches!(self, Self::Invalid(..))
    }
    /// Returns true if validation resulted in an error.
    pub const fn is_error(&self) -> bool {
        matches!(self, Self::Error(..))
    }
}
/// A wrapper type for a transaction that is valid and has an optional extracted EIP-4844 blob
/// transaction sidecar.
///
/// If this is provided, then the sidecar will be temporarily stored in the blob store until the
/// transaction is finalized.
///
/// Note: Since blob transactions can be re-injected without their sidecar (after reorg), the
/// validator can omit the sidecar if it is still in the blob store and return a
/// [`ValidTransaction::Valid`] instead.
#[derive(Debug)]
pub enum ValidTransaction<T> {
    /// A valid transaction without a sidecar.
    Valid(T),
    /// A valid transaction for which a sidecar should be stored.
    ///
    /// Caution: The [`TransactionValidator`] must ensure that this is only returned for EIP-4844
    /// transactions.
    ValidWithSidecar {
        /// The valid EIP-4844 transaction.
        transaction: T,
        /// The extracted sidecar of that transaction.
        sidecar: BlobTransactionSidecarVariant,
    },
}
impl<T> ValidTransaction<T> {
    /// Creates a new valid transaction, attaching the sidecar when one is provided.
    pub fn new(transaction: T, sidecar: Option<BlobTransactionSidecarVariant>) -> Self {
        match sidecar {
            Some(sidecar) => Self::ValidWithSidecar { transaction, sidecar },
            None => Self::Valid(transaction),
        }
    }
}
impl<T: PoolTransaction> ValidTransaction<T> {
    /// Returns a reference to the wrapped transaction, regardless of variant.
    #[inline]
    pub const fn transaction(&self) -> &T {
        match self {
            Self::Valid(transaction) | Self::ValidWithSidecar { transaction, .. } => transaction,
        }
    }
    /// Consumes the wrapper and returns the transaction, discarding any sidecar.
    pub fn into_transaction(self) -> T {
        match self {
            Self::Valid(transaction) | Self::ValidWithSidecar { transaction, .. } => transaction,
        }
    }
    /// Returns the sender address of the transaction.
    #[inline]
    pub(crate) fn sender(&self) -> Address {
        self.transaction().sender()
    }
    /// Returns the hash of the transaction.
    #[inline]
    pub fn hash(&self) -> &B256 {
        self.transaction().hash()
    }
    /// Returns the nonce of the transaction.
    #[inline]
    pub fn nonce(&self) -> u64 {
        self.transaction().nonce()
    }
}
/// Provides support for validating transaction at any given state of the chain
pub trait TransactionValidator: Debug + Send + Sync {
    /// The transaction type to validate.
    type Transaction: PoolTransaction;
    /// Validates the transaction and returns a [`TransactionValidationOutcome`] describing the
    /// validity of the given transaction.
    ///
    /// This will be used by the transaction-pool to check whether the transaction should be
    /// inserted into the pool or discarded right away.
    ///
    /// Implementers of this trait must ensure that the transaction is well-formed, i.e. that it
    /// complies with at least all static constraints, which includes checking for:
    ///
    /// * chain id
    /// * gas limit
    /// * max cost
    /// * nonce >= next nonce of the sender
    /// * ...
    ///
    /// See [`InvalidTransactionError`](reth_primitives_traits::transaction::error::InvalidTransactionError) for common
    /// errors variants.
    ///
    /// The transaction pool makes no additional assumptions about the validity of the transaction
    /// at the time of this call before it inserts it into the pool. However, the validity of
    /// this transaction is still subject to future (dynamic) changes enforced by the pool, for
    /// example nonce or balance changes. Hence, any validation checks must be applied in this
    /// function.
    ///
    /// See [`TransactionValidationTaskExecutor`] for a reference implementation.
    fn validate_transaction(
        &self,
        origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> impl Future<Output = TransactionValidationOutcome<Self::Transaction>> + Send;
    /// Validates a batch of transactions.
    ///
    /// Must return all outcomes for the given transactions in the same order.
    ///
    /// The default implementation validates each transaction independently via
    /// [`Self::validate_transaction`].
    fn validate_transactions(
        &self,
        transactions: Vec<(TransactionOrigin, Self::Transaction)>,
    ) -> impl Future<Output = Vec<TransactionValidationOutcome<Self::Transaction>>> + Send {
        futures_util::future::join_all(
            transactions.into_iter().map(|(origin, tx)| self.validate_transaction(origin, tx)),
        )
    }
    /// Validates a batch of transactions with that given origin.
    ///
    /// Must return all outcomes for the given transactions in the same order.
    ///
    /// See also [`Self::validate_transaction`].
    fn validate_transactions_with_origin(
        &self,
        origin: TransactionOrigin,
        transactions: impl IntoIterator<Item = Self::Transaction> + Send,
    ) -> impl Future<Output = Vec<TransactionValidationOutcome<Self::Transaction>>> + Send {
        let futures = transactions.into_iter().map(|tx| self.validate_transaction(origin, tx));
        futures_util::future::join_all(futures)
    }
    /// Invoked when the head block changes.
    ///
    /// This can be used to update fork specific values (timestamp). The default implementation
    /// is a no-op.
    fn on_new_head_block<B>(&self, _new_tip_block: &SealedBlock<B>)
    where
        B: Block,
    {
    }
}
// Forwards every trait method to whichever variant the `Either` currently holds, so two
// different validator implementations can be used interchangeably behind one type.
impl<A, B> TransactionValidator for Either<A, B>
where
    A: TransactionValidator,
    B: TransactionValidator<Transaction = A::Transaction>,
{
    type Transaction = A::Transaction;
    async fn validate_transaction(
        &self,
        origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> TransactionValidationOutcome<Self::Transaction> {
        match self {
            Self::Left(v) => v.validate_transaction(origin, transaction).await,
            Self::Right(v) => v.validate_transaction(origin, transaction).await,
        }
    }
    async fn validate_transactions(
        &self,
        transactions: Vec<(TransactionOrigin, Self::Transaction)>,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        match self {
            Self::Left(v) => v.validate_transactions(transactions).await,
            Self::Right(v) => v.validate_transactions(transactions).await,
        }
    }
    async fn validate_transactions_with_origin(
        &self,
        origin: TransactionOrigin,
        transactions: impl IntoIterator<Item = Self::Transaction> + Send,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        match self {
            Self::Left(v) => v.validate_transactions_with_origin(origin, transactions).await,
            Self::Right(v) => v.validate_transactions_with_origin(origin, transactions).await,
        }
    }
    fn on_new_head_block<Bl>(&self, new_tip_block: &SealedBlock<Bl>)
    where
        Bl: Block,
    {
        match self {
            Self::Left(v) => v.on_new_head_block(new_tip_block),
            Self::Right(v) => v.on_new_head_block(new_tip_block),
        }
    }
}
/// A valid transaction in the pool.
///
/// This is used as the internal representation of a transaction inside the pool.
///
/// For EIP-4844 blob transactions this will _not_ contain the blob sidecar which is stored
/// separately in the [`BlobStore`](crate::blobstore::BlobStore).
pub struct ValidPoolTransaction<T: PoolTransaction> {
    /// The transaction
    pub transaction: T,
    /// The identifier for this transaction.
    pub transaction_id: TransactionId,
    /// Whether it is allowed to propagate the transaction.
    pub propagate: bool,
    /// Timestamp when this was added to the pool.
    pub timestamp: Instant,
    /// Where this transaction originated from.
    pub origin: TransactionOrigin,
    /// The sender ids of the 7702 transaction authorities.
    pub authority_ids: Option<Vec<SenderId>>,
}
// === impl ValidPoolTransaction ===

impl<T: PoolTransaction> ValidPoolTransaction<T> {
    /// Returns the hash of the transaction.
    pub fn hash(&self) -> &TxHash {
        self.transaction.hash()
    }

    /// Returns the type identifier of the transaction.
    pub fn tx_type(&self) -> u8 {
        self.transaction.ty()
    }

    /// Returns the address of the sender.
    pub fn sender(&self) -> Address {
        self.transaction.sender()
    }

    /// Returns a reference to the address of the sender.
    pub fn sender_ref(&self) -> &Address {
        self.transaction.sender_ref()
    }

    /// Returns the recipient of the transaction if it is not a CREATE transaction.
    pub fn to(&self) -> Option<Address> {
        self.transaction.to()
    }

    /// Returns the internal identifier for the sender of this transaction.
    pub const fn sender_id(&self) -> SenderId {
        self.transaction_id.sender
    }

    /// Returns the internal identifier for this transaction.
    pub const fn id(&self) -> &TransactionId {
        &self.transaction_id
    }

    /// Returns the length of the rlp encoded transaction.
    #[inline]
    pub fn encoded_length(&self) -> usize {
        self.transaction.encoded_length()
    }

    /// Returns the nonce set for this transaction.
    pub fn nonce(&self) -> u64 {
        self.transaction.nonce()
    }

    /// Returns the cost that this transaction is allowed to consume:
    ///
    /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`.
    /// For legacy transactions: `gas_price * gas_limit + tx_value`.
    pub fn cost(&self) -> &U256 {
        self.transaction.cost()
    }

    /// Returns the EIP-4844 max blob fee the caller is willing to pay.
    ///
    /// For non-EIP-4844 transactions, this returns [None].
    pub fn max_fee_per_blob_gas(&self) -> Option<u128> {
        self.transaction.max_fee_per_blob_gas()
    }

    /// Returns the EIP-1559 max base fee the caller is willing to pay.
    ///
    /// For legacy transactions this is `gas_price`.
    pub fn max_fee_per_gas(&self) -> u128 {
        self.transaction.max_fee_per_gas()
    }

    /// Returns the effective tip for this transaction.
    ///
    /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`.
    /// For legacy transactions: `gas_price - base_fee`.
    pub fn effective_tip_per_gas(&self, base_fee: u64) -> Option<u128> {
        self.transaction.effective_tip_per_gas(base_fee)
    }

    /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and
    /// otherwise returns the gas price.
    pub fn priority_fee_or_price(&self) -> u128 {
        self.transaction.priority_fee_or_price()
    }

    /// Maximum amount of gas that the transaction is allowed to consume.
    pub fn gas_limit(&self) -> u64 {
        self.transaction.gas_limit()
    }

    /// Whether the transaction originated locally.
    pub const fn is_local(&self) -> bool {
        self.origin.is_local()
    }

    /// Whether the transaction is an EIP-4844 blob transaction.
    #[inline]
    pub fn is_eip4844(&self) -> bool {
        self.transaction.is_eip4844()
    }

    /// The heap allocated size of this transaction.
    pub(crate) fn size(&self) -> usize {
        self.transaction.size()
    }

    /// Returns the [`SignedAuthorization`] list of the transaction.
    ///
    /// Returns `None` if this transaction is not EIP-7702.
    pub fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
        self.transaction.authorization_list()
    }

    /// Returns the number of [`SignedAuthorization`] entries in this transaction.
    ///
    /// This is a convenience function for `len(authorization_list)`.
    ///
    /// Returns `None` for non-eip7702 transactions.
    pub fn authorization_count(&self) -> Option<u64> {
        self.transaction.authorization_count()
    }

    /// EIP-4844 blob transactions and normal transactions are treated as mutually exclusive per
    /// account.
    ///
    /// Returns true if the transaction is an EIP-4844 blob transaction and the other is not, or
    /// vice versa.
    #[inline]
    pub(crate) fn tx_type_conflicts_with(&self, other: &Self) -> bool {
        self.is_eip4844() != other.is_eip4844()
    }

    /// Converts this type into the consensus transaction of the pooled transaction.
    ///
    /// Note: this takes `&self` since intended usage is via `Arc<Self>`.
    pub fn to_consensus(&self) -> Recovered<T::Consensus> {
        self.transaction.clone_into_consensus()
    }

    /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to
    /// an existing transaction in the pool.
    ///
    /// A transaction is considered underpriced if it doesn't meet the required fee bump threshold.
    /// This applies to both standard gas fees and, for blob-carrying transactions (EIP-4844),
    /// the blob-specific fees.
    #[inline]
    pub(crate) fn is_underpriced(
        &self,
        maybe_replacement: &Self,
        price_bumps: &PriceBumpConfig,
    ) -> bool {
        // Retrieve the required price bump percentage for this type of transaction.
        //
        // The bump is different for EIP-4844 and other transactions. See `PriceBumpConfig`.
        let price_bump = price_bumps.price_bump(self.tx_type());

        // Check if the max fee per gas is underpriced.
        if maybe_replacement.max_fee_per_gas() < self.max_fee_per_gas() * (100 + price_bump) / 100 {
            return true
        }

        let existing_max_priority_fee_per_gas =
            self.transaction.max_priority_fee_per_gas().unwrap_or_default();
        let replacement_max_priority_fee_per_gas =
            maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or_default();

        // Check max priority fee per gas (relevant for EIP-1559 transactions only).
        // If either side has a zero priority fee, this check is skipped.
        if existing_max_priority_fee_per_gas != 0 &&
            replacement_max_priority_fee_per_gas != 0 &&
            replacement_max_priority_fee_per_gas <
                existing_max_priority_fee_per_gas * (100 + price_bump) / 100
        {
            return true
        }

        // Check max blob fee per gas.
        if let Some(existing_max_blob_fee_per_gas) = self.transaction.max_fee_per_blob_gas() {
            // This enforces that blob txs can only be replaced by blob txs:
            // a non-blob replacement defaults to 0 and is always underpriced here.
            let replacement_max_blob_fee_per_gas =
                maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or_default();
            if replacement_max_blob_fee_per_gas <
                existing_max_blob_fee_per_gas * (100 + price_bump) / 100
            {
                return true
            }
        }

        false
    }
}
// `Clone` is only available in tests: production code is expected to share pool
// transactions (e.g. via `Arc`) rather than deep-clone them.
#[cfg(test)]
impl<T: PoolTransaction> Clone for ValidPoolTransaction<T> {
    fn clone(&self) -> Self {
        Self {
            transaction: self.transaction.clone(),
            transaction_id: self.transaction_id,
            propagate: self.propagate,
            timestamp: self.timestamp,
            origin: self.origin,
            authority_ids: self.authority_ids.clone(),
        }
    }
}
impl<T: PoolTransaction> fmt::Debug for ValidPoolTransaction<T> {
    /// Debug representation of a pool transaction.
    ///
    /// Fix: the `propagate` field was previously printed with the misspelled key `"pragate"`.
    /// `timestamp` and `authority_ids` are intentionally omitted for brevity.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ValidPoolTransaction")
            .field("id", &self.transaction_id)
            .field("propagate", &self.propagate)
            .field("origin", &self.origin)
            .field("hash", self.transaction.hash())
            .field("tx", &self.transaction)
            .finish()
    }
}
/// Validation errors that can occur during transaction validation.
#[derive(thiserror::Error, Debug)]
pub enum TransactionValidatorError {
    /// Failed to communicate with the validation service: the job channel was closed or the
    /// response channel was dropped before a validation outcome was produced.
    #[error("validation service unreachable")]
    ValidationServiceUnreachable,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/validate/task.rs | crates/transaction-pool/src/validate/task.rs | //! A validation service for transactions.
use crate::{
blobstore::BlobStore,
metrics::TxPoolValidatorMetrics,
validate::{EthTransactionValidatorBuilder, TransactionValidatorError},
EthTransactionValidator, PoolTransaction, TransactionOrigin, TransactionValidationOutcome,
TransactionValidator,
};
use futures_util::{lock::Mutex, StreamExt};
use reth_primitives_traits::{Block, SealedBlock};
use reth_tasks::TaskSpawner;
use std::{future::Future, pin::Pin, sync::Arc};
use tokio::{
sync,
sync::{mpsc, oneshot},
};
use tokio_stream::wrappers::ReceiverStream;
/// Represents a boxed validation job: a sendable future outputting the unit type.
type ValidationFuture = Pin<Box<dyn Future<Output = ()> + Send>>;

/// Represents a stream of validation futures.
type ValidationStream = ReceiverStream<ValidationFuture>;

/// A service that performs validation jobs.
///
/// This listens for incoming validation jobs and executes them.
///
/// This should be spawned as a task: [`ValidationTask::run`]
#[derive(Clone)]
pub struct ValidationTask {
    /// Shared stream of queued validation jobs; the mutex lets clones take turns polling it.
    validation_jobs: Arc<Mutex<ValidationStream>>,
}
impl ValidationTask {
    /// Creates a new cloneable task pair with a channel capacity of 1.
    ///
    /// The sender sends new (transaction) validation jobs to an available validation task.
    pub fn new() -> (ValidationJobSender, Self) {
        Self::with_capacity(1)
    }

    /// Creates a new cloneable task pair with the given channel capacity.
    pub fn with_capacity(capacity: usize) -> (ValidationJobSender, Self) {
        let (tx, rx) = mpsc::channel(capacity);
        let metrics = TxPoolValidatorMetrics::default();
        (ValidationJobSender { tx, metrics }, Self::with_receiver(rx))
    }

    /// Creates a new task with the given receiver of boxed validation jobs.
    pub fn with_receiver(jobs: mpsc::Receiver<Pin<Box<dyn Future<Output = ()> + Send>>>) -> Self {
        Self { validation_jobs: Arc::new(Mutex::new(ReceiverStream::new(jobs))) }
    }

    /// Executes all new validation jobs that come in.
    ///
    /// This will run as long as the channel is alive and is expected to be spawned as a task.
    ///
    /// NOTE(review): the mutex guard acquired in the `while let` scrutinee appears to stay
    /// alive for the duration of the loop body (pre-2024-edition temporary-scope rules), which
    /// would serialize job execution across clones of this task — confirm this is intended.
    pub async fn run(self) {
        while let Some(task) = self.validation_jobs.lock().await.next().await {
            task.await;
        }
    }
}
impl std::fmt::Debug for ValidationTask {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The job stream itself is not debuggable; emit a fixed placeholder for it.
        let mut out = f.debug_struct("ValidationTask");
        out.field("validation_jobs", &"...");
        out.finish()
    }
}
/// A sender newtype for sending validation jobs to a [`ValidationTask`].
#[derive(Debug)]
pub struct ValidationJobSender {
    /// Channel to the validation task(s); each item is a boxed validation job future.
    tx: mpsc::Sender<Pin<Box<dyn Future<Output = ()> + Send>>>,
    /// Metrics tracking in-flight validation jobs.
    metrics: TxPoolValidatorMetrics,
}
impl ValidationJobSender {
    /// Sends the given job to the validation task.
    ///
    /// The in-flight jobs gauge is incremented for the duration of the send and decremented
    /// again before returning, regardless of whether the send succeeded.
    pub async fn send(
        &self,
        job: Pin<Box<dyn Future<Output = ()> + Send>>,
    ) -> Result<(), TransactionValidatorError> {
        self.metrics.inflight_validation_jobs.increment(1);
        let outcome = match self.tx.send(job).await {
            Ok(()) => Ok(()),
            Err(_) => Err(TransactionValidatorError::ValidationServiceUnreachable),
        };
        self.metrics.inflight_validation_jobs.decrement(1);
        outcome
    }
}
/// A [`TransactionValidator`] implementation that validates ethereum transactions.
/// This validator is non-blocking: all validation work is done in a separate task.
#[derive(Debug)]
pub struct TransactionValidationTaskExecutor<V> {
    /// The validator that will validate transactions on a separate task.
    pub validator: Arc<V>,
    /// The sender half to validation tasks that perform the actual validation.
    pub to_validation_task: Arc<sync::Mutex<ValidationJobSender>>,
}
impl<V> Clone for TransactionValidationTaskExecutor<V> {
fn clone(&self) -> Self {
Self {
validator: self.validator.clone(),
to_validation_task: self.to_validation_task.clone(),
}
}
}
// === impl TransactionValidationTaskExecutor ===

// The `()` type parameter is a stand-in: this impl only hosts free constructor helpers.
impl TransactionValidationTaskExecutor<()> {
    /// Convenience method to create a [`EthTransactionValidatorBuilder`] for the given client.
    pub fn eth_builder<Client>(client: Client) -> EthTransactionValidatorBuilder<Client> {
        EthTransactionValidatorBuilder::new(client)
    }
}
impl<V> TransactionValidationTaskExecutor<V> {
    /// Maps the wrapped validator to a new type.
    ///
    /// The closure is invoked exactly once, so the bound is `FnOnce`; every `FnMut`/`Fn`
    /// closure also satisfies it, keeping existing callers working.
    ///
    /// # Panics
    ///
    /// Panics if the internal validator [`Arc`] has other strong references: the validator
    /// must be uniquely owned so it can be moved out of the `Arc` and transformed.
    pub fn map<F, T>(self, f: F) -> TransactionValidationTaskExecutor<T>
    where
        F: FnOnce(V) -> T,
    {
        let validator = Arc::into_inner(self.validator)
            .expect("validator Arc must be uniquely owned to map the validator");
        TransactionValidationTaskExecutor {
            validator: Arc::new(f(validator)),
            to_validation_task: self.to_validation_task,
        }
    }

    /// Returns a reference to the wrapped validator.
    pub fn validator(&self) -> &V {
        &self.validator
    }
}
impl<Client, Tx> TransactionValidationTaskExecutor<EthTransactionValidator<Client, Tx>> {
    /// Creates a new instance for the given client.
    ///
    /// This will spawn a single validation task that performs the actual validation.
    /// See [`TransactionValidationTaskExecutor::eth_with_additional_tasks`]
    pub fn eth<T, S: BlobStore>(client: Client, blob_store: S, tasks: T) -> Self
    where
        T: TaskSpawner,
    {
        // Zero additional workers: exactly one validation task in total.
        Self::eth_with_additional_tasks(client, blob_store, tasks, 0)
    }

    /// Creates a new instance for the given client.
    ///
    /// By default this will enable support for:
    /// - shanghai
    /// - eip1559
    /// - eip2930
    ///
    /// This will always spawn a validation task that performs the actual validation. It will spawn
    /// `num_additional_tasks` additional tasks.
    pub fn eth_with_additional_tasks<T, S: BlobStore>(
        client: Client,
        blob_store: S,
        tasks: T,
        num_additional_tasks: usize,
    ) -> Self
    where
        T: TaskSpawner,
    {
        EthTransactionValidatorBuilder::new(client)
            .with_additional_tasks(num_additional_tasks)
            .build_with_tasks::<Tx, T, S>(tasks, blob_store)
    }
}
impl<V> TransactionValidationTaskExecutor<V> {
    /// Creates a new executor instance with the given validator for transaction validation.
    ///
    /// Initializes the executor with the provided validator and sets up communication for
    /// validation tasks.
    ///
    /// NOTE(review): the receiving [`ValidationTask`] half returned by [`ValidationTask::new`]
    /// is dropped here, so jobs submitted through `to_validation_task` will fail with
    /// `ValidationServiceUnreachable` — confirm callers of this constructor never rely on the
    /// task round-trip, or replace the sender before use.
    pub fn new(validator: V) -> Self {
        let (tx, _) = ValidationTask::new();
        Self { validator: Arc::new(validator), to_validation_task: Arc::new(sync::Mutex::new(tx)) }
    }
}
impl<V> TransactionValidator for TransactionValidationTaskExecutor<V>
where
    V: TransactionValidator + 'static,
{
    type Transaction = <V as TransactionValidator>::Transaction;

    /// Validates a single transaction by shipping the job to a validation task and awaiting the
    /// outcome over a oneshot channel.
    async fn validate_transaction(
        &self,
        origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> TransactionValidationOutcome<Self::Transaction> {
        // Keep the hash so an error outcome can still be attributed to this transaction
        // after it has been moved into the job future.
        let hash = *transaction.hash();
        let (tx, rx) = oneshot::channel();
        {
            let res = {
                let to_validation_task = self.to_validation_task.clone();
                let validator = self.validator.clone();
                // The actual validation runs inside this future on a `ValidationTask`;
                // the result is sent back through the oneshot sender.
                let fut = Box::pin(async move {
                    let res = validator.validate_transaction(origin, transaction).await;
                    let _ = tx.send(res);
                });
                let to_validation_task = to_validation_task.lock().await;
                to_validation_task.send(fut).await
            };
            // Submitting the job failed: the validation service is gone.
            if res.is_err() {
                return TransactionValidationOutcome::Error(
                    hash,
                    Box::new(TransactionValidatorError::ValidationServiceUnreachable),
                );
            }
        }
        // A dropped oneshot sender (job never ran to completion) is also a service failure.
        match rx.await {
            Ok(res) => res,
            Err(_) => TransactionValidationOutcome::Error(
                hash,
                Box::new(TransactionValidatorError::ValidationServiceUnreachable),
            ),
        }
    }

    /// Validates a batch of transactions as a single job; on failure every transaction in the
    /// batch is reported as `ValidationServiceUnreachable`.
    async fn validate_transactions(
        &self,
        transactions: Vec<(TransactionOrigin, Self::Transaction)>,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        // Record all hashes up front so failures can be attributed per transaction.
        let hashes: Vec<_> = transactions.iter().map(|(_, tx)| *tx.hash()).collect();
        let (tx, rx) = oneshot::channel();
        {
            let res = {
                let to_validation_task = self.to_validation_task.clone();
                let validator = self.validator.clone();
                let fut = Box::pin(async move {
                    let res = validator.validate_transactions(transactions).await;
                    let _ = tx.send(res);
                });
                let to_validation_task = to_validation_task.lock().await;
                to_validation_task.send(fut).await
            };
            if res.is_err() {
                return hashes
                    .into_iter()
                    .map(|hash| {
                        TransactionValidationOutcome::Error(
                            hash,
                            Box::new(TransactionValidatorError::ValidationServiceUnreachable),
                        )
                    })
                    .collect();
            }
        }
        match rx.await {
            Ok(res) => res,
            Err(_) => hashes
                .into_iter()
                .map(|hash| {
                    TransactionValidationOutcome::Error(
                        hash,
                        Box::new(TransactionValidatorError::ValidationServiceUnreachable),
                    )
                })
                .collect(),
        }
    }

    /// Convenience wrapper: tags every transaction with the same origin and delegates to
    /// [`Self::validate_transactions`].
    async fn validate_transactions_with_origin(
        &self,
        origin: TransactionOrigin,
        transactions: impl IntoIterator<Item = Self::Transaction> + Send,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        self.validate_transactions(transactions.into_iter().map(|tx| (origin, tx)).collect()).await
    }

    /// Forwards new-head notifications to the wrapped validator.
    fn on_new_head_block<B>(&self, new_tip_block: &SealedBlock<B>)
    where
        B: Block,
    {
        self.validator.on_new_head_block(new_tip_block)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/validate/constants.rs | crates/transaction-pool/src/validate/constants.rs | /// [`TX_SLOT_BYTE_SIZE`] is used to calculate how many data slots a single transaction
/// takes up based on its byte size. The slots are used as `DoS` protection, ensuring
/// that validating a new transaction remains a constant operation (in reality
/// O(maxslots), where max slots are 4 currently).
pub const TX_SLOT_BYTE_SIZE: usize = 32 * 1024;

/// [`DEFAULT_MAX_TX_INPUT_BYTES`] is the default maximum size a single transaction can have. This
/// field has non-trivial consequences: larger transactions are significantly harder and
/// more expensive to propagate; larger transactions also take more resources
/// to validate whether they fit into the pool or not. Default is 4 times [`TX_SLOT_BYTE_SIZE`],
/// which defaults to 32 KiB, so 128 KiB.
pub const DEFAULT_MAX_TX_INPUT_BYTES: usize = 4 * TX_SLOT_BYTE_SIZE; // 128 KiB

/// Maximum bytecode size to permit for a contract (EIP-170 limit, re-exported from revm).
pub const MAX_CODE_BYTE_SIZE: usize = revm_primitives::eip170::MAX_CODE_SIZE;

/// Maximum initcode size to permit in a creation transaction and create instructions
/// (EIP-3860 limit, re-exported from revm).
pub const MAX_INIT_CODE_BYTE_SIZE: usize = revm_primitives::eip3860::MAX_INITCODE_SIZE;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/test_utils/mod.rs | crates/transaction-pool/src/test_utils/mod.rs | //! Internal helpers for testing.
use crate::{blobstore::InMemoryBlobStore, noop::MockTransactionValidator, Pool, PoolConfig};
use std::ops::Deref;
mod tx_gen;
pub use tx_gen::*;
mod mock;
pub use mock::*;
mod pool;
mod okvalidator;
pub use okvalidator::*;
/// A [Pool] used for testing: mock validator, mock ordering, in-memory blob store.
pub type TestPool =
    Pool<MockTransactionValidator<MockTransaction>, MockOrdering, InMemoryBlobStore>;

/// Structure encapsulating a [`TestPool`] used for testing; derefs to the inner pool.
#[derive(Debug, Clone)]
pub struct TestPoolBuilder(TestPool);
impl Default for TestPoolBuilder {
fn default() -> Self {
Self(Pool::new(
MockTransactionValidator::default(),
MockOrdering::default(),
InMemoryBlobStore::default(),
Default::default(),
))
}
}
impl TestPoolBuilder {
    /// Returns a new [`TestPoolBuilder`] with a custom validator used for testing purposes.
    ///
    /// NOTE: `self.pool` resolves through `Deref` to the wrapped pool's internals;
    /// unchanged components are carried over from the current pool.
    pub fn with_validator(self, validator: MockTransactionValidator<MockTransaction>) -> Self {
        Self(Pool::new(
            validator,
            MockOrdering::default(),
            self.pool.blob_store().clone(),
            self.pool.config().clone(),
        ))
    }

    /// Returns a new [`TestPoolBuilder`] with a custom ordering used for testing purposes.
    pub fn with_ordering(self, ordering: MockOrdering) -> Self {
        Self(Pool::new(
            self.pool.validator().clone(),
            ordering,
            self.pool.blob_store().clone(),
            self.pool.config().clone(),
        ))
    }

    /// Returns a new [`TestPoolBuilder`] with a custom blob store used for testing purposes.
    pub fn with_blob_store(self, blob_store: InMemoryBlobStore) -> Self {
        Self(Pool::new(
            self.pool.validator().clone(),
            MockOrdering::default(),
            blob_store,
            self.pool.config().clone(),
        ))
    }

    /// Returns a new [`TestPoolBuilder`] with a custom configuration used for testing purposes.
    pub fn with_config(self, config: PoolConfig) -> Self {
        Self(Pool::new(
            self.pool.validator().clone(),
            MockOrdering::default(),
            self.pool.blob_store().clone(),
            config,
        ))
    }
}
impl From<TestPoolBuilder> for TestPool {
fn from(wrapper: TestPoolBuilder) -> Self {
wrapper.0
}
}
impl Deref for TestPoolBuilder {
    type Target = TestPool;

    /// Exposes the wrapped pool so tests can call pool methods directly on the builder.
    fn deref(&self) -> &Self::Target {
        let Self(pool) = self;
        pool
    }
}
/// Returns a new [Pool] with default field values used for testing purposes
pub fn testing_pool() -> TestPool {
TestPoolBuilder::default().into()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/test_utils/tx_gen.rs | crates/transaction-pool/src/test_utils/tx_gen.rs | use crate::{EthPooledTransaction, PoolTransaction};
use alloy_consensus::{SignableTransaction, TxEip1559, TxEip4844, TxLegacy};
use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2718::Encodable2718, eip2930::AccessList};
use alloy_primitives::{Address, Bytes, TxKind, B256, U256};
use rand::{Rng, RngCore};
use reth_chainspec::MAINNET;
use reth_ethereum_primitives::{Transaction, TransactionSigned};
use reth_primitives_traits::{crypto::secp256k1::sign_message, SignedTransaction};
/// A generator for transactions for testing purposes.
#[derive(Debug)]
pub struct TransactionGenerator<R> {
    /// The random number generator used for selecting signers.
    pub rng: R,
    /// The set of signer keys available for transaction generation.
    pub signer_keys: Vec<B256>,
    /// The base fee applied to generated transactions (also used as the priority fee default).
    pub base_fee: u128,
    /// The gas limit applied to generated transactions.
    pub gas_limit: u64,
}
impl<R: RngCore> TransactionGenerator<R> {
    /// Initializes the generator with 10 random signers.
    pub fn new(rng: R) -> Self {
        Self::with_num_signers(rng, 10)
    }

    /// Generates random signers.
    ///
    /// NOTE(review): keys come from `B256::random()`, which does not draw from the supplied
    /// `rng`, so key generation is not reproducible from a seeded `rng` — confirm intended.
    pub fn with_num_signers(rng: R, num_signers: usize) -> Self {
        Self {
            rng,
            signer_keys: (0..num_signers).map(|_| B256::random()).collect(),
            base_fee: MIN_PROTOCOL_BASE_FEE as u128,
            gas_limit: 300_000,
        }
    }

    /// Adds a new signer to the set.
    pub fn push_signer(&mut self, signer: B256) -> &mut Self {
        self.signer_keys.push(signer);
        self
    }

    /// Sets the default gas limit for all generated transactions (mutable-reference version).
    pub const fn set_gas_limit(&mut self, gas_limit: u64) -> &mut Self {
        self.gas_limit = gas_limit;
        self
    }

    /// Sets the default gas limit for all generated transactions (builder version).
    pub const fn with_gas_limit(mut self, gas_limit: u64) -> Self {
        self.gas_limit = gas_limit;
        self
    }

    /// Sets the base fee for the generated transactions (mutable-reference version).
    pub const fn set_base_fee(&mut self, base_fee: u64) -> &mut Self {
        self.base_fee = base_fee as u128;
        self
    }

    /// Sets the base fee for the generated transactions (builder version).
    pub const fn with_base_fee(mut self, base_fee: u64) -> Self {
        self.base_fee = base_fee as u128;
        self
    }

    /// Adds the given signers to the set.
    pub fn extend_signers(&mut self, signers: impl IntoIterator<Item = B256>) -> &mut Self {
        self.signer_keys.extend(signers);
        self
    }

    /// Returns a random signer from the set.
    ///
    /// Panics if `signer_keys` is empty (e.g. constructed via `with_num_signers(rng, 0)`).
    fn rng_signer(&mut self) -> B256 {
        let idx = self.rng.random_range(0..self.signer_keys.len());
        self.signer_keys[idx]
    }

    /// Creates a new transaction builder pre-populated with a random signer and the
    /// generator's fee/gas defaults.
    pub fn transaction(&mut self) -> TransactionBuilder {
        TransactionBuilder::default()
            .signer(self.rng_signer())
            .max_fee_per_gas(self.base_fee)
            .max_priority_fee_per_gas(self.base_fee)
            .gas_limit(self.gas_limit)
    }

    /// Creates a new EIP-1559 transaction with a random signer.
    pub fn gen_eip1559(&mut self) -> TransactionSigned {
        self.transaction().into_eip1559()
    }

    /// Creates a new EIP-4844 transaction with a random signer.
    pub fn gen_eip4844(&mut self) -> TransactionSigned {
        self.transaction().into_eip4844()
    }

    /// Generates and returns a pooled EIP-1559 transaction with a random signer.
    pub fn gen_eip1559_pooled(&mut self) -> EthPooledTransaction {
        EthPooledTransaction::try_from_consensus(
            SignedTransaction::try_into_recovered(self.gen_eip1559()).unwrap(),
        )
        .unwrap()
    }

    /// Generates and returns a pooled EIP-4844 transaction with a random signer.
    pub fn gen_eip4844_pooled(&mut self) -> EthPooledTransaction {
        let tx = self.gen_eip4844().try_into_recovered().unwrap();
        let encoded_length = tx.encode_2718_len();
        EthPooledTransaction::new(tx, encoded_length)
    }
}
/// A Builder type to configure and create a transaction.
#[derive(Debug)]
pub struct TransactionBuilder {
    /// The signer key used to sign the transaction.
    pub signer: B256,
    /// The chain ID on which the transaction will be executed.
    pub chain_id: u64,
    /// The nonce value for the transaction to prevent replay attacks.
    pub nonce: u64,
    /// The maximum amount of gas units that the transaction can consume.
    pub gas_limit: u64,
    /// The maximum fee per gas unit that the sender is willing to pay.
    pub max_fee_per_gas: u128,
    /// The maximum priority fee per gas unit that the sender is willing to pay for faster
    /// processing.
    pub max_priority_fee_per_gas: u128,
    /// The recipient or contract address of the transaction.
    pub to: TxKind,
    /// The value to be transferred in the transaction.
    pub value: U256,
    /// The list of addresses and storage keys that the transaction can access.
    pub access_list: AccessList,
    /// The input data for the transaction, typically containing function parameters for contract
    /// calls.
    pub input: Bytes,
}
impl TransactionBuilder {
    /// Converts the transaction builder into a legacy transaction format.
    ///
    /// Note: `max_fee_per_gas` is reused as the legacy `gas_price`.
    pub fn into_legacy(self) -> TransactionSigned {
        Self::signed(
            TxLegacy {
                chain_id: Some(self.chain_id),
                nonce: self.nonce,
                gas_limit: self.gas_limit,
                gas_price: self.max_fee_per_gas,
                to: self.to,
                value: self.value,
                input: self.input,
            }
            .into(),
            self.signer,
        )
    }

    /// Converts the transaction builder into a transaction format using EIP-1559.
    pub fn into_eip1559(self) -> TransactionSigned {
        Self::signed(
            TxEip1559 {
                chain_id: self.chain_id,
                nonce: self.nonce,
                gas_limit: self.gas_limit,
                max_fee_per_gas: self.max_fee_per_gas,
                max_priority_fee_per_gas: self.max_priority_fee_per_gas,
                to: self.to,
                value: self.value,
                access_list: self.access_list,
                input: self.input,
            }
            .into(),
            self.signer,
        )
    }

    /// Converts the transaction builder into a transaction format using EIP-4844.
    ///
    /// Blob transactions cannot be CREATE transactions; a `TxKind::Create` destination is
    /// mapped to the zero address. Blob hashes and blob fee default to empty/zero.
    pub fn into_eip4844(self) -> TransactionSigned {
        Self::signed(
            TxEip4844 {
                chain_id: self.chain_id,
                nonce: self.nonce,
                gas_limit: self.gas_limit,
                max_fee_per_gas: self.max_fee_per_gas,
                max_priority_fee_per_gas: self.max_priority_fee_per_gas,
                to: match self.to {
                    TxKind::Call(to) => to,
                    TxKind::Create => Address::default(),
                },
                value: self.value,
                access_list: self.access_list,
                input: self.input,
                blob_versioned_hashes: Default::default(),
                max_fee_per_blob_gas: Default::default(),
            }
            .into(),
            self.signer,
        )
    }

    /// Signs the provided transaction using the specified signer key and returns a signed
    /// transaction.
    fn signed(transaction: Transaction, signer: B256) -> TransactionSigned {
        let signature = sign_message(signer, transaction.signature_hash()).unwrap();
        TransactionSigned::new_unhashed(transaction, signature)
    }

    /// Sets the signer for the transaction builder.
    pub const fn signer(mut self, signer: B256) -> Self {
        self.signer = signer;
        self
    }

    /// Sets the gas limit for the transaction builder.
    pub const fn gas_limit(mut self, gas_limit: u64) -> Self {
        self.gas_limit = gas_limit;
        self
    }

    /// Sets the nonce for the transaction builder.
    pub const fn nonce(mut self, nonce: u64) -> Self {
        self.nonce = nonce;
        self
    }

    /// Increments the nonce value of the transaction builder by 1.
    pub const fn inc_nonce(mut self) -> Self {
        self.nonce += 1;
        self
    }

    /// Decrements the nonce value of the transaction builder by 1, avoiding underflow.
    pub const fn decr_nonce(mut self) -> Self {
        self.nonce = self.nonce.saturating_sub(1);
        self
    }

    /// Sets the maximum fee per gas for the transaction builder.
    pub const fn max_fee_per_gas(mut self, max_fee_per_gas: u128) -> Self {
        self.max_fee_per_gas = max_fee_per_gas;
        self
    }

    /// Sets the maximum priority fee per gas for the transaction builder.
    pub const fn max_priority_fee_per_gas(mut self, max_priority_fee_per_gas: u128) -> Self {
        self.max_priority_fee_per_gas = max_priority_fee_per_gas;
        self
    }

    /// Sets the recipient or contract address for the transaction builder.
    pub const fn to(mut self, to: Address) -> Self {
        self.to = TxKind::Call(to);
        self
    }

    /// Sets the value to be transferred in the transaction.
    pub fn value(mut self, value: u128) -> Self {
        self.value = U256::from(value);
        self
    }

    /// Sets the access list for the transaction builder.
    pub fn access_list(mut self, access_list: AccessList) -> Self {
        self.access_list = access_list;
        self
    }

    /// Sets the transaction input data.
    pub fn input(mut self, input: impl Into<Bytes>) -> Self {
        self.input = input.into();
        self
    }

    /// Sets the chain ID for the transaction.
    pub const fn chain_id(mut self, chain_id: u64) -> Self {
        self.chain_id = chain_id;
        self
    }

    /// Sets the chain ID for the transaction, mutable reference version.
    pub const fn set_chain_id(&mut self, chain_id: u64) -> &mut Self {
        self.chain_id = chain_id;
        self
    }

    /// Sets the nonce for the transaction, mutable reference version.
    pub const fn set_nonce(&mut self, nonce: u64) -> &mut Self {
        self.nonce = nonce;
        self
    }

    /// Sets the gas limit for the transaction, mutable reference version.
    pub const fn set_gas_limit(&mut self, gas_limit: u64) -> &mut Self {
        self.gas_limit = gas_limit;
        self
    }

    /// Sets the maximum fee per gas for the transaction, mutable reference version.
    pub const fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) -> &mut Self {
        self.max_fee_per_gas = max_fee_per_gas;
        self
    }

    /// Sets the maximum priority fee per gas for the transaction, mutable reference version.
    pub const fn set_max_priority_fee_per_gas(
        &mut self,
        max_priority_fee_per_gas: u128,
    ) -> &mut Self {
        self.max_priority_fee_per_gas = max_priority_fee_per_gas;
        self
    }

    /// Sets the recipient or contract address for the transaction, mutable reference version.
    ///
    /// Now uses `TxKind::Call` explicitly (instead of `to.into()`) for consistency with
    /// [`Self::to`], which also makes the method `const` like its sibling setters.
    pub const fn set_to(&mut self, to: Address) -> &mut Self {
        self.to = TxKind::Call(to);
        self
    }

    /// Sets the value to be transferred in the transaction, mutable reference version.
    pub fn set_value(&mut self, value: u128) -> &mut Self {
        self.value = U256::from(value);
        self
    }

    /// Sets the access list for the transaction, mutable reference version.
    pub fn set_access_list(&mut self, access_list: AccessList) -> &mut Self {
        self.access_list = access_list;
        self
    }

    /// Sets the signer for the transaction, mutable reference version.
    pub const fn set_signer(&mut self, signer: B256) -> &mut Self {
        self.signer = signer;
        self
    }

    /// Sets the transaction input data, mutable reference version.
    pub fn set_input(&mut self, input: impl Into<Bytes>) -> &mut Self {
        self.input = input.into();
        self
    }
}
impl Default for TransactionBuilder {
    /// A builder with a fresh random signer, mainnet chain id, and zeroed fees/limits.
    fn default() -> Self {
        Self {
            signer: B256::random(),
            chain_id: MAINNET.chain.id(),
            nonce: 0,
            gas_limit: 0,
            max_fee_per_gas: 0,
            max_priority_fee_per_gas: 0,
            to: Default::default(),
            value: Default::default(),
            access_list: Default::default(),
            input: Default::default(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::rng;

    /// Smoke test: the generator produces legacy and EIP-1559 transactions without panicking.
    #[test]
    fn test_generate_transaction() {
        let rng = rng();
        let mut tx_gen = TransactionGenerator::new(rng);
        let _tx = tx_gen.transaction().into_legacy();
        let _tx = tx_gen.transaction().into_eip1559();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/test_utils/mock.rs | crates/transaction-pool/src/test_utils/mock.rs | //! Mock types.
use crate::{
identifier::{SenderIdentifiers, TransactionId},
pool::txpool::TxPool,
traits::TransactionOrigin,
CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction,
ValidPoolTransaction,
};
use alloy_consensus::{
constants::{
EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID,
LEGACY_TX_TYPE_ID,
},
EthereumTxEnvelope, Signed, TxEip1559, TxEip2930, TxEip4844, TxEip4844Variant, TxEip7702,
TxLegacy, TxType, Typed2718,
};
use alloy_eips::{
eip1559::MIN_PROTOCOL_BASE_FEE,
eip2930::AccessList,
eip4844::{BlobTransactionSidecar, BlobTransactionValidationError, DATA_GAS_PER_BLOB},
eip7594::BlobTransactionSidecarVariant,
eip7702::SignedAuthorization,
};
use alloy_primitives::{Address, Bytes, ChainId, Signature, TxHash, TxKind, B256, U256};
use paste::paste;
use rand::{distr::Uniform, prelude::Distribution};
use reth_ethereum_primitives::{PooledTransactionVariant, Transaction, TransactionSigned};
use reth_primitives_traits::{
transaction::error::TryFromRecoveredTransactionError, InMemorySize, Recovered,
SignedTransaction,
};
use alloy_consensus::error::ValueError;
use alloy_eips::eip4844::env_settings::KzgSettings;
use rand::distr::weighted::WeightedIndex;
use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter};
/// A transaction pool implementation using [`MockOrdering`] for transaction ordering.
///
/// This type is an alias for [`TxPool<MockOrdering>`].
pub type MockTxPool = TxPool<MockOrdering>;

/// A validated transaction in the transaction pool, using [`MockTransaction`] as the transaction
/// type.
///
/// This type is an alias for [`ValidPoolTransaction<MockTransaction>`].
pub type MockValidTx = ValidPoolTransaction<MockTransaction>;

/// Creates an empty [`MockTxPool`] with default ordering and config.
pub fn mock_tx_pool() -> MockTxPool {
    MockTxPool::new(Default::default(), Default::default())
}
/// Sets the value for the field.
///
/// Expands to a match over every `MockTransaction` variant (they all carry the named field),
/// assigns the new value, and then recomputes the cached `cost` so it stays consistent.
macro_rules! set_value {
    ($this:ident => $field:ident) => {
        let new_value = $field;
        match $this {
            MockTransaction::Legacy { ref mut $field, .. } |
            MockTransaction::Eip1559 { ref mut $field, .. } |
            MockTransaction::Eip4844 { ref mut $field, .. } |
            MockTransaction::Eip2930 { ref mut $field, .. } |
            MockTransaction::Eip7702 { ref mut $field, .. } => {
                *$field = new_value;
            }
        }
        // Ensure the tx cost is always correct after each mutation.
        $this.update_cost();
    };
}
/// Gets the value for the field.
///
/// Expands to a match over every `MockTransaction` variant and yields a reference to the
/// named field (present on all variants).
macro_rules! get_value {
    ($this:tt => $field:ident) => {
        match $this {
            MockTransaction::Legacy { $field, .. } |
            MockTransaction::Eip1559 { $field, .. } |
            MockTransaction::Eip4844 { $field, .. } |
            MockTransaction::Eip2930 { $field, .. } |
            MockTransaction::Eip7702 { $field, .. } => $field,
        }
    };
}
// Generates all setters and getters: for each `name => type` pair this emits
// `set_name` (mutable setter), `with_name` (builder-style setter) and `get_name`
// (shared-reference getter), built on top of `set_value!`/`get_value!` via `paste!`.
macro_rules! make_setters_getters {
    ($($name:ident => $t:ty);*) => {
        paste! {$(
            /// Sets the value of the specified field.
            pub fn [<set_ $name>](&mut self, $name: $t) -> &mut Self {
                set_value!(self => $name);
                self
            }

            /// Sets the value of the specified field using a fluent interface.
            pub fn [<with_ $name>](mut self, $name: $t) -> Self {
                set_value!(self => $name);
                self
            }

            /// Gets the value of the specified field.
            pub const fn [<get_ $name>](&self) -> &$t {
                get_value!(self => $name)
            }
        )*}
    };
}
/// A bare transaction type used for testing.
///
/// Each variant mirrors one supported transaction envelope (legacy, EIP-2930, EIP-1559,
/// EIP-4844, EIP-7702). Every variant additionally carries a cached `size` and `cost`
/// field so the [`PoolTransaction`] impl can return them cheaply; `cost` is kept in sync
/// by the setter macros via `update_cost`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum MockTransaction {
    /// Legacy transaction type.
    Legacy {
        /// The chain id of the transaction.
        chain_id: Option<ChainId>,
        /// The hash of the transaction.
        hash: B256,
        /// The sender's address.
        sender: Address,
        /// The transaction nonce.
        nonce: u64,
        /// The gas price for the transaction.
        gas_price: u128,
        /// The gas limit for the transaction.
        gas_limit: u64,
        /// The transaction's destination.
        to: TxKind,
        /// The value of the transaction.
        value: U256,
        /// The transaction input data.
        input: Bytes,
        /// The size of the transaction, returned in the implementation of [`PoolTransaction`].
        size: usize,
        /// The cost of the transaction, returned in the implementation of [`PoolTransaction`].
        cost: U256,
    },
    /// EIP-2930 transaction type.
    Eip2930 {
        /// The chain id of the transaction.
        chain_id: ChainId,
        /// The hash of the transaction.
        hash: B256,
        /// The sender's address.
        sender: Address,
        /// The transaction nonce.
        nonce: u64,
        /// The transaction's destination.
        to: TxKind,
        /// The gas limit for the transaction.
        gas_limit: u64,
        /// The transaction input data.
        input: Bytes,
        /// The value of the transaction.
        value: U256,
        /// The gas price for the transaction.
        gas_price: u128,
        /// The access list associated with the transaction.
        access_list: AccessList,
        /// The size of the transaction, returned in the implementation of [`PoolTransaction`].
        size: usize,
        /// The cost of the transaction, returned in the implementation of [`PoolTransaction`].
        cost: U256,
    },
    /// EIP-1559 transaction type.
    Eip1559 {
        /// The chain id of the transaction.
        chain_id: ChainId,
        /// The hash of the transaction.
        hash: B256,
        /// The sender's address.
        sender: Address,
        /// The transaction nonce.
        nonce: u64,
        /// The maximum fee per gas for the transaction.
        max_fee_per_gas: u128,
        /// The maximum priority fee per gas for the transaction.
        max_priority_fee_per_gas: u128,
        /// The gas limit for the transaction.
        gas_limit: u64,
        /// The transaction's destination.
        to: TxKind,
        /// The value of the transaction.
        value: U256,
        /// The access list associated with the transaction.
        access_list: AccessList,
        /// The transaction input data.
        input: Bytes,
        /// The size of the transaction, returned in the implementation of [`PoolTransaction`].
        size: usize,
        /// The cost of the transaction, returned in the implementation of [`PoolTransaction`].
        cost: U256,
    },
    /// EIP-4844 transaction type.
    Eip4844 {
        /// The chain id of the transaction.
        chain_id: ChainId,
        /// The hash of the transaction.
        hash: B256,
        /// The sender's address.
        sender: Address,
        /// The transaction nonce.
        nonce: u64,
        /// The maximum fee per gas for the transaction.
        max_fee_per_gas: u128,
        /// The maximum priority fee per gas for the transaction.
        max_priority_fee_per_gas: u128,
        /// The maximum fee per blob gas for the transaction.
        max_fee_per_blob_gas: u128,
        /// The gas limit for the transaction.
        gas_limit: u64,
        /// The transaction's destination. Always a call: blob txs cannot create contracts.
        to: Address,
        /// The value of the transaction.
        value: U256,
        /// The access list associated with the transaction.
        access_list: AccessList,
        /// The transaction input data.
        input: Bytes,
        /// The sidecar information for the transaction.
        sidecar: BlobTransactionSidecarVariant,
        /// The blob versioned hashes for the transaction.
        blob_versioned_hashes: Vec<B256>,
        /// The size of the transaction, returned in the implementation of [`PoolTransaction`].
        size: usize,
        /// The cost of the transaction, returned in the implementation of [`PoolTransaction`].
        cost: U256,
    },
    /// EIP-7702 transaction type.
    Eip7702 {
        /// The chain id of the transaction.
        chain_id: ChainId,
        /// The hash of the transaction.
        hash: B256,
        /// The sender's address.
        sender: Address,
        /// The transaction nonce.
        nonce: u64,
        /// The maximum fee per gas for the transaction.
        max_fee_per_gas: u128,
        /// The maximum priority fee per gas for the transaction.
        max_priority_fee_per_gas: u128,
        /// The gas limit for the transaction.
        gas_limit: u64,
        /// The transaction's destination. Always a call: 7702 txs cannot create contracts.
        to: Address,
        /// The value of the transaction.
        value: U256,
        /// The access list associated with the transaction.
        access_list: AccessList,
        /// The authorization list associated with the transaction.
        authorization_list: Vec<SignedAuthorization>,
        /// The transaction input data.
        input: Bytes,
        /// The size of the transaction, returned in the implementation of [`PoolTransaction`].
        size: usize,
        /// The cost of the transaction, returned in the implementation of [`PoolTransaction`].
        cost: U256,
    },
}
// === impl MockTransaction ===
impl MockTransaction {
    // Generate `set_*`, `with_*` and `get_*` accessors for the fields shared by every variant.
    make_setters_getters! {
        nonce => u64;
        hash => B256;
        sender => Address;
        gas_limit => u64;
        value => U256;
        input => Bytes;
        size => usize
    }
    /// Returns a new legacy transaction with random address and hash and empty values
    ///
    /// Fees and gas limit start at zero, so the cached `cost` of `U256::ZERO` is consistent.
    pub fn legacy() -> Self {
        Self::Legacy {
            chain_id: Some(1),
            hash: B256::random(),
            sender: Address::random(),
            nonce: 0,
            gas_price: 0,
            gas_limit: 0,
            to: Address::random().into(),
            value: Default::default(),
            input: Default::default(),
            size: Default::default(),
            cost: U256::ZERO,
        }
    }
    /// Returns a new EIP2930 transaction with random address and hash and empty values
    pub fn eip2930() -> Self {
        Self::Eip2930 {
            chain_id: 1,
            hash: B256::random(),
            sender: Address::random(),
            nonce: 0,
            to: Address::random().into(),
            gas_limit: 0,
            input: Bytes::new(),
            value: Default::default(),
            gas_price: 0,
            access_list: Default::default(),
            size: Default::default(),
            cost: U256::ZERO,
        }
    }
    /// Returns a new EIP1559 transaction with random address and hash and empty values
    ///
    /// Fees default to the protocol minimum base fee so the tx is viable in fee checks.
    pub fn eip1559() -> Self {
        Self::Eip1559 {
            chain_id: 1,
            hash: B256::random(),
            sender: Address::random(),
            nonce: 0,
            max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128,
            max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128,
            gas_limit: 0,
            to: Address::random().into(),
            value: Default::default(),
            input: Bytes::new(),
            access_list: Default::default(),
            size: Default::default(),
            cost: U256::ZERO,
        }
    }
    /// Returns a new EIP7702 transaction with random address and hash and empty values
    ///
    /// The authorization list starts empty; use `set_authorization_list` to populate it.
    pub fn eip7702() -> Self {
        Self::Eip7702 {
            chain_id: 1,
            hash: B256::random(),
            sender: Address::random(),
            nonce: 0,
            max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128,
            max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128,
            gas_limit: 0,
            to: Address::random(),
            value: Default::default(),
            input: Bytes::new(),
            access_list: Default::default(),
            authorization_list: vec![],
            size: Default::default(),
            cost: U256::ZERO,
        }
    }
    /// Returns a new EIP4844 transaction with random address and hash and empty values
    ///
    /// Comes with an empty default sidecar and no blob versioned hashes; see
    /// `eip4844_with_sidecar` to attach a real sidecar.
    pub fn eip4844() -> Self {
        Self::Eip4844 {
            chain_id: 1,
            hash: B256::random(),
            sender: Address::random(),
            nonce: 0,
            max_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128,
            max_priority_fee_per_gas: MIN_PROTOCOL_BASE_FEE as u128,
            max_fee_per_blob_gas: DATA_GAS_PER_BLOB as u128,
            gas_limit: 0,
            to: Address::random(),
            value: Default::default(),
            input: Bytes::new(),
            access_list: Default::default(),
            sidecar: BlobTransactionSidecarVariant::Eip4844(Default::default()),
            blob_versioned_hashes: Default::default(),
            size: Default::default(),
            cost: U256::ZERO,
        }
    }
    /// Returns a new EIP4844 transaction with a provided sidecar
    ///
    /// Also derives the transaction's `blob_versioned_hashes` from the sidecar, so the two
    /// fields stay consistent.
    pub fn eip4844_with_sidecar(sidecar: BlobTransactionSidecarVariant) -> Self {
        let mut transaction = Self::eip4844();
        if let Self::Eip4844 { sidecar: existing_sidecar, blob_versioned_hashes, .. } =
            &mut transaction
        {
            // Collect the hashes first, then move the sidecar into place.
            *blob_versioned_hashes = sidecar.versioned_hashes().collect();
            *existing_sidecar = sidecar;
        }
        transaction
    }
    /// Creates a new transaction with the given [`TxType`].
    ///
    /// See the default constructors for each of the transaction types:
    ///
    /// * [`MockTransaction::legacy`]
    /// * [`MockTransaction::eip2930`]
    /// * [`MockTransaction::eip1559`]
    /// * [`MockTransaction::eip4844`]
    /// * [`MockTransaction::eip7702`]
    pub fn new_from_type(tx_type: TxType) -> Self {
        match tx_type {
            TxType::Legacy => Self::legacy(),
            TxType::Eip2930 => Self::eip2930(),
            TxType::Eip1559 => Self::eip1559(),
            TxType::Eip4844 => Self::eip4844(),
            TxType::Eip7702 => Self::eip7702(),
        }
    }
    /// Sets the max fee per blob gas for EIP-4844 transactions.
    ///
    /// No-op for all other transaction types.
    pub const fn with_blob_fee(mut self, val: u128) -> Self {
        self.set_blob_fee(val);
        self
    }
    /// Sets the max fee per blob gas for EIP-4844 transactions.
    ///
    /// No-op for all other transaction types.
    pub const fn set_blob_fee(&mut self, val: u128) -> &mut Self {
        if let Self::Eip4844 { max_fee_per_blob_gas, .. } = self {
            *max_fee_per_blob_gas = val;
        }
        self
    }
/// Sets the priority fee for dynamic fee transactions (EIP-1559 and EIP-4844)
pub const fn set_priority_fee(&mut self, val: u128) -> &mut Self {
if let Self::Eip1559 { max_priority_fee_per_gas, .. } |
Self::Eip4844 { max_priority_fee_per_gas, .. } = self
{
*max_priority_fee_per_gas = val;
}
self
}
/// Sets the priority fee for dynamic fee transactions (EIP-1559 and EIP-4844)
pub const fn with_priority_fee(mut self, val: u128) -> Self {
self.set_priority_fee(val);
self
}
    /// Gets the priority fee for dynamic fee transactions (EIP-1559, EIP-4844 and EIP-7702)
    ///
    /// Returns `None` for legacy and EIP-2930 transactions.
    pub const fn get_priority_fee(&self) -> Option<u128> {
        match self {
            Self::Eip1559 { max_priority_fee_per_gas, .. } |
            Self::Eip4844 { max_priority_fee_per_gas, .. } |
            Self::Eip7702 { max_priority_fee_per_gas, .. } => Some(*max_priority_fee_per_gas),
            _ => None,
        }
    }
    /// Sets the max fee for dynamic fee transactions (EIP-1559, EIP-4844 and EIP-7702)
    ///
    /// No-op for legacy and EIP-2930 transactions, which only carry a `gas_price`.
    pub const fn set_max_fee(&mut self, val: u128) -> &mut Self {
        if let Self::Eip1559 { max_fee_per_gas, .. } |
        Self::Eip4844 { max_fee_per_gas, .. } |
        Self::Eip7702 { max_fee_per_gas, .. } = self
        {
            *max_fee_per_gas = val;
        }
        self
    }
    /// Sets the max fee for dynamic fee transactions (EIP-1559, EIP-4844 and EIP-7702)
    pub const fn with_max_fee(mut self, val: u128) -> Self {
        self.set_max_fee(val);
        self
    }
    /// Gets the max fee for dynamic fee transactions (EIP-1559, EIP-4844 and EIP-7702)
    ///
    /// Returns `None` for legacy and EIP-2930 transactions.
    pub const fn get_max_fee(&self) -> Option<u128> {
        match self {
            Self::Eip1559 { max_fee_per_gas, .. } |
            Self::Eip4844 { max_fee_per_gas, .. } |
            Self::Eip7702 { max_fee_per_gas, .. } => Some(*max_fee_per_gas),
            _ => None,
        }
    }
    /// Sets the access list for transactions supporting EIP-1559, EIP-4844, EIP-2930 and
    /// EIP-7702. No-op for legacy transactions.
    pub fn set_accesslist(&mut self, list: AccessList) -> &mut Self {
        match self {
            Self::Legacy { .. } => {}
            Self::Eip1559 { access_list: accesslist, .. } |
            Self::Eip4844 { access_list: accesslist, .. } |
            Self::Eip2930 { access_list: accesslist, .. } |
            Self::Eip7702 { access_list: accesslist, .. } => {
                *accesslist = list;
            }
        }
        self
    }
    /// Sets the authorization list for EIP-7702 transactions.
    ///
    /// No-op for all other transaction types.
    pub fn set_authorization_list(&mut self, list: Vec<SignedAuthorization>) -> &mut Self {
        if let Self::Eip7702 { authorization_list, .. } = self {
            *authorization_list = list;
        }
        self
    }
    /// Sets the gas price for the transaction.
    ///
    /// For dynamic fee transactions this sets both the max fee and the priority fee to `val`.
    pub const fn set_gas_price(&mut self, val: u128) -> &mut Self {
        match self {
            Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => {
                *gas_price = val;
            }
            Self::Eip1559 { max_fee_per_gas, max_priority_fee_per_gas, .. } |
            Self::Eip4844 { max_fee_per_gas, max_priority_fee_per_gas, .. } |
            Self::Eip7702 { max_fee_per_gas, max_priority_fee_per_gas, .. } => {
                *max_fee_per_gas = val;
                *max_priority_fee_per_gas = val;
            }
        }
        self
    }
    /// Sets the gas price for the transaction.
    ///
    /// Owned variant of [`Self::set_gas_price`]; same semantics for dynamic fee transactions.
    pub const fn with_gas_price(mut self, val: u128) -> Self {
        match self {
            Self::Legacy { ref mut gas_price, .. } | Self::Eip2930 { ref mut gas_price, .. } => {
                *gas_price = val;
            }
            Self::Eip1559 { ref mut max_fee_per_gas, ref mut max_priority_fee_per_gas, .. } |
            Self::Eip4844 { ref mut max_fee_per_gas, ref mut max_priority_fee_per_gas, .. } |
            Self::Eip7702 { ref mut max_fee_per_gas, ref mut max_priority_fee_per_gas, .. } => {
                *max_fee_per_gas = val;
                *max_priority_fee_per_gas = val;
            }
        }
        self
    }
    /// Gets the gas price for the transaction.
    ///
    /// For dynamic fee transactions this returns the max fee per gas.
    pub const fn get_gas_price(&self) -> u128 {
        match self {
            Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price,
            Self::Eip1559 { max_fee_per_gas, .. } |
            Self::Eip4844 { max_fee_per_gas, .. } |
            Self::Eip7702 { max_fee_per_gas, .. } => *max_fee_per_gas,
        }
    }
    /// Returns a clone with a decreased nonce
    ///
    /// NOTE: overflows (panics in debug builds) if the current nonce is 0.
    pub fn prev(&self) -> Self {
        self.clone().with_hash(B256::random()).with_nonce(self.get_nonce() - 1)
    }
    /// Returns a clone with an increased nonce
    pub fn next(&self) -> Self {
        self.clone().with_hash(B256::random()).with_nonce(self.get_nonce() + 1)
    }
    /// Returns a clone whose nonce skips `skip` slots, i.e. `nonce + skip + 1`.
    pub fn skip(&self, skip: u64) -> Self {
        self.clone().with_hash(B256::random()).with_nonce(self.get_nonce() + skip + 1)
    }
    /// Returns a clone with incremented nonce
    pub fn inc_nonce(self) -> Self {
        let nonce = self.get_nonce() + 1;
        self.with_nonce(nonce)
    }
    /// Sets a new random hash
    pub fn rng_hash(self) -> Self {
        self.with_hash(B256::random())
    }
    /// Returns a new transaction with a higher gas price +1
    pub fn inc_price(&self) -> Self {
        self.inc_price_by(1)
    }
    /// Returns a new transaction with a higher gas price
    ///
    /// Panics on overflow of the gas price.
    pub fn inc_price_by(&self, value: u128) -> Self {
        self.clone().with_gas_price(self.get_gas_price().checked_add(value).unwrap())
    }
    /// Returns a new transaction with a lower gas price -1
    pub fn decr_price(&self) -> Self {
        self.decr_price_by(1)
    }
    /// Returns a new transaction with a lower gas price
    ///
    /// Panics on underflow of the gas price.
    pub fn decr_price_by(&self, value: u128) -> Self {
        self.clone().with_gas_price(self.get_gas_price().checked_sub(value).unwrap())
    }
    /// Returns a new transaction with a higher value
    pub fn inc_value(&self) -> Self {
        self.clone().with_value(self.get_value().checked_add(U256::from(1)).unwrap())
    }
    /// Returns a new transaction with a higher gas limit
    pub fn inc_limit(&self) -> Self {
        self.clone().with_gas_limit(self.get_gas_limit() + 1)
    }
    /// Returns a new transaction with a higher blob fee +1
    ///
    /// If it's an EIP-4844 transaction.
    pub fn inc_blob_fee(&self) -> Self {
        self.inc_blob_fee_by(1)
    }
    /// Returns a new transaction with a higher blob fee
    ///
    /// If it's an EIP-4844 transaction; no-op for other types. Panics on overflow.
    pub fn inc_blob_fee_by(&self, value: u128) -> Self {
        let mut this = self.clone();
        if let Self::Eip4844 { max_fee_per_blob_gas, .. } = &mut this {
            *max_fee_per_blob_gas = max_fee_per_blob_gas.checked_add(value).unwrap();
        }
        this
    }
/// Returns a new transaction with a lower blob fee -1
///
/// If it's an EIP-4844 transaction.
pub fn decr_blob_fee(&self) -> Self {
self.decr_price_by(1)
}
/// Returns a new transaction with a lower blob fee
///
/// If it's an EIP-4844 transaction.
pub fn decr_blob_fee_by(&self, value: u128) -> Self {
let mut this = self.clone();
if let Self::Eip4844 { max_fee_per_blob_gas, .. } = &mut this {
*max_fee_per_blob_gas = max_fee_per_blob_gas.checked_sub(value).unwrap();
}
this
}
    /// Returns the transaction type identifier associated with the current [`MockTransaction`].
    pub const fn tx_type(&self) -> u8 {
        match self {
            Self::Legacy { .. } => LEGACY_TX_TYPE_ID,
            Self::Eip1559 { .. } => EIP1559_TX_TYPE_ID,
            Self::Eip4844 { .. } => EIP4844_TX_TYPE_ID,
            Self::Eip2930 { .. } => EIP2930_TX_TYPE_ID,
            Self::Eip7702 { .. } => EIP7702_TX_TYPE_ID,
        }
    }
    /// Checks if the transaction is of the legacy type.
    pub const fn is_legacy(&self) -> bool {
        matches!(self, Self::Legacy { .. })
    }
    /// Checks if the transaction is of the EIP-1559 type.
    pub const fn is_eip1559(&self) -> bool {
        matches!(self, Self::Eip1559 { .. })
    }
    /// Checks if the transaction is of the EIP-4844 type.
    pub const fn is_eip4844(&self) -> bool {
        matches!(self, Self::Eip4844 { .. })
    }
    /// Checks if the transaction is of the EIP-2930 type.
    pub const fn is_eip2930(&self) -> bool {
        matches!(self, Self::Eip2930 { .. })
    }
    /// Checks if the transaction is of the EIP-7702 type.
    pub const fn is_eip7702(&self) -> bool {
        matches!(self, Self::Eip7702 { .. })
    }
    /// Recomputes the cached `cost` field as `gas_limit * fee + value`, where `fee` is the
    /// gas price for legacy/EIP-2930 and the max fee per gas for dynamic fee types.
    /// Invoked by the `set_value!` macro after every field mutation.
    fn update_cost(&mut self) {
        match self {
            Self::Legacy { cost, gas_limit, gas_price, value, .. } |
            Self::Eip2930 { cost, gas_limit, gas_price, value, .. } => {
                *cost = U256::from(*gas_limit) * U256::from(*gas_price) + *value
            }
            Self::Eip1559 { cost, gas_limit, max_fee_per_gas, value, .. } |
            Self::Eip4844 { cost, gas_limit, max_fee_per_gas, value, .. } |
            Self::Eip7702 { cost, gas_limit, max_fee_per_gas, value, .. } => {
                *cost = U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value
            }
        };
    }
}
impl PoolTransaction for MockTransaction {
    type TryFromConsensusError = ValueError<EthereumTxEnvelope<TxEip4844>>;
    type Consensus = TransactionSigned;
    type Pooled = PooledTransactionVariant;
    /// Converts into a recovered consensus transaction via the `From` impl.
    fn into_consensus(self) -> Recovered<Self::Consensus> {
        self.into()
    }
    fn from_pooled(pooled: Recovered<Self::Pooled>) -> Self {
        pooled.into()
    }
    fn hash(&self) -> &TxHash {
        self.get_hash()
    }
    fn sender(&self) -> Address {
        *self.get_sender()
    }
    fn sender_ref(&self) -> &Address {
        self.get_sender()
    }
    // Having `get_cost` from `make_setters_getters` would be cleaner but we didn't
    // want to also generate the error-prone cost setters. For now cost should be
    // correct at construction and auto-updated per field update via `update_cost`,
    // not to be manually set.
    fn cost(&self) -> &U256 {
        match self {
            Self::Legacy { cost, .. } |
            Self::Eip2930 { cost, .. } |
            Self::Eip1559 { cost, .. } |
            Self::Eip4844 { cost, .. } |
            Self::Eip7702 { cost, .. } => cost,
        }
    }
    /// Returns the encoded length of the transaction.
    ///
    /// For mocks this is the manually-set `size` field, not a real RLP encoding length.
    fn encoded_length(&self) -> usize {
        self.size()
    }
}
impl InMemorySize for MockTransaction {
    /// Returns the cached, manually-set `size` field (not a computed heap size).
    fn size(&self) -> usize {
        *self.get_size()
    }
}
impl Typed2718 for MockTransaction {
    /// Returns the EIP-2718 transaction type id.
    fn ty(&self) -> u8 {
        // The inherent helper maps every variant to the same EIP-2718 id constants
        // (`TxType::X.into()` equals the corresponding `*_TX_TYPE_ID`).
        self.tx_type()
    }
}
impl alloy_consensus::Transaction for MockTransaction {
    fn chain_id(&self) -> Option<u64> {
        match self {
            // Legacy transactions may predate EIP-155 and therefore carry no chain id.
            Self::Legacy { chain_id, .. } => *chain_id,
            Self::Eip1559 { chain_id, .. } |
            Self::Eip4844 { chain_id, .. } |
            Self::Eip2930 { chain_id, .. } |
            Self::Eip7702 { chain_id, .. } => Some(*chain_id),
        }
    }
    fn nonce(&self) -> u64 {
        *self.get_nonce()
    }
    fn gas_limit(&self) -> u64 {
        *self.get_gas_limit()
    }
    fn gas_price(&self) -> Option<u128> {
        match self {
            Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => Some(*gas_price),
            _ => None,
        }
    }
    fn max_fee_per_gas(&self) -> u128 {
        match self {
            // For pre-EIP-1559 types the gas price doubles as the max fee.
            Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price,
            Self::Eip1559 { max_fee_per_gas, .. } |
            Self::Eip4844 { max_fee_per_gas, .. } |
            Self::Eip7702 { max_fee_per_gas, .. } => *max_fee_per_gas,
        }
    }
    fn max_priority_fee_per_gas(&self) -> Option<u128> {
        match self {
            Self::Legacy { .. } | Self::Eip2930 { .. } => None,
            Self::Eip1559 { max_priority_fee_per_gas, .. } |
            Self::Eip4844 { max_priority_fee_per_gas, .. } |
            Self::Eip7702 { max_priority_fee_per_gas, .. } => Some(*max_priority_fee_per_gas),
        }
    }
    fn max_fee_per_blob_gas(&self) -> Option<u128> {
        match self {
            Self::Eip4844 { max_fee_per_blob_gas, .. } => Some(*max_fee_per_blob_gas),
            _ => None,
        }
    }
    fn priority_fee_or_price(&self) -> u128 {
        match self {
            Self::Legacy { gas_price, .. } | Self::Eip2930 { gas_price, .. } => *gas_price,
            Self::Eip1559 { max_priority_fee_per_gas, .. } |
            Self::Eip4844 { max_priority_fee_per_gas, .. } |
            Self::Eip7702 { max_priority_fee_per_gas, .. } => *max_priority_fee_per_gas,
        }
    }
    /// Effective gas price the sender would pay given an optional block base fee.
    ///
    /// Without a base fee this is simply the max fee per gas.
    fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
        base_fee.map_or_else(
            || self.max_fee_per_gas(),
            |base_fee| {
                // if the tip is greater than the max priority fee per gas, set it to the max
                // priority fee per gas + base fee
                let tip = self.max_fee_per_gas().saturating_sub(base_fee as u128);
                if let Some(max_tip) = self.max_priority_fee_per_gas() {
                    if tip > max_tip {
                        max_tip + base_fee as u128
                    } else {
                        // otherwise return the max fee per gas
                        self.max_fee_per_gas()
                    }
                } else {
                    self.max_fee_per_gas()
                }
            },
        )
    }
    fn is_dynamic_fee(&self) -> bool {
        !matches!(self, Self::Legacy { .. } | Self::Eip2930 { .. })
    }
    fn kind(&self) -> TxKind {
        match self {
            Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => *to,
            // EIP-4844 and EIP-7702 store a concrete address: they cannot create contracts.
            Self::Eip4844 { to, .. } | Self::Eip7702 { to, .. } => TxKind::Call(*to),
        }
    }
    fn is_create(&self) -> bool {
        match self {
            Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => {
                to.is_create()
            }
            // Always a call for these types, see `kind` above.
            Self::Eip4844 { .. } | Self::Eip7702 { .. } => false,
        }
    }
    fn value(&self) -> U256 {
        match self {
            Self::Legacy { value, .. } |
            Self::Eip1559 { value, .. } |
            Self::Eip2930 { value, .. } |
            Self::Eip4844 { value, .. } |
            Self::Eip7702 { value, .. } => *value,
        }
    }
    fn input(&self) -> &Bytes {
        self.get_input()
    }
    fn access_list(&self) -> Option<&AccessList> {
        match self {
            Self::Legacy { .. } => None,
            Self::Eip1559 { access_list: accesslist, .. } |
            Self::Eip4844 { access_list: accesslist, .. } |
            Self::Eip2930 { access_list: accesslist, .. } |
            Self::Eip7702 { access_list: accesslist, .. } => Some(accesslist),
        }
    }
    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
        match self {
            Self::Eip4844 { blob_versioned_hashes, .. } => Some(blob_versioned_hashes),
            _ => None,
        }
    }
    fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
        match self {
            Self::Eip7702 { authorization_list, .. } => Some(authorization_list),
            _ => None,
        }
    }
}
impl EthPoolTransaction for MockTransaction {
    fn take_blob(&mut self) -> EthBlobTransactionSidecar {
        match self {
            // NOTE: clones rather than removes, so the sidecar stays on the transaction.
            Self::Eip4844 { sidecar, .. } => EthBlobTransactionSidecar::Present(sidecar.clone()),
            _ => EthBlobTransactionSidecar::None,
        }
    }
    /// Converts to the pooled (network) form with the given sidecar attached.
    ///
    /// Returns `None` if the underlying consensus transaction is not EIP-4844.
    fn try_into_pooled_eip4844(
        self,
        sidecar: Arc<BlobTransactionSidecarVariant>,
    ) -> Option<Recovered<Self::Pooled>> {
        let (tx, signer) = self.into_consensus().into_parts();
        tx.try_into_pooled_eip4844(Arc::unwrap_or_clone(sidecar))
            .map(|tx| tx.with_signer(signer))
            .ok()
    }
    /// Builds a mock from a consensus EIP-4844 transaction plus its sidecar.
    ///
    /// Returns `None` if the transaction is not EIP-4844.
    fn try_from_eip4844(
        tx: Recovered<Self::Consensus>,
        sidecar: BlobTransactionSidecarVariant,
    ) -> Option<Self> {
        let (tx, signer) = tx.into_parts();
        tx.try_into_pooled_eip4844(sidecar)
            .map(|tx| tx.with_signer(signer))
            .ok()
            .map(Self::from_pooled)
    }
    /// Mock blob validation: accepts any sidecar for EIP-4844 transactions without
    /// performing KZG proof verification.
    fn validate_blob(
        &self,
        _blob: &BlobTransactionSidecarVariant,
        _settings: &KzgSettings,
    ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> {
        match &self {
            Self::Eip4844 { .. } => Ok(()),
            _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())),
        }
    }
}
impl TryFrom<Recovered<TransactionSigned>> for MockTransaction {
type Error = TryFromRecoveredTransactionError;
fn try_from(tx: Recovered<TransactionSigned>) -> Result<Self, Self::Error> {
let sender = tx.signer();
let transaction = tx.into_inner();
let hash = *transaction.tx_hash();
let size = transaction.size();
match transaction.into_typed_transaction() {
Transaction::Legacy(TxLegacy {
chain_id,
nonce,
gas_price,
gas_limit,
to,
value,
input,
}) => Ok(Self::Legacy {
chain_id,
hash,
sender,
nonce,
gas_price,
gas_limit,
to,
value,
input,
size,
cost: U256::from(gas_limit) * U256::from(gas_price) + value,
}),
Transaction::Eip2930(TxEip2930 {
chain_id,
nonce,
gas_price,
gas_limit,
to,
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/test_utils/okvalidator.rs | crates/transaction-pool/src/test_utils/okvalidator.rs | use std::marker::PhantomData;
use crate::{
validate::ValidTransaction, EthPooledTransaction, PoolTransaction, TransactionOrigin,
TransactionValidationOutcome, TransactionValidator,
};
/// A transaction validator that determines all transactions to be valid.
///
/// Useful for tests that want every transaction to enter the pool unconditionally.
#[derive(Debug)]
#[non_exhaustive]
pub struct OkValidator<T = EthPooledTransaction> {
    // Binds the validator to a specific pool transaction type without storing one.
    _phantom: PhantomData<T>,
    /// Whether to mark transactions as propagatable.
    propagate: bool,
}
impl<T> OkValidator<T> {
    /// Determines whether transactions should be allowed to be propagated
    ///
    /// Consumes and returns `self` for builder-style chaining.
    pub const fn set_propagate_transactions(mut self, propagate: bool) -> Self {
        self.propagate = propagate;
        self
    }
}
impl<T> Default for OkValidator<T> {
fn default() -> Self {
Self { _phantom: Default::default(), propagate: false }
}
}
impl<T> TransactionValidator for OkValidator<T>
where
    T: PoolTransaction,
{
    type Transaction = T;
    /// Unconditionally reports the transaction as valid.
    async fn validate_transaction(
        &self,
        _origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> TransactionValidationOutcome<Self::Transaction> {
        // Always return valid
        // Recover the EIP-7702 authorities, if any, so the pool can track them.
        let authorities = transaction.authorization_list().map(|auths| {
            auths.iter().flat_map(|auth| auth.recover_authority()).collect::<Vec<_>>()
        });
        TransactionValidationOutcome::Valid {
            // Report the transaction cost as the account balance so balance checks pass.
            balance: *transaction.cost(),
            // Report the transaction's own nonce as the on-chain nonce.
            state_nonce: transaction.nonce(),
            bytecode_hash: None,
            transaction: ValidTransaction::Valid(transaction),
            propagate: self.propagate,
            authorities,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/src/test_utils/pool.rs | crates/transaction-pool/src/test_utils/pool.rs | //! Test helpers for mocking an entire pool.
#![allow(dead_code)]
use crate::{
pool::{txpool::TxPool, AddedTransaction},
test_utils::{MockOrdering, MockTransactionDistribution, MockTransactionFactory},
TransactionOrdering,
};
use alloy_primitives::{Address, U256};
use rand::Rng;
use std::{
collections::HashMap,
ops::{Deref, DerefMut},
};
/// A wrapped `TxPool` with additional helpers for testing
pub(crate) struct MockPool<T: TransactionOrdering = MockOrdering> {
    /// The wrapped pool; also exposed via `Deref`/`DerefMut`.
    pool: TxPool<T>,
}
impl MockPool {
    /// The total size of all subpools (pending + base-fee + queued)
    fn total_subpool_size(&self) -> usize {
        self.pool.pending().len() + self.pool.base_fee().len() + self.pool.queued().len()
    }
    /// Checks that all pool invariants hold.
    ///
    /// # Panics
    ///
    /// Panics if the number of transactions tracked in `AllTransactions` differs from the
    /// sum of the subpool sizes.
    fn enforce_invariants(&self) {
        assert_eq!(
            self.pool.len(),
            self.total_subpool_size(),
            "Tx in AllTransactions and sum(subpools) must match"
        );
    }
}
impl Default for MockPool {
    fn default() -> Self {
        // An empty pool with mock ordering and the default pool configuration.
        let pool = TxPool::new(MockOrdering::default(), Default::default());
        Self { pool }
    }
}
// Transparent access to the wrapped pool for reads...
impl<T: TransactionOrdering> Deref for MockPool<T> {
    type Target = TxPool<T>;
    fn deref(&self) -> &Self::Target {
        &self.pool
    }
}
// ...and for writes.
impl<T: TransactionOrdering> DerefMut for MockPool<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.pool
    }
}
/// Simulates transaction execution.
///
/// Tracks per-sender on-chain state (balance, nonce) and drives randomly selected
/// [`ScenarioType`]s against a [`MockPool`].
pub(crate) struct MockTransactionSimulator<R: Rng> {
    /// The pending base fee
    base_fee: u128,
    /// Generator for transactions
    tx_generator: MockTransactionDistribution,
    /// Represents the on-chain balance of a sender.
    balances: HashMap<Address, U256>,
    /// Represents the on-chain nonce of a sender.
    nonces: HashMap<Address, u64>,
    /// A set of addresses to use as senders.
    senders: Vec<Address>,
    /// What scenarios to execute.
    scenarios: Vec<ScenarioType>,
    /// All previous scenarios executed by a sender.
    executed: HashMap<Address, ExecutedScenarios>,
    /// "Validates" generated transactions.
    validator: MockTransactionFactory,
    /// The rng instance used to select senders and scenarios.
    rng: R,
}
impl<R: Rng> MockTransactionSimulator<R> {
    /// Returns a new mock instance
    ///
    /// Every generated sender starts with a random on-chain balance and nonce 0.
    pub(crate) fn new(mut rng: R, config: MockSimulatorConfig) -> Self {
        let senders = config.addresses(&mut rng);
        Self {
            base_fee: config.base_fee,
            balances: senders.iter().copied().map(|a| (a, rng.random())).collect(),
            nonces: senders.iter().copied().map(|a| (a, 0)).collect(),
            senders,
            scenarios: config.scenarios,
            tx_generator: config.tx_generator,
            executed: Default::default(),
            validator: Default::default(),
            rng,
        }
    }
    /// Returns a random address from the senders set
    ///
    /// Panics if the sender set is empty (empty `random_range`).
    fn rng_address(&mut self) -> Address {
        let idx = self.rng.random_range(0..self.senders.len());
        self.senders[idx]
    }
    /// Returns a random scenario from the scenario set
    ///
    /// Panics if the scenario set is empty (empty `random_range`).
    fn rng_scenario(&mut self) -> ScenarioType {
        let idx = self.rng.random_range(0..self.scenarios.len());
        self.scenarios[idx].clone()
    }
    /// Executes the next scenario and applies it to the pool
    ///
    /// # Panics
    ///
    /// Panics if the submitted transaction does not end up in the pending subpool, or if a
    /// pool invariant is violated afterwards.
    pub(crate) fn next(&mut self, pool: &mut MockPool) {
        let sender = self.rng_address();
        let scenario = self.rng_scenario();
        let on_chain_nonce = self.nonces[&sender];
        let on_chain_balance = self.balances[&sender];
        match scenario {
            ScenarioType::OnchainNonce => {
                // The gas price is pinned to the base fee so the tx qualifies as pending.
                let tx = self
                    .tx_generator
                    .tx(on_chain_nonce, &mut self.rng)
                    .with_gas_price(self.base_fee);
                let valid_tx = self.validator.validated(tx);
                let res =
                    pool.add_transaction(valid_tx, on_chain_balance, on_chain_nonce, None).unwrap();
                // TODO(mattsse): need a way expect based on the current state of the pool and tx
                // settings
                match res {
                    AddedTransaction::Pending(_) => {}
                    AddedTransaction::Parked { .. } => {
                        panic!("expected pending")
                    }
                }
                // TODO(mattsse): check subpools
            }
            ScenarioType::HigherNonce { .. } => {
                unimplemented!()
            }
        }
        // make sure everything is set
        pool.enforce_invariants()
    }
}
/// How to configure a new mock transaction stream
pub(crate) struct MockSimulatorConfig {
    /// How many senders to generate.
    pub(crate) num_senders: usize,
    /// Scenarios to test
    pub(crate) scenarios: Vec<ScenarioType>,
    /// The start base fee
    pub(crate) base_fee: u128,
    /// Generator for transactions
    pub(crate) tx_generator: MockTransactionDistribution,
}
impl MockSimulatorConfig {
    /// Generates a fresh set of random sender addresses, one per configured sender.
    pub(crate) fn addresses(&self, rng: &mut impl rand::Rng) -> Vec<Address> {
        (0..self.num_senders).map(|_| Address::random_with(rng)).collect()
    }
}
/// Represents the different types of test scenarios.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub(crate) enum ScenarioType {
    /// Submit a transaction whose nonce equals the sender's on-chain nonce.
    OnchainNonce,
    /// Submit a transaction with a nonce ahead of the on-chain nonce
    /// (currently unimplemented in the simulator's `next`).
    HigherNonce { skip: u64 },
}
/// The actual scenario, ready to be executed
///
/// A scenario produces one or more transactions and expects a certain Outcome.
///
/// An executed scenario can affect previous executed transactions
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub(crate) enum Scenario {
    /// Send a tx with the same nonce as on chain.
    OnchainNonce { nonce: u64 },
    /// Send a tx with a higher nonce than what the sender has on chain
    HigherNonce { onchain: u64, nonce: u64 },
    /// Execute multiple test scenarios
    Multi {
        /// The nested scenarios to execute.
        scenario: Vec<Scenario>,
    },
}
/// Represents an executed scenario
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub(crate) struct ExecutedScenario {
    /// balance at the time of execution
    balance: U256,
    /// nonce at the time of execution
    nonce: u64,
    /// The executed scenario
    scenario: Scenario,
}
/// All executed scenarios by a sender
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub(crate) struct ExecutedScenarios {
    /// The sender these scenarios were executed for.
    sender: Address,
    /// The scenarios in execution order.
    scenarios: Vec<ExecutedScenario>,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::{MockFeeRange, MockTransactionRatio};
    /// Smoke test: a single `OnchainNonce` scenario runs and upholds the pool invariants.
    #[test]
    fn test_on_chain_nonce_scenario() {
        // 30% legacy / 70% dynamic-fee transactions, no access-list or blob txs.
        let transaction_ratio = MockTransactionRatio {
            legacy_pct: 30,
            dynamic_fee_pct: 70,
            access_list_pct: 0,
            blob_pct: 0,
        };
        let fee_ranges = MockFeeRange {
            gas_price: (10u128..100).try_into().unwrap(),
            priority_fee: (10u128..100).try_into().unwrap(),
            max_fee: (100u128..110).try_into().unwrap(),
            max_fee_blob: (1u128..100).try_into().unwrap(),
        };
        let config = MockSimulatorConfig {
            num_senders: 10,
            scenarios: vec![ScenarioType::OnchainNonce],
            base_fee: 10,
            tx_generator: MockTransactionDistribution::new(
                transaction_ratio,
                fee_ranges,
                10..100,
                10..100,
            ),
        };
        let mut simulator = MockTransactionSimulator::new(rand::rng(), config);
        let mut pool = MockPool::default();
        // `next` panics internally if the tx is not pending or an invariant breaks.
        simulator.next(&mut pool);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/tests/it/pending.rs | crates/transaction-pool/tests/it/pending.rs | use assert_matches::assert_matches;
use reth_transaction_pool::{
test_utils::{MockTransactionFactory, TestPoolBuilder},
TransactionOrigin, TransactionPool,
};
#[tokio::test(flavor = "multi_thread")]
async fn txpool_new_pending_txs() {
let txpool = TestPoolBuilder::default();
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559();
let added_result =
txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await;
assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash());
let mut best_txns = txpool.best_transactions();
assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash());
assert_matches!(best_txns.next(), None);
let transaction = mock_tx_factory.create_eip1559();
let added_result =
txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await;
assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash());
assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash());
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/tests/it/evict.rs | crates/transaction-pool/tests/it/evict.rs | //! Transaction pool eviction tests.
use alloy_consensus::Transaction;
use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE};
use alloy_primitives::{Address, B256};
use rand::distr::Uniform;
use reth_transaction_pool::{
error::PoolErrorKind,
test_utils::{
MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder,
},
AddedTransactionOutcome, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin,
TransactionPool, TransactionPoolExt,
};
#[tokio::test(flavor = "multi_thread")]
async fn only_blobs_eviction() {
// This test checks that blob transactions can be inserted into the pool, and at each step the
// blob pool can be truncated to the correct size
// set the pool limits to something small
let pool_config = PoolConfig {
pending_limit: SubPoolLimit { max_txs: 10, max_size: 1000 },
queued_limit: SubPoolLimit { max_txs: 10, max_size: 1000 },
basefee_limit: SubPoolLimit { max_txs: 10, max_size: 1000 },
blob_limit: SubPoolLimit { max_txs: 10, max_size: 1000 },
..Default::default()
};
let pool: TestPool = TestPoolBuilder::default().with_config(pool_config.clone()).into();
let block_info = BlockInfo {
block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
last_seen_block_hash: B256::ZERO,
last_seen_block_number: 0,
pending_basefee: 10,
pending_blob_fee: Some(10),
};
pool.set_block_info(block_info);
// this is how many times the test will regenerate transactions and insert them into the pool
let total_txs = 1000;
// If we have a wide size range we can cover cases both where we have a lot of small txs and a
// lot of large txs
let size_range = 10..1100;
// create mock tx distribution, 100% blobs
let tx_ratio = MockTransactionRatio {
legacy_pct: 0,
dynamic_fee_pct: 0,
blob_pct: 100,
access_list_pct: 0,
};
// Vary the amount of senders
let senders = [1, 10, 100, total_txs];
for sender_amt in &senders {
let gas_limit_range = 100_000..1_000_000;
// split the total txs into the amount of senders
let txs_per_sender = total_txs / sender_amt;
let nonce_range = 0..txs_per_sender;
let pending_blob_fee = block_info.pending_blob_fee.unwrap();
// start the fees at zero, some transactions will be underpriced
let fee_range = MockFeeRange {
gas_price: Uniform::try_from(0u128..(block_info.pending_basefee as u128 + 1000))
.unwrap(),
priority_fee: Uniform::try_from(0u128..(block_info.pending_basefee as u128 + 1000))
.unwrap(),
// we need to set the max fee to at least the min protocol base fee, or transactions
// generated could be rejected
max_fee: Uniform::try_from(
MIN_PROTOCOL_BASE_FEE as u128..(block_info.pending_basefee as u128 + 2000),
)
.unwrap(),
max_fee_blob: Uniform::try_from(pending_blob_fee..(pending_blob_fee + 1000)).unwrap(),
};
let distribution = MockTransactionDistribution::new(
tx_ratio.clone(),
fee_range,
gas_limit_range,
size_range.clone(),
);
for _ in 0..*sender_amt {
// use a random sender, create the tx set
let sender = Address::random();
let set = distribution.tx_set(sender, nonce_range.clone(), &mut rand::rng());
let set = set.into_vec();
// ensure that the first nonce is 0
assert_eq!(set[0].nonce(), 0);
// and finally insert it into the pool
let results = pool.add_transactions(TransactionOrigin::External, set).await;
for (i, result) in results.iter().enumerate() {
match result {
Ok(AddedTransactionOutcome { hash, .. }) => {
println!("β
Inserted tx into pool with hash: {hash}");
}
Err(e) => {
match e.kind {
PoolErrorKind::DiscardedOnInsert => {
println!("β
Discarded tx on insert, like we should have");
}
PoolErrorKind::SpammerExceededCapacity(addr) => {
// ensure the address is the same as the sender
assert_eq!(addr, sender);
// ensure that this is only returned when the sender is over the
// pool limit per account
assert!(
i + 1 >= pool_config.max_account_slots,
"Spammer exceeded capacity, but it shouldn't have. Max accounts slots: {}, current txs by sender: {}",
pool_config.max_account_slots,
i + 1
);
// at this point we know that the sender has been limited, so we
// keep going
}
_ => {
panic!("Failed to insert tx into pool with unexpected error: {e}");
}
}
}
}
}
// after every insert, ensure that it's under the pool limits
assert!(!pool.is_exceeded());
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn mixed_eviction() {
// This test checks that many transaction types can be inserted into the pool. The fees need
// to be set so that the transactions will actually pass validation. Transactions here do not
// have nonce gaps.
let pool_config = PoolConfig {
pending_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
queued_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
basefee_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
blob_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
..Default::default()
};
let pool: TestPool = TestPoolBuilder::default().with_config(pool_config.clone()).into();
let block_info = BlockInfo {
block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
last_seen_block_hash: B256::ZERO,
last_seen_block_number: 0,
pending_basefee: 10,
pending_blob_fee: Some(20),
};
pool.set_block_info(block_info);
let total_txs = 100;
let size_range = 10..1100;
// Adjust the ratios to include a mix of transaction types
let tx_ratio = MockTransactionRatio {
legacy_pct: 25,
dynamic_fee_pct: 25,
blob_pct: 25,
access_list_pct: 25,
};
let senders = [1, 5, 10];
for sender_amt in &senders {
let gas_limit_range = 100_000..1_000_000;
let txs_per_sender = total_txs / sender_amt;
let nonce_range = 0..txs_per_sender;
let pending_blob_fee = block_info.pending_blob_fee.unwrap();
// Make sure transactions are not immediately rejected
let min_gas_price = block_info.pending_basefee as u128 + 1;
let min_priority_fee = 1u128;
let min_max_fee = block_info.pending_basefee as u128 + 10;
let fee_range = MockFeeRange {
gas_price: Uniform::try_from(min_gas_price..(min_gas_price + 1000)).unwrap(),
priority_fee: Uniform::try_from(min_priority_fee..(min_priority_fee + 1000)).unwrap(),
max_fee: Uniform::try_from(min_max_fee..(min_max_fee + 2000)).unwrap(),
max_fee_blob: Uniform::try_from(pending_blob_fee..(pending_blob_fee + 1000)).unwrap(),
};
let distribution = MockTransactionDistribution::new(
tx_ratio.clone(),
fee_range,
gas_limit_range,
size_range.clone(),
);
for _ in 0..*sender_amt {
let sender = Address::random();
let set = distribution.tx_set_non_conflicting_types(
sender,
nonce_range.clone(),
&mut rand::rng(),
);
let set = set.into_inner().into_vec();
assert_eq!(set[0].nonce(), 0);
let results = pool.add_transactions(TransactionOrigin::External, set).await;
for (i, result) in results.iter().enumerate() {
match result {
Ok(_) => {
// Transaction inserted successfully
}
Err(e) => {
match e.kind {
PoolErrorKind::DiscardedOnInsert => {
// Transaction discarded on insert
println!("β
Discarded tx on insert, like we should have");
}
PoolErrorKind::SpammerExceededCapacity(addr) => {
// ensure the address is the same as the sender
assert_eq!(addr, sender);
// ensure that this is only returned when the sender is over the
// pool limit per account
assert!(
i + 1 >= pool_config.max_account_slots,
"Spammer exceeded capacity, but it shouldn't have. Max accounts slots: {}, current txs by sender: {}",
pool_config.max_account_slots,
i + 1
);
}
_ => panic!("Failed to insert tx into pool with unexpected error: {e}"),
}
}
}
}
assert!(!pool.is_exceeded());
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn nonce_gaps_eviction() {
// This test checks that many transaction types can be inserted into the pool.
//
// This test also inserts nonce gaps into the non-blob transactions.
let pool_config = PoolConfig {
pending_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
queued_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
basefee_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
blob_limit: SubPoolLimit { max_txs: 20, max_size: 2000 },
..Default::default()
};
let pool: TestPool = TestPoolBuilder::default().with_config(pool_config.clone()).into();
let block_info = BlockInfo {
block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
last_seen_block_hash: B256::ZERO,
last_seen_block_number: 0,
pending_basefee: 10,
pending_blob_fee: Some(20),
};
pool.set_block_info(block_info);
let total_txs = 100;
let size_range = 10..1100;
// Adjust the ratios to include a mix of transaction types
let tx_ratio = MockTransactionRatio {
legacy_pct: 25,
dynamic_fee_pct: 25,
blob_pct: 25,
access_list_pct: 25,
};
let senders = [1, 5, 10];
for sender_amt in &senders {
let gas_limit_range = 100_000..1_000_000;
let txs_per_sender = total_txs / sender_amt;
let nonce_range = 0..txs_per_sender;
let pending_blob_fee = block_info.pending_blob_fee.unwrap();
// Make sure transactions are not immediately rejected
let min_gas_price = block_info.pending_basefee as u128 + 1;
let min_priority_fee = 1u128;
let min_max_fee = block_info.pending_basefee as u128 + 10;
let fee_range = MockFeeRange {
gas_price: Uniform::try_from(min_gas_price..(min_gas_price + 1000)).unwrap(),
priority_fee: Uniform::try_from(min_priority_fee..(min_priority_fee + 1000)).unwrap(),
max_fee: Uniform::try_from(min_max_fee..(min_max_fee + 2000)).unwrap(),
max_fee_blob: Uniform::try_from(pending_blob_fee..(pending_blob_fee + 1000)).unwrap(),
};
let distribution = MockTransactionDistribution::new(
tx_ratio.clone(),
fee_range,
gas_limit_range,
size_range.clone(),
);
// set up gap percentages and sizes, 30% chance for transactions to be followed by a gap,
// and the gap size is between 1 and 5
let gap_pct = 30;
let gap_range = 1u64..6;
for _ in 0..*sender_amt {
let sender = Address::random();
let mut set = distribution.tx_set_non_conflicting_types(
sender,
nonce_range.clone(),
&mut rand::rng(),
);
set.with_nonce_gaps(gap_pct, gap_range.clone(), &mut rand::rng());
let set = set.into_inner().into_vec();
let results = pool.add_transactions(TransactionOrigin::External, set).await;
for (i, result) in results.iter().enumerate() {
match result {
Ok(_) => {
// Transaction inserted successfully
}
Err(e) => {
match e.kind {
PoolErrorKind::DiscardedOnInsert => {
// Transaction discarded on insert
println!("β
Discarded tx on insert, like we should have");
}
PoolErrorKind::SpammerExceededCapacity(addr) => {
// ensure the address is the same as the sender
assert_eq!(addr, sender);
// ensure that this is only returned when the sender is over the
// pool limit per account
assert!(
i + 1 >= pool_config.max_account_slots,
"Spammer exceeded capacity, but it shouldn't have. Max accounts slots: {}, current txs by sender: {}",
pool_config.max_account_slots,
i + 1
);
}
_ => panic!("Failed to insert tx into pool with unexpected error: {e}"),
}
}
}
}
assert!(!pool.is_exceeded());
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/tests/it/listeners.rs | crates/transaction-pool/tests/it/listeners.rs | use assert_matches::assert_matches;
use reth_transaction_pool::{
noop::MockTransactionValidator,
test_utils::{MockTransactionFactory, TestPoolBuilder},
FullTransactionEvent, PoolTransaction, TransactionEvent, TransactionListenerKind,
TransactionOrigin, TransactionPool,
};
use std::{future::poll_fn, task::Poll};
use tokio_stream::StreamExt;
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_by_hash() {
let txpool = TestPoolBuilder::default();
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559();
let result = txpool
.add_transaction_and_subscribe(TransactionOrigin::External, transaction.transaction.clone())
.await;
assert_matches!(result, Ok(_));
let mut events = result.unwrap();
assert_matches!(events.next().await, Some(TransactionEvent::Pending));
let removed_txs = txpool.remove_transactions(vec![*transaction.transaction.hash()]);
assert_eq!(transaction.transaction.hash(), removed_txs[0].transaction.hash());
assert_matches!(events.next().await, Some(TransactionEvent::Discarded));
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_replace_event() {
let txpool = TestPoolBuilder::default();
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559();
let mut all_tx_events = txpool.all_transactions_event_listener();
let old_transaction = transaction.transaction.clone();
let mut result = txpool
.add_transaction_and_subscribe(TransactionOrigin::External, old_transaction.clone())
.await;
assert_matches!(result, Ok(_));
let mut events = result.unwrap();
assert_matches!(events.next().await, Some(TransactionEvent::Pending));
assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Pending(hash)) if hash == *old_transaction.get_hash());
// add replace tx.
let replace_transaction = transaction.transaction.clone().rng_hash().inc_price();
result = txpool
.add_transaction_and_subscribe(TransactionOrigin::External, replace_transaction.clone())
.await;
assert_matches!(result, Ok(_));
let mut new_events = result.unwrap();
assert_matches!(new_events.next().await, Some(TransactionEvent::Pending));
// The listener of old transaction should receive replaced event.
assert_matches!(events.next().await, Some(TransactionEvent::Replaced(hash)) if hash == *replace_transaction.get_hash());
// The listener of all should receive one pending event of new transaction and one replaced
// event of old transaction.
assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Pending(hash)) if hash == *replace_transaction.get_hash());
assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Replaced { transaction, replaced_by }) if *transaction.transaction.get_hash() == *old_transaction.get_hash() && replaced_by == *replace_transaction.get_hash());
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_queued_event() {
let txpool = TestPoolBuilder::default();
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559().transaction.inc_nonce();
let mut all_tx_events = txpool.all_transactions_event_listener();
let result = txpool
.add_transaction_and_subscribe(TransactionOrigin::External, transaction.clone())
.await;
assert_matches!(result, Ok(_));
let mut events = result.unwrap();
assert_matches!(events.next().await, Some(TransactionEvent::Queued));
// The listener of all should receive queued event as well.
assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash)) if hash == *transaction.get_hash());
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_invalid_event() {
let txpool =
TestPoolBuilder::default().with_validator(MockTransactionValidator::return_invalid());
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559().transaction;
let mut all_tx_events = txpool.all_transactions_event_listener();
let result = txpool
.add_transaction_and_subscribe(TransactionOrigin::External, transaction.clone())
.await;
assert_matches!(result, Err(_));
// The listener of all should receive invalid event.
assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Invalid(hash)) if hash == *transaction.get_hash());
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_all() {
let txpool = TestPoolBuilder::default();
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559();
let mut all_tx_events = txpool.all_transactions_event_listener();
let added_result =
txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await;
assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash());
assert_matches!(
all_tx_events.next().await,
Some(FullTransactionEvent::Pending(hash)) if hash == *transaction.transaction.get_hash()
);
let removed_txs = txpool.remove_transactions(vec![*transaction.transaction.hash()]);
assert_eq!(transaction.transaction.hash(), removed_txs[0].transaction.hash());
assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Discarded(hash)) if hash == *transaction.transaction.get_hash());
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_propagate_only() {
let txpool =
TestPoolBuilder::default().with_validator(MockTransactionValidator::no_propagate_local());
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559();
let expected = *transaction.hash();
let mut listener_network = txpool.pending_transactions_listener();
let mut listener_all = txpool.pending_transactions_listener_for(TransactionListenerKind::All);
let result =
txpool.add_transaction(TransactionOrigin::Local, transaction.transaction.clone()).await;
assert!(result.is_ok());
let inserted = listener_all.recv().await.unwrap();
assert_eq!(inserted, expected);
poll_fn(|cx| {
// no propagation
assert!(listener_network.poll_recv(cx).is_pending());
Poll::Ready(())
})
.await;
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_new_propagate_only() {
let txpool =
TestPoolBuilder::default().with_validator(MockTransactionValidator::no_propagate_local());
let mut mock_tx_factory = MockTransactionFactory::default();
let transaction = mock_tx_factory.create_eip1559();
let expected = *transaction.hash();
let mut listener_network = txpool.new_transactions_listener();
let mut listener_all = txpool.new_transactions_listener_for(TransactionListenerKind::All);
let result =
txpool.add_transaction(TransactionOrigin::Local, transaction.transaction.clone()).await;
assert!(result.is_ok());
let inserted = listener_all.recv().await.unwrap();
let actual = *inserted.transaction.hash();
assert_eq!(actual, expected);
poll_fn(|cx| {
// no propagation
assert!(listener_network.poll_recv(cx).is_pending());
Poll::Ready(())
})
.await;
}
#[tokio::test(flavor = "multi_thread")]
async fn txpool_listener_blob_sidecar() {
let txpool =
TestPoolBuilder::default().with_validator(MockTransactionValidator::no_propagate_local());
let mut mock_tx_factory = MockTransactionFactory::default();
let blob_transaction = mock_tx_factory.create_eip4844();
let expected = *blob_transaction.hash();
let mut listener_blob = txpool.blob_transaction_sidecars_listener();
let result = txpool
.add_transaction(TransactionOrigin::Local, blob_transaction.transaction.clone())
.await;
assert!(result.is_ok());
let inserted = listener_blob.recv().await.unwrap();
assert_eq!(*inserted.tx_hash, expected);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/tests/it/blobs.rs | crates/transaction-pool/tests/it/blobs.rs | //! Blob transaction tests
use reth_transaction_pool::{
error::PoolErrorKind,
test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder},
AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool,
};
#[tokio::test(flavor = "multi_thread")]
async fn blobs_exclusive() {
let txpool = TestPoolBuilder::default();
let mut mock_tx_factory = MockTransactionFactory::default();
let blob_tx = mock_tx_factory.create_eip4844();
let AddedTransactionOutcome { hash, .. } = txpool
.add_transaction(TransactionOrigin::External, blob_tx.transaction.clone())
.await
.unwrap();
assert_eq!(hash, *blob_tx.transaction.get_hash());
let mut best_txns = txpool.best_transactions();
assert_eq!(best_txns.next().unwrap().transaction.get_hash(), blob_tx.transaction.get_hash());
assert!(best_txns.next().is_none());
let eip1559_tx =
MockTransaction::eip1559().set_sender(blob_tx.transaction.sender()).inc_price_by(10_000);
let res =
txpool.add_transaction(TransactionOrigin::External, eip1559_tx.clone()).await.unwrap_err();
assert_eq!(res.hash, *eip1559_tx.get_hash());
match res.kind {
PoolErrorKind::ExistingConflictingTransactionType(addr, tx_type) => {
assert_eq!(addr, eip1559_tx.sender());
assert_eq!(tx_type, eip1559_tx.tx_type());
}
_ => unreachable!(),
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/tests/it/best.rs | crates/transaction-pool/tests/it/best.rs | //! Best transaction and filter testing
use reth_transaction_pool::{noop::NoopTransactionPool, BestTransactions, TransactionPool};
#[test]
fn test_best_transactions() {
let noop = NoopTransactionPool::default();
let mut best =
noop.best_transactions().filter_transactions(|_| true).without_blobs().without_updates();
assert!(best.next().is_none());
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/tests/it/main.rs | crates/transaction-pool/tests/it/main.rs | //! transaction-pool integration tests
#[cfg(feature = "test-utils")]
mod blobs;
#[cfg(feature = "test-utils")]
mod evict;
#[cfg(feature = "test-utils")]
mod listeners;
#[cfg(feature = "test-utils")]
mod pending;
mod best;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/benches/canonical_state_change.rs | crates/transaction-pool/benches/canonical_state_change.rs | #![allow(missing_docs)]
use alloy_consensus::Transaction;
use alloy_primitives::{Address, B256, U256};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use rand::prelude::SliceRandom;
use reth_ethereum_primitives::{Block, BlockBody};
use reth_execution_types::ChangedAccount;
use reth_primitives_traits::{Header, SealedBlock};
use reth_transaction_pool::{
test_utils::{MockTransaction, TestPoolBuilder},
BlockInfo, CanonicalStateUpdate, PoolConfig, PoolTransaction, PoolUpdateKind, SubPoolLimit,
TransactionOrigin, TransactionPool, TransactionPoolExt,
};
use std::{collections::HashMap, time::Duration};
/// Generates a set of transactions for multiple senders
fn generate_transactions(num_senders: usize, txs_per_sender: usize) -> Vec<MockTransaction> {
let mut runner = TestRunner::deterministic();
let mut txs = Vec::new();
for sender_idx in 0..num_senders {
// Create a unique sender address
let sender_bytes = sender_idx.to_be_bytes();
let addr_slice = [0u8; 12].into_iter().chain(sender_bytes.into_iter()).collect::<Vec<_>>();
let sender = Address::from_slice(&addr_slice);
// Generate transactions for this sender
for nonce in 0..txs_per_sender {
let mut tx = any::<MockTransaction>().new_tree(&mut runner).unwrap().current();
tx.set_sender(sender);
tx.set_nonce(nonce as u64);
// Ensure it's not a legacy transaction
if tx.is_legacy() || tx.is_eip2930() {
tx = MockTransaction::eip1559();
tx.set_priority_fee(any::<u128>().new_tree(&mut runner).unwrap().current());
tx.set_max_fee(any::<u128>().new_tree(&mut runner).unwrap().current());
tx.set_sender(sender);
tx.set_nonce(nonce as u64);
}
txs.push(tx);
}
}
txs
}
/// Fill the pool with transactions
async fn fill_pool(pool: &TestPoolBuilder, txs: Vec<MockTransaction>) -> HashMap<Address, u64> {
let mut sender_nonces = HashMap::new();
// Add transactions one by one
for tx in txs {
let sender = tx.sender();
let nonce = tx.nonce();
// Track the highest nonce for each sender
sender_nonces.insert(sender, nonce.max(sender_nonces.get(&sender).copied().unwrap_or(0)));
// Add transaction to the pool
let _ = pool.add_transaction(TransactionOrigin::External, tx).await;
}
sender_nonces
}
fn canonical_state_change_bench(c: &mut Criterion) {
let mut group = c.benchmark_group("Transaction Pool Canonical State Change");
group.measurement_time(Duration::from_secs(10));
let rt = tokio::runtime::Runtime::new().unwrap();
// Test different pool sizes
for num_senders in [500, 1000, 2000] {
for txs_per_sender in [1, 5, 10] {
let total_txs = num_senders * txs_per_sender;
let group_id = format!(
"txpool | canonical_state_change | senders: {num_senders} | txs_per_sender: {txs_per_sender} | total: {total_txs}",
);
// Create the update
// Create a mock block - using default Ethereum block
let header = Header::default();
let body = BlockBody::default();
let block = Block { header, body };
let sealed_block = SealedBlock::seal_slow(block);
let txs = generate_transactions(num_senders, txs_per_sender);
let pool = TestPoolBuilder::default().with_config(PoolConfig {
pending_limit: SubPoolLimit::max(),
basefee_limit: SubPoolLimit::max(),
queued_limit: SubPoolLimit::max(),
blob_limit: SubPoolLimit::max(),
max_account_slots: 50,
..Default::default()
});
struct Input<B: reth_primitives_traits::Block> {
sealed_block: SealedBlock<B>,
pool: TestPoolBuilder,
}
group.bench_with_input(group_id, &Input { sealed_block, pool }, |b, input| {
b.iter_batched(
|| {
// Setup phase - create pool and transactions
let sealed_block = &input.sealed_block;
let pool = &input.pool;
let senders = pool.unique_senders();
for sender in senders {
pool.remove_transactions_by_sender(sender);
}
// Set initial block info
pool.set_block_info(BlockInfo {
last_seen_block_number: 0,
last_seen_block_hash: B256::ZERO,
pending_basefee: 1_000_000_000,
pending_blob_fee: Some(1_000_000),
block_gas_limit: 30_000_000,
});
let sender_nonces = rt.block_on(fill_pool(pool, txs.clone()));
let mut changed_accounts: Vec<ChangedAccount> = sender_nonces
.into_iter()
.map(|(address, nonce)| ChangedAccount {
address,
nonce: nonce + 1, // Increment nonce as if transactions were mined
balance: U256::from(9_000_000_000_000_000u64), // Decrease balance
})
.collect();
changed_accounts.shuffle(&mut rand::rng());
let changed_accounts = changed_accounts.drain(..100).collect();
let update = CanonicalStateUpdate {
new_tip: sealed_block,
pending_block_base_fee: 1_000_000_000, // 1 gwei
pending_block_blob_fee: Some(1_000_000), // 0.001 gwei
changed_accounts,
mined_transactions: vec![], // No transactions mined in this benchmark
update_kind: PoolUpdateKind::Commit,
};
(pool, update)
},
|(pool, update)| {
// The actual operation being benchmarked
pool.on_canonical_state_change(update);
},
BatchSize::LargeInput,
);
});
}
}
group.finish();
}
criterion_group! {
name = canonical_state_change;
config = Criterion::default();
targets = canonical_state_change_bench
}
criterion_main!(canonical_state_change);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/benches/reorder.rs | crates/transaction-pool/benches/reorder.rs | #![allow(missing_docs)]
use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use reth_transaction_pool::test_utils::MockTransaction;
/// Transaction Pool trait for benching.
trait BenchTxPool: Default {
fn add_transaction(&mut self, tx: MockTransaction);
fn reorder(&mut self, base_fee: u64);
}
fn txpool_reordering(c: &mut Criterion) {
let mut group = c.benchmark_group("Transaction Pool Reordering");
for seed_size in [1_000, 10_000, 50_000, 100_000] {
for input_size in [10, 100, 1_000] {
let (txs, new_txs, base_fee) = generate_test_data(seed_size, input_size);
use implementations::*;
// Vanilla sorting of unsorted collection
txpool_reordering_bench::<VecTxPoolSortStable>(
&mut group,
"VecTxPoolSortStable",
txs.clone(),
new_txs.clone(),
base_fee,
);
// Unstable sorting of unsorted collection
txpool_reordering_bench::<VecTxPoolSortUnstable>(
&mut group,
"VecTxPoolSortUnstable",
txs.clone(),
new_txs.clone(),
base_fee,
);
// BinaryHeap that is resorted on each update
txpool_reordering_bench::<BinaryHeapTxPool>(
&mut group,
"BinaryHeapTxPool",
txs,
new_txs,
base_fee,
);
}
}
}
fn txpool_reordering_bench<T: BenchTxPool>(
group: &mut BenchmarkGroup<'_, WallTime>,
description: &str,
seed: Vec<MockTransaction>,
new_txs: Vec<MockTransaction>,
base_fee: u64,
) {
let setup = || {
let mut txpool = T::default();
txpool.reorder(base_fee);
for tx in &seed {
txpool.add_transaction(tx.clone());
}
(txpool, new_txs.clone())
};
let group_id = format!(
"txpool | seed size: {} | input size: {} | {}",
seed.len(),
new_txs.len(),
description
);
group.bench_function(group_id, |b| {
b.iter_with_setup(setup, |(mut txpool, new_txs)| {
// Reorder with new base fee
let bigger_base_fee = base_fee.saturating_add(10);
txpool.reorder(bigger_base_fee);
// Reorder with new base fee after adding transactions.
for new_tx in new_txs {
txpool.add_transaction(new_tx);
}
let smaller_base_fee = base_fee.saturating_sub(10);
txpool.reorder(smaller_base_fee);
txpool
});
});
}
fn generate_test_data(
seed_size: usize,
input_size: usize,
) -> (Vec<MockTransaction>, Vec<MockTransaction>, u64) {
let mut runner = TestRunner::deterministic();
let txs = prop::collection::vec(any::<MockTransaction>(), seed_size)
.new_tree(&mut runner)
.unwrap()
.current();
let new_txs = prop::collection::vec(any::<MockTransaction>(), input_size)
.new_tree(&mut runner)
.unwrap()
.current();
let base_fee = any::<u64>().new_tree(&mut runner).unwrap().current();
(txs, new_txs, base_fee)
}
mod implementations {
use super::*;
use alloy_consensus::Transaction;
use std::collections::BinaryHeap;
/// This implementation appends the transactions and uses [`Vec::sort_by`] function for sorting.
#[derive(Default)]
pub(crate) struct VecTxPoolSortStable {
inner: Vec<MockTransaction>,
}
impl BenchTxPool for VecTxPoolSortStable {
fn add_transaction(&mut self, tx: MockTransaction) {
self.inner.push(tx);
}
fn reorder(&mut self, base_fee: u64) {
self.inner.sort_by(|a, b| {
a.effective_tip_per_gas(base_fee)
.expect("exists")
.cmp(&b.effective_tip_per_gas(base_fee).expect("exists"))
})
}
}
/// Pool that appends transactions to a `Vec` and re-sorts it with
/// [`Vec::sort_unstable_by`] on every reorder.
#[derive(Default)]
pub(crate) struct VecTxPoolSortUnstable {
    // Transactions in their current order.
    inner: Vec<MockTransaction>,
}

impl BenchTxPool for VecTxPoolSortUnstable {
    fn add_transaction(&mut self, tx: MockTransaction) {
        self.inner.push(tx);
    }

    fn reorder(&mut self, base_fee: u64) {
        // Unstable sort comparing effective tips at the new base fee; ties may
        // land in any relative order.
        self.inner.sort_unstable_by(|lhs, rhs| {
            let lhs_tip = lhs.effective_tip_per_gas(base_fee).expect("exists");
            let rhs_tip = rhs.effective_tip_per_gas(base_fee).expect("exists");
            lhs_tip.cmp(&rhs_tip)
        })
    }
}
/// A transaction paired with its cached ordering priority.
///
/// Equality and ordering consider only `priority`, so two entries wrapping
/// different transactions with the same priority compare as equal — that is
/// sufficient for heap ordering in the benchmark.
struct MockTransactionWithPriority {
    // The wrapped transaction.
    tx: MockTransaction,
    // Priority used for ordering; recomputed whenever the base fee changes.
    priority: u128,
}

impl PartialEq for MockTransactionWithPriority {
    fn eq(&self, other: &Self) -> bool {
        self.priority == other.priority
    }
}

impl Eq for MockTransactionWithPriority {}

impl PartialOrd for MockTransactionWithPriority {
    // Delegates to `Ord` so the two orderings can never disagree.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for MockTransactionWithPriority {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.priority.cmp(&other.priority)
    }
}
/// Pool backed by a `BinaryHeap` keyed on a cached priority; a reorder empties
/// the heap and rebuilds it with priorities recomputed at the new base fee.
#[derive(Default)]
pub(crate) struct BinaryHeapTxPool {
    // Heap of transactions ordered by their cached priority.
    inner: BinaryHeap<MockTransactionWithPriority>,
    // Base fee used for the most recent reorder, if any.
    base_fee: Option<u64>,
}

impl BenchTxPool for BinaryHeapTxPool {
    fn add_transaction(&mut self, tx: MockTransaction) {
        // Before the first reorder there is no base fee yet; fall back to a
        // zero priority in that case (matches `unwrap_or_default`).
        let priority = match self.base_fee {
            Some(base_fee) => tx.effective_tip_per_gas(base_fee).expect("set"),
            None => 0,
        };
        self.inner.push(MockTransactionWithPriority { tx, priority });
    }

    fn reorder(&mut self, base_fee: u64) {
        self.base_fee = Some(base_fee);

        // Take ownership of the old heap and rebuild it with priorities
        // computed at the new base fee.
        let entries = std::mem::take(&mut self.inner);
        self.inner = entries
            .into_iter()
            .map(|entry| {
                let priority = entry.tx.effective_tip_per_gas(base_fee).expect("set");
                MockTransactionWithPriority { tx: entry.tx, priority }
            })
            .collect();
    }
}
}
// Register the reordering benchmark and generate the criterion `main` entry point.
criterion_group!(reorder, txpool_reordering);
criterion_main!(reorder);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/benches/priority.rs | crates/transaction-pool/benches/priority.rs | #![allow(missing_docs)]
use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use reth_transaction_pool::{blob_tx_priority, fee_delta};
use std::hint::black_box;
/// Deterministically samples an arbitrary `(u128, u128)` pair for the
/// fee-delta benchmark.
fn generate_test_data_fee_delta() -> (u128, u128) {
    let mut runner = TestRunner::deterministic();
    let tree = prop::arbitrary::any::<(u128, u128)>().new_tree(&mut runner).unwrap();
    tree.current()
}
/// Deterministically samples the four arbitrary `u128` inputs for the blob
/// priority benchmark.
fn generate_test_data_priority() -> (u128, u128, u128, u128) {
    let mut runner = TestRunner::deterministic();
    let tree =
        prop::arbitrary::any::<(u128, u128, u128, u128)>().new_tree(&mut runner).unwrap();
    tree.current()
}
/// Benchmarks `blob_tx_priority` on one fixed input tuple.
fn priority_bench(
    group: &mut BenchmarkGroup<'_, WallTime>,
    description: &str,
    input_data: (u128, u128, u128, u128),
) {
    let (in0, in1, in2, in3) = input_data;
    let group_id = format!("txpool | {description}");
    group.bench_function(group_id, |bencher| {
        bencher.iter(|| {
            // `black_box` keeps the optimizer from const-folding the call away.
            black_box(blob_tx_priority(
                black_box(in0),
                black_box(in1),
                black_box(in2),
                black_box(in3),
            ));
        });
    });
}
/// Benchmarks `fee_delta` on one fixed input pair.
fn fee_jump_bench(
    group: &mut BenchmarkGroup<'_, WallTime>,
    description: &str,
    input_data: (u128, u128),
) {
    let (fee_a, fee_b) = input_data;
    let group_id = format!("txpool | {description}");
    group.bench_function(group_id, |bencher| {
        bencher.iter(|| {
            // `black_box` keeps the optimizer from const-folding the call away.
            black_box(fee_delta(black_box(fee_a), black_box(fee_b)));
        });
    });
}
/// Registers the fee-delta and blob-priority micro-benchmarks.
fn blob_priority_calculation(c: &mut Criterion) {
    let mut group = c.benchmark_group("Blob priority calculation");
    let fee_jump_input = generate_test_data_fee_delta();

    // Measures `fee_delta` on an arbitrary fee pair.
    // NOTE(review): the previous comment here ("Unstable sorting of unsorted
    // collection") appeared to be copied from the reorder benchmark.
    fee_jump_bench(&mut group, "BenchmarkDynamicFeeJumpCalculation", fee_jump_input);

    let blob_priority_input = generate_test_data_priority();

    // Measures `blob_tx_priority` on arbitrary inputs.
    // NOTE(review): the previous comment here ("BinaryHeap that is resorted on
    // each update") also appeared to be copied from the reorder benchmark.
    priority_bench(&mut group, "BenchmarkPriorityCalculation", blob_priority_input);
}

// Register the benchmark and generate the criterion `main` entry point.
criterion_group!(priority, blob_priority_calculation);
criterion_main!(priority);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/benches/insertion.rs | crates/transaction-pool/benches/insertion.rs | #![allow(missing_docs)]
use alloy_primitives::Address;
use criterion::{criterion_group, criterion_main, Criterion};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use reth_transaction_pool::{
batcher::{BatchTxProcessor, BatchTxRequest},
test_utils::{testing_pool, MockTransaction},
TransactionOrigin, TransactionPool,
};
use tokio::sync::oneshot;
/// Generates `num_senders * txs_per_sender` mock transactions with contiguous
/// nonces per sender.
///
/// Pre-EIP-1559 transaction types are replaced with EIP-1559 transactions so
/// every generated transaction carries dynamic fee fields.
fn generate_transactions(num_senders: usize, txs_per_sender: usize) -> Vec<MockTransaction> {
    let mut runner = TestRunner::deterministic();
    let mut txs = Vec::with_capacity(num_senders * txs_per_sender);

    for sender_idx in 0..num_senders {
        // Derive a unique 20-byte address from the sender index: 12 zero bytes
        // followed by the big-endian index (assumes an 8-byte `usize`, same as
        // the original chained-iterator construction).
        let mut addr_bytes = [0u8; 20];
        addr_bytes[12..].copy_from_slice(&sender_idx.to_be_bytes());
        let sender = Address::from_slice(&addr_bytes);

        for nonce in 0..txs_per_sender as u64 {
            let mut tx = any::<MockTransaction>().new_tree(&mut runner).unwrap().current();
            tx.set_sender(sender);
            tx.set_nonce(nonce);

            // Replace pre-EIP-1559 types with a dynamic-fee transaction.
            if tx.is_legacy() || tx.is_eip2930() {
                tx = MockTransaction::eip1559();
                tx.set_priority_fee(any::<u128>().new_tree(&mut runner).unwrap().current());
                tx.set_max_fee(any::<u128>().new_tree(&mut runner).unwrap().current());
                tx.set_sender(sender);
                tx.set_nonce(nonce);
            }

            txs.push(tx);
        }
    }

    txs
}
/// Benchmark individual transaction insertion.
///
/// Each scenario is `(tx_count, sender_count)`: `tx_count` transactions in
/// total, spread evenly across `sender_count` senders.
fn txpool_insertion(c: &mut Criterion) {
    let mut group = c.benchmark_group("Txpool insertion");

    let scenarios = [(1000, 100), (5000, 500), (10000, 1000), (20000, 2000)];

    for (tx_count, sender_count) in scenarios {
        let group_id = format!("txs: {tx_count} | senders: {sender_count}");

        // `generate_transactions(num_senders, txs_per_sender)` — pass the
        // sender count first and the per-sender depth second so the generated
        // workload matches the scenario label. (Previously the scenario values
        // were forwarded directly, producing `tx_count * sender_count`
        // transactions — up to 40M for the largest scenario — instead of
        // `tx_count`.)
        let txs_per_sender = tx_count / sender_count;

        group.bench_function(group_id, |b| {
            b.iter_with_setup(
                || {
                    let rt = tokio::runtime::Runtime::new().unwrap();
                    let pool = testing_pool();
                    let txs = generate_transactions(sender_count, txs_per_sender);
                    (rt, pool, txs)
                },
                |(rt, pool, txs)| {
                    rt.block_on(async {
                        for tx in &txs {
                            let _ =
                                pool.add_transaction(TransactionOrigin::Local, tx.clone()).await;
                        }
                    });
                },
            );
        });
    }
    group.finish();
}
/// Benchmark batch transaction insertion via [`BatchTxProcessor`].
///
/// Each scenario is `(tx_count, sender_count)`: `tx_count` transactions in
/// total, spread evenly across `sender_count` senders.
fn txpool_batch_insertion(c: &mut Criterion) {
    let mut group = c.benchmark_group("Txpool batch insertion");

    let scenarios = [(1000, 100), (5000, 500), (10000, 1000), (20000, 2000)];

    for (tx_count, sender_count) in scenarios {
        let group_id = format!("txs: {tx_count} | senders: {sender_count}");

        // `generate_transactions(num_senders, txs_per_sender)` — pass the
        // sender count first and the per-sender depth second so the generated
        // workload matches the scenario label. (Previously the scenario values
        // were forwarded directly, producing `tx_count * sender_count`
        // transactions instead of `tx_count`.)
        let txs_per_sender = tx_count / sender_count;

        group.bench_function(group_id, |b| {
            b.iter_with_setup(
                || {
                    let rt = tokio::runtime::Runtime::new().unwrap();
                    let pool = testing_pool();
                    let txs = generate_transactions(sender_count, txs_per_sender);
                    let (processor, request_tx) = BatchTxProcessor::new(pool, tx_count);
                    let processor_handle = rt.spawn(processor);

                    // Pre-build the request/response channel pairs outside the
                    // timed section.
                    let mut batch_requests = Vec::with_capacity(tx_count);
                    let mut response_futures = Vec::with_capacity(tx_count);

                    for tx in txs {
                        let (response_tx, response_rx) = oneshot::channel();
                        batch_requests.push(BatchTxRequest::new(tx, response_tx));
                        response_futures.push(response_rx);
                    }

                    (rt, request_tx, processor_handle, batch_requests, response_futures)
                },
                |(rt, request_tx, _processor_handle, batch_requests, response_futures)| {
                    rt.block_on(async {
                        // Send all transactions, then await every response.
                        for request in batch_requests {
                            request_tx.send(request).unwrap();
                        }

                        for response_rx in response_futures {
                            let _res = response_rx.await.unwrap();
                        }
                    });
                },
            );
        });
    }
    group.finish();
}
// Register both insertion benchmarks and generate the criterion `main` entry point.
criterion_group! {
    name = insertion;
    config = Criterion::default();
    targets = txpool_insertion, txpool_batch_insertion
}
criterion_main!(insertion);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/transaction-pool/benches/truncate.rs | crates/transaction-pool/benches/truncate.rs | #![allow(missing_docs)]
use alloy_primitives::Address;
use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
};
use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner};
use reth_transaction_pool::{
pool::{BasefeeOrd, BlobTransactions, ParkedPool, PendingPool, QueuedOrd},
test_utils::{MockOrdering, MockTransaction, MockTransactionFactory},
SubPoolLimit,
};
/// Generates `depth` dependent transactions for `sender` with nonces
/// `0..depth`. Values are produced using [Arbitrary]; when `only_eip4844` is
/// set, only blob transactions are sampled.
fn create_transactions_for_sender(
    runner: &mut TestRunner,
    sender: Address,
    depth: usize,
    only_eip4844: bool,
) -> Vec<MockTransaction> {
    // An empty transaction chain is meaningless in this context, so require at
    // least one transaction.
    assert!(depth > 0);

    if only_eip4844 {
        // Sample exclusively EIP-4844 (blob) transactions.
        return prop::collection::vec(
            any::<MockTransaction>().prop_filter("only eip4844", |tx| tx.is_eip4844()),
            depth,
        )
        .new_tree(runner)
        .unwrap()
        .current();
    }

    let mut txs =
        prop::collection::vec(any::<MockTransaction>(), depth).new_tree(runner).unwrap().current();

    for (nonce, tx) in txs.iter_mut().enumerate() {
        // Pre-EIP-1559 types get swapped for a dynamic-fee transaction so the
        // whole chain is post-EIP-1559.
        if tx.is_legacy() || tx.is_eip2930() {
            let mut replacement = MockTransaction::eip1559();
            // Fee values are sampled using arbitrary.
            replacement.set_priority_fee(any::<u128>().new_tree(runner).unwrap().current());
            replacement.set_max_fee(any::<u128>().new_tree(runner).unwrap().current());
            *tx = replacement;
        }

        tx.set_sender(sender);
        tx.set_nonce(nonce as u64);
    }

    txs
}
/// Generates transactions for `senders` distinct senders; each sender gets an
/// [Arbitrary]-chosen depth bounded by `max_depth` (minimum 1).
///
/// Sender addresses are derived from the sender index so any necessary
/// debugging is easier. Delegates per-sender generation to
/// [`create_transactions_for_sender`].
fn generate_many_transactions(
    senders: usize,
    max_depth: usize,
    only_eip4844: bool,
) -> Vec<MockTransaction> {
    let mut runner = TestRunner::deterministic();
    (0..senders)
        .flat_map(|idx| {
            // Bound the arbitrary depth via modulo, adding one so the minimum
            // is always 1.
            let depth = any::<usize>().new_tree(&mut runner).unwrap().current() % max_depth + 1;

            // Address layout: 12 zero bytes followed by the big-endian index.
            let addr_bytes =
                [0u8; 12].into_iter().chain(idx.to_be_bytes().into_iter()).collect::<Vec<_>>();
            let sender = Address::from_slice(&addr_bytes);

            create_transactions_for_sender(&mut runner, sender, depth, only_eip4844)
        })
        .collect()
}
/// Benchmarks all pool types for the truncate function.
///
/// One shared transaction set is used for the basefee, pending and queued
/// pools; the blob pool gets its own EIP-4844-only set.
fn benchmark_pools(group: &mut BenchmarkGroup<'_, WallTime>, senders: usize, max_depth: usize) {
    println!(
        "Generating transactions for benchmark with {senders} unique senders and a max depth of {max_depth}..."
    );
    let txs = generate_many_transactions(senders, max_depth, false);

    // benchmark parked pool
    truncate_basefee(group, "BasefeePool", txs.clone(), senders, max_depth);

    // benchmark pending pool
    truncate_pending(group, "PendingPool", txs.clone(), senders, max_depth);

    // benchmark queued pool (last user of `txs`, so it takes ownership)
    truncate_queued(group, "QueuedPool", txs, senders, max_depth);

    // the blob pool only accepts EIP-4844 transactions, so generate those separately
    let blob_txs = generate_many_transactions(senders, max_depth, true);
    truncate_blob(group, "BlobPool", blob_txs, senders, max_depth);
}
/// Entry point: runs [`benchmark_pools`] across a grid of sender counts and
/// per-sender depths, plus two larger configurations.
fn txpool_truncate(c: &mut Criterion) {
    let mut group = c.benchmark_group("Transaction Pool Truncate");

    // the first few benchmarks (5, 10, 20, 100) should cause the txpool to hit the max tx limit,
    // so they are there to make sure we do not regress on best-case performance.
    //
    // the last few benchmarks (1000, 2000) should hit the max tx limit, at least for large enough
    // depth, so these should benchmark closer to real-world performance
    for senders in [5, 10, 20, 100, 1000, 2000] {
        // the max we'll be benching is 20, because MAX_ACCOUNT_SLOTS so far is 16. So 20 should be
        // a reasonable worst-case benchmark
        for max_depth in [1, 5, 10, 20] {
            benchmark_pools(&mut group, senders, max_depth);
        }
    }

    let large_senders = 5000;
    let max_depth = 16;

    // let's run a benchmark that includes a large number of senders and max_depth of 16 to ensure
    // we hit the TXPOOL_SUBPOOL_MAX_TXS_DEFAULT limit, which is currently 10k
    benchmark_pools(&mut group, large_senders, max_depth);

    // now we'll run a more realistic benchmark, with max depth of 1 and 15000 senders
    let realistic_senders = 15000;
    let realistic_max_depth = 1;
    benchmark_pools(&mut group, realistic_senders, realistic_max_depth);
}
/// Benchmarks truncation of the blob sub-pool against the default limit.
fn truncate_blob(
    group: &mut BenchmarkGroup<'_, WallTime>,
    description: &str,
    seed: Vec<MockTransaction>,
    senders: usize,
    max_depth: usize,
) {
    // Rebuild a fresh, fully-populated pool for every iteration so truncation
    // always starts from the same state.
    let setup = || {
        let mut factory = MockTransactionFactory::default();
        let mut pool = BlobTransactions::default();
        for tx in &seed {
            pool.add_transaction(factory.validated_arc(tx.clone()))
        }
        pool
    };

    let group_id = format!(
        "txpool | total txs: {} | total senders: {} | max depth: {} | {}",
        seed.len(),
        senders,
        max_depth,
        description,
    );

    // Truncate with the default `SubPoolLimit`, returning the pool from the
    // measured closure (mirrors the other truncate benches).
    group.bench_function(group_id, |b| {
        b.iter_with_setup(setup, |mut pool| {
            pool.truncate_pool(SubPoolLimit::default());
            pool
        });
    });
}
/// Benchmarks truncation of the pending sub-pool against the default limit.
fn truncate_pending(
    group: &mut BenchmarkGroup<'_, WallTime>,
    description: &str,
    seed: Vec<MockTransaction>,
    senders: usize,
    max_depth: usize,
) {
    // Fresh pool per iteration; transactions are added with a base fee of zero
    // so none of them are immediately removed.
    let setup = || {
        let mut factory = MockTransactionFactory::default();
        let mut pool = PendingPool::new(MockOrdering::default());
        for tx in &seed {
            pool.add_transaction(factory.validated_arc(tx.clone()), 0);
        }
        pool
    };

    let group_id = format!(
        "txpool | total txs: {} | total senders: {} | max depth: {} | {}",
        seed.len(),
        senders,
        max_depth,
        description,
    );

    // Truncate with the default `SubPoolLimit`, returning the pool from the
    // measured closure (mirrors the other truncate benches).
    group.bench_function(group_id, |b| {
        b.iter_with_setup(setup, |mut pool| {
            pool.truncate_pool(SubPoolLimit::default());
            pool
        });
    });
}
/// Benchmarks truncation of the queued (parked) sub-pool against the default limit.
fn truncate_queued(
    group: &mut BenchmarkGroup<'_, WallTime>,
    description: &str,
    seed: Vec<MockTransaction>,
    senders: usize,
    max_depth: usize,
) {
    // Rebuild a fresh, fully-populated pool for every iteration so truncation
    // always starts from the same state.
    let setup = || {
        let mut factory = MockTransactionFactory::default();
        let mut pool = ParkedPool::<QueuedOrd<_>>::default();
        for tx in &seed {
            pool.add_transaction(factory.validated_arc(tx.clone()));
        }
        pool
    };

    let group_id = format!(
        "txpool | total txs: {} | total senders: {} | max depth: {} | {}",
        seed.len(),
        senders,
        max_depth,
        description,
    );

    // Truncate with the default `SubPoolLimit`, returning the pool from the
    // measured closure (mirrors the other truncate benches).
    group.bench_function(group_id, |b| {
        b.iter_with_setup(setup, |mut pool| {
            pool.truncate_pool(SubPoolLimit::default());
            pool
        });
    });
}
/// Benchmarks truncation of the basefee (parked) sub-pool against the default limit.
fn truncate_basefee(
    group: &mut BenchmarkGroup<'_, WallTime>,
    description: &str,
    seed: Vec<MockTransaction>,
    senders: usize,
    max_depth: usize,
) {
    // Rebuild a fresh, fully-populated pool for every iteration so truncation
    // always starts from the same state.
    let setup = || {
        let mut factory = MockTransactionFactory::default();
        let mut pool = ParkedPool::<BasefeeOrd<_>>::default();
        for tx in &seed {
            pool.add_transaction(factory.validated_arc(tx.clone()));
        }
        pool
    };

    let group_id = format!(
        "txpool | total txs: {} | total senders: {} | max depth: {} | {}",
        seed.len(),
        senders,
        max_depth,
        description,
    );

    // Truncate with the default `SubPoolLimit`, returning the pool from the
    // measured closure (mirrors the other truncate benches).
    group.bench_function(group_id, |b| {
        b.iter_with_setup(setup, |mut pool| {
            pool.truncate_pool(SubPoolLimit::default());
            pool
        });
    });
}
// Register the truncate benchmark and generate the criterion `main` entry point.
criterion_group! {
    name = truncate;
    config = Criterion::default();
    targets = txpool_truncate
}
criterion_main!(truncate);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/chain_info.rs | crates/chain-state/src/chain_info.rs | use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumHash;
use alloy_primitives::BlockNumber;
use parking_lot::RwLock;
use reth_chainspec::ChainInfo;
use reth_primitives_traits::{NodePrimitives, SealedHeader};
use std::{
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Instant,
};
use tokio::sync::watch;
/// Tracks the chain info: canonical head, safe block, finalized block.
#[derive(Debug, Clone)]
pub struct ChainInfoTracker<N: NodePrimitives> {
    /// Shared state; clones of the tracker observe the same underlying data.
    inner: Arc<ChainInfoInner<N>>,
}
impl<N> ChainInfoTracker<N>
where
    N: NodePrimitives,
    N::BlockHeader: BlockHeader,
{
    /// Create a new chain info container for the given canonical head and finalized header if it
    /// exists.
    pub fn new(
        head: SealedHeader<N::BlockHeader>,
        finalized: Option<SealedHeader<N::BlockHeader>>,
        safe: Option<SealedHeader<N::BlockHeader>>,
    ) -> Self {
        // Watch channels let consumers subscribe to safe/finalized block updates.
        let (finalized_block, _) = watch::channel(finalized);
        let (safe_block, _) = watch::channel(safe);

        Self {
            inner: Arc::new(ChainInfoInner {
                last_forkchoice_update: RwLock::new(None),
                canonical_head_number: AtomicU64::new(head.number()),
                canonical_head: RwLock::new(head),
                safe_block,
                finalized_block,
            }),
        }
    }

    /// Returns the [`ChainInfo`] for the canonical head.
    pub fn chain_info(&self) -> ChainInfo {
        let inner = self.inner.canonical_head.read();
        ChainInfo { best_hash: inner.hash(), best_number: inner.number() }
    }

    /// Update the timestamp when we received a forkchoice update.
    pub fn on_forkchoice_update_received(&self) {
        self.inner.last_forkchoice_update.write().replace(Instant::now());
    }

    /// Returns the instant when we received the latest forkchoice update.
    pub fn last_forkchoice_update_received_at(&self) -> Option<Instant> {
        *self.inner.last_forkchoice_update.read()
    }

    /// Returns a clone of the canonical head header.
    pub fn get_canonical_head(&self) -> SealedHeader<N::BlockHeader> {
        self.inner.canonical_head.read().clone()
    }

    /// Returns the safe header of the chain, if one has been set.
    pub fn get_safe_header(&self) -> Option<SealedHeader<N::BlockHeader>> {
        self.inner.safe_block.borrow().clone()
    }

    /// Returns the finalized header of the chain, if one has been set.
    pub fn get_finalized_header(&self) -> Option<SealedHeader<N::BlockHeader>> {
        self.inner.finalized_block.borrow().clone()
    }

    /// Returns the block number and hash of the canonical head.
    pub fn get_canonical_num_hash(&self) -> BlockNumHash {
        self.inner.canonical_head.read().num_hash()
    }

    /// Returns the block number of the canonical head.
    ///
    /// Reads the dedicated atomic counter, avoiding the header lock.
    pub fn get_canonical_block_number(&self) -> BlockNumber {
        self.inner.canonical_head_number.load(Ordering::Relaxed)
    }

    /// Returns the block number and hash of the safe block, if one has been set.
    pub fn get_safe_num_hash(&self) -> Option<BlockNumHash> {
        self.inner.safe_block.borrow().as_ref().map(SealedHeader::num_hash)
    }

    /// Returns the block number and hash of the finalized block, if one has been set.
    pub fn get_finalized_num_hash(&self) -> Option<BlockNumHash> {
        self.inner.finalized_block.borrow().as_ref().map(SealedHeader::num_hash)
    }

    /// Sets the canonical head of the chain.
    pub fn set_canonical_head(&self, header: SealedHeader<N::BlockHeader>) {
        let number = header.number();
        *self.inner.canonical_head.write() = header;

        // also update the atomic number.
        self.inner.canonical_head_number.store(number, Ordering::Relaxed);
    }

    /// Sets the safe header of the chain.
    ///
    /// Subscribers are only notified if the hash actually changed.
    pub fn set_safe(&self, header: SealedHeader<N::BlockHeader>) {
        self.inner.safe_block.send_if_modified(|current_header| {
            if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) {
                let _ = current_header.replace(header);
                return true
            }

            false
        });
    }

    /// Sets the finalized header of the chain.
    ///
    /// Subscribers are only notified if the hash actually changed.
    pub fn set_finalized(&self, header: SealedHeader<N::BlockHeader>) {
        self.inner.finalized_block.send_if_modified(|current_header| {
            if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) {
                let _ = current_header.replace(header);
                return true
            }

            false
        });
    }

    /// Subscribe to changes of the finalized block.
    pub fn subscribe_finalized_block(
        &self,
    ) -> watch::Receiver<Option<SealedHeader<N::BlockHeader>>> {
        self.inner.finalized_block.subscribe()
    }

    /// Subscribe to changes of the safe block.
    pub fn subscribe_safe_block(&self) -> watch::Receiver<Option<SealedHeader<N::BlockHeader>>> {
        self.inner.safe_block.subscribe()
    }
}
/// Container type for all chain info fields.
///
/// Shared behind an [`Arc`] by [`ChainInfoTracker`].
#[derive(Debug)]
struct ChainInfoInner<N: NodePrimitives = reth_ethereum_primitives::EthPrimitives> {
    /// Timestamp when we received the last fork choice update.
    ///
    /// This is mainly used to track if we're connected to a beacon node.
    last_forkchoice_update: RwLock<Option<Instant>>,
    /// Tracks the number of the `canonical_head`, readable without taking the header lock.
    canonical_head_number: AtomicU64,
    /// The canonical head of the chain.
    canonical_head: RwLock<SealedHeader<N::BlockHeader>>,
    /// The block that the beacon node considers safe.
    ///
    /// Kept in a watch channel so consumers can subscribe to updates.
    safe_block: watch::Sender<Option<SealedHeader<N::BlockHeader>>>,
    /// The block that the beacon node considers finalized.
    ///
    /// Kept in a watch channel so consumers can subscribe to updates.
    finalized_block: watch::Sender<Option<SealedHeader<N::BlockHeader>>>,
}
#[cfg(test)]
mod tests {
    // Unit tests for `ChainInfoTracker`: construction, forkchoice timestamps,
    // and the canonical/safe/finalized setters and getters.
    use super::*;
    use reth_ethereum_primitives::EthPrimitives;
    use reth_testing_utils::{generators, generators::random_header};

    #[test]
    fn test_chain_info() {
        // Create a random header
        let mut rng = generators::rng();
        let header = random_header(&mut rng, 10, None);

        // Create a new chain info tracker with the header
        let tracker: ChainInfoTracker<EthPrimitives> =
            ChainInfoTracker::new(header.clone(), None, None);

        // Fetch the chain information from the tracker
        let chain_info = tracker.chain_info();

        // Verify that the chain information matches the header
        assert_eq!(chain_info.best_number, header.number);
        assert_eq!(chain_info.best_hash, header.hash());
    }

    #[test]
    fn test_on_forkchoice_update_received() {
        // Create a random block header
        let mut rng = generators::rng();
        let header = random_header(&mut rng, 10, None);

        // Create a new chain info tracker with the header
        let tracker: ChainInfoTracker<EthPrimitives> = ChainInfoTracker::new(header, None, None);

        // Assert that there has been no forkchoice update yet (the timestamp is None)
        assert!(tracker.last_forkchoice_update_received_at().is_none());

        // Call the method to record the receipt of a forkchoice update
        tracker.on_forkchoice_update_received();

        // Assert that there is now a timestamp indicating when the forkchoice update was received
        assert!(tracker.last_forkchoice_update_received_at().is_some());
    }

    #[test]
    fn test_set_canonical_head() {
        // Create a random number generator
        let mut rng = generators::rng();
        // Generate two random headers for testing
        let header1 = random_header(&mut rng, 10, None);
        let header2 = random_header(&mut rng, 20, None);

        // Create a new chain info tracker with the first header
        let tracker: ChainInfoTracker<EthPrimitives> = ChainInfoTracker::new(header1, None, None);

        // Set the second header as the canonical head of the tracker
        tracker.set_canonical_head(header2.clone());

        // Assert that the tracker now uses the second header as its canonical head
        let canonical_head = tracker.get_canonical_head();
        assert_eq!(canonical_head, header2);
    }

    #[test]
    fn test_set_safe() {
        // Create a random number generator
        let mut rng = generators::rng();

        // Case 1: basic test
        // Generate two random headers for the test
        let header1 = random_header(&mut rng, 10, None);
        let header2 = random_header(&mut rng, 20, None);

        // Create a new chain info tracker with the first header (header1)
        let tracker: ChainInfoTracker<EthPrimitives> = ChainInfoTracker::new(header1, None, None);

        // Call the set_safe method with the second header (header2)
        tracker.set_safe(header2.clone());

        // Verify that the tracker now has header2 as the safe block
        let safe_header = tracker.get_safe_header();
        assert!(safe_header.is_some()); // Ensure a safe header is present
        let safe_header = safe_header.unwrap();
        assert_eq!(safe_header, header2);

        // Case 2: call with the same header as the current safe block
        // Call set_safe again with the same header (header2)
        tracker.set_safe(header2.clone());

        // Verify that nothing changes and the safe header remains the same
        let same_safe_header = tracker.get_safe_header();
        assert!(same_safe_header.is_some());
        let same_safe_header = same_safe_header.unwrap();
        assert_eq!(same_safe_header, header2);

        // Case 3: call with a different (new) header
        // Generate a third header with a higher block number
        let header3 = random_header(&mut rng, 30, None);

        // Call set_safe with this new header (header3)
        tracker.set_safe(header3.clone());

        // Verify that the safe header is updated with the new header
        let updated_safe_header = tracker.get_safe_header();
        assert!(updated_safe_header.is_some());
        let updated_safe_header = updated_safe_header.unwrap();
        assert_eq!(updated_safe_header, header3);
    }

    #[test]
    fn test_set_finalized() {
        // Create a random number generator
        let mut rng = generators::rng();

        // Generate random headers for testing
        let header1 = random_header(&mut rng, 10, None);
        let header2 = random_header(&mut rng, 20, None);
        let header3 = random_header(&mut rng, 30, None);

        // Create a new chain info tracker with the first header
        let tracker: ChainInfoTracker<EthPrimitives> = ChainInfoTracker::new(header1, None, None);

        // Initial state: finalized header should be None
        assert!(tracker.get_finalized_header().is_none());

        // Set the second header as the finalized header
        tracker.set_finalized(header2.clone());

        // Assert that the tracker now uses the second header as its finalized block
        let finalized_header = tracker.get_finalized_header();
        assert!(finalized_header.is_some());
        let finalized_header = finalized_header.unwrap();
        assert_eq!(finalized_header, header2);

        // Case 2: attempt to set the same finalized header again
        tracker.set_finalized(header2.clone());

        // The finalized header should remain unchanged
        let unchanged_finalized_header = tracker.get_finalized_header();
        assert_eq!(unchanged_finalized_header.unwrap(), header2); // Should still be header2

        // Case 3: set a higher block number as finalized
        tracker.set_finalized(header3.clone());

        // The finalized header should now be updated to header3
        let updated_finalized_header = tracker.get_finalized_header();
        assert!(updated_finalized_header.is_some());
        assert_eq!(updated_finalized_header.unwrap(), header3);
    }

    #[test]
    fn test_get_finalized_num_hash() {
        // Create a random header
        let mut rng = generators::rng();
        let finalized_header = random_header(&mut rng, 10, None);

        // Create a new chain info tracker with the finalized header
        let tracker: ChainInfoTracker<EthPrimitives> =
            ChainInfoTracker::new(finalized_header.clone(), Some(finalized_header.clone()), None);

        // Assert that the BlockNumHash returned matches the finalized header
        assert_eq!(tracker.get_finalized_num_hash(), Some(finalized_header.num_hash()));
    }

    #[test]
    fn test_get_safe_num_hash() {
        // Create a random header
        let mut rng = generators::rng();
        let safe_header = random_header(&mut rng, 10, None);

        // Create a new chain info tracker with the safe header
        let tracker: ChainInfoTracker<EthPrimitives> =
            ChainInfoTracker::new(safe_header.clone(), None, None);
        tracker.set_safe(safe_header.clone());

        // Assert that the BlockNumHash returned matches the safe header
        assert_eq!(tracker.get_safe_num_hash(), Some(safe_header.num_hash()));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/lib.rs | crates/chain-state/src/lib.rs | //! Reth state related types and functionality.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use revm_state as _;
mod in_memory;
pub use in_memory::*;
mod noop;
mod chain_info;
pub use chain_info::ChainInfoTracker;
mod notifications;
pub use notifications::{
CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream,
CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications, ForkChoiceStream,
ForkChoiceSubscriptions,
};
mod memory_overlay;
pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderRef};
#[cfg(any(test, feature = "test-utils"))]
/// Common test helpers
pub mod test_utils;
// todo: remove when generic data prim integration complete
pub use reth_ethereum_primitives::EthPrimitives;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/noop.rs | crates/chain-state/src/noop.rs | //! Noop impls for testing.
use crate::{
CanonStateNotifications, CanonStateSubscriptions, ForkChoiceNotifications,
ForkChoiceSubscriptions,
};
use reth_primitives_traits::NodePrimitives;
use reth_storage_api::noop::NoopProvider;
use tokio::sync::{broadcast, watch};
impl<C: Send + Sync, N: NodePrimitives> CanonStateSubscriptions for NoopProvider<C, N> {
    /// Returns a receiver whose sending half is dropped immediately, so the
    /// subscription never yields a notification.
    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications<N> {
        let (sender, receiver) = broadcast::channel(1);
        drop(sender);
        receiver
    }
}
impl<C: Send + Sync, N: NodePrimitives> ForkChoiceSubscriptions for NoopProvider<C, N> {
    type Header = N::BlockHeader;

    /// Returns a receiver that starts at `None` and never updates: the sending
    /// half is dropped as soon as the channel is created.
    fn subscribe_safe_block(&self) -> ForkChoiceNotifications<N::BlockHeader> {
        ForkChoiceNotifications(watch::channel(None).1)
    }

    /// Returns a receiver that starts at `None` and never updates: the sending
    /// half is dropped as soon as the channel is created.
    fn subscribe_finalized_block(&self) -> ForkChoiceNotifications<N::BlockHeader> {
        ForkChoiceNotifications(watch::channel(None).1)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/test_utils.rs | crates/chain-state/src/test_utils.rs | use crate::{
in_memory::ExecutedBlockWithTrieUpdates, CanonStateNotification, CanonStateNotifications,
CanonStateSubscriptions, ExecutedTrieUpdates,
};
use alloy_consensus::{Header, SignableTransaction, TxEip1559, TxReceipt, EMPTY_ROOT_HASH};
use alloy_eips::{
eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, INITIAL_BASE_FEE},
eip7685::Requests,
};
use alloy_primitives::{Address, BlockNumber, B256, U256};
use alloy_signer::SignerSync;
use alloy_signer_local::PrivateKeySigner;
use core::marker::PhantomData;
use rand::Rng;
use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS};
use reth_ethereum_primitives::{
Block, BlockBody, EthPrimitives, Receipt, Transaction, TransactionSigned,
};
use reth_execution_types::{Chain, ExecutionOutcome};
use reth_primitives_traits::{
proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root},
Account, NodePrimitives, Recovered, RecoveredBlock, SealedBlock, SealedHeader,
SignedTransaction,
};
use reth_storage_api::NodePrimitivesProvider;
use reth_trie::{root::state_root_unhashed, HashedPostState};
use revm_database::BundleState;
use revm_state::AccountInfo;
use std::{
collections::HashMap,
ops::Range,
sync::{Arc, Mutex},
};
use tokio::sync::broadcast::{self, Sender};
/// Functionality to build blocks for tests and help with assertions about
/// their execution.
#[derive(Debug)]
pub struct TestBlockBuilder<N: NodePrimitives = EthPrimitives> {
    /// The account that signs all the block's transactions.
    pub signer: Address,
    /// Private key for signing.
    pub signer_pk: PrivateKeySigner,
    /// Keeps track of signer's account info after execution, will be updated in
    /// methods related to block execution.
    pub signer_execute_account_info: AccountInfo,
    /// Keeps track of signer's nonce, will be updated in methods related
    /// to block execution.
    pub signer_build_account_info: AccountInfo,
    /// Chain spec of the blocks generated by this builder
    pub chain_spec: ChainSpec,
    /// Marker tying the builder to a [`NodePrimitives`] implementation without
    /// storing any value of that type.
    _prims: PhantomData<N>,
}
impl<N: NodePrimitives> Default for TestBlockBuilder<N> {
    fn default() -> Self {
        // Fund the signer with 10^18 wei for both the build-side and the
        // execute-side views of the account.
        let balance = U256::from(10).pow(U256::from(18));
        let initial_account_info = AccountInfo::from_balance(balance);

        let signer_pk = PrivateKeySigner::random();
        Self {
            chain_spec: ChainSpec::default(),
            signer: signer_pk.address(),
            signer_pk,
            signer_execute_account_info: initial_account_info.clone(),
            signer_build_account_info: initial_account_info,
            _prims: PhantomData,
        }
    }
}
impl<N: NodePrimitives> TestBlockBuilder<N> {
    /// Signer pk setter.
    ///
    /// Also updates `self.signer` to the address derived from the new key so the
    /// two stay consistent.
    pub fn with_signer_pk(mut self, signer_pk: PrivateKeySigner) -> Self {
        self.signer = signer_pk.address();
        self.signer_pk = signer_pk;
        self
    }
    /// Chainspec setter.
    pub fn with_chain_spec(mut self, chain_spec: ChainSpec) -> Self {
        self.chain_spec = chain_spec;
        self
    }
    /// Gas cost of a single transaction generated by the block builder.
    ///
    /// Every mock transaction uses `MIN_TRANSACTION_GAS` at `INITIAL_BASE_FEE`,
    /// so the cost is simply their product.
    pub fn single_tx_cost() -> U256 {
        U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS)
    }
    /// Generates a random [`RecoveredBlock`].
    ///
    /// Builds 0..5 EIP-1559 transactions signed by `self.signer_pk`, derives
    /// consistent transaction/receipt/withdrawals roots, and computes a post-state
    /// root covering only the signer account. The header timestamp is `number`
    /// offset by the Cancun activation timestamp so timestamps increase
    /// monotonically with the block number.
    pub fn generate_random_block(
        &mut self,
        number: BlockNumber,
        parent_hash: B256,
    ) -> RecoveredBlock<reth_ethereum_primitives::Block> {
        let mut rng = rand::rng();
        // Builds and signs a minimal EIP-1559 transaction to a random recipient.
        let mock_tx = |nonce: u64| -> Recovered<_> {
            let tx = Transaction::Eip1559(TxEip1559 {
                chain_id: self.chain_spec.chain.id(),
                nonce,
                gas_limit: MIN_TRANSACTION_GAS,
                to: Address::random().into(),
                max_fee_per_gas: INITIAL_BASE_FEE as u128,
                max_priority_fee_per_gas: 1,
                ..Default::default()
            });
            let signature_hash = tx.signature_hash();
            let signature = self.signer_pk.sign_hash_sync(&signature_hash).unwrap();
            TransactionSigned::new_unhashed(tx, signature).with_signer(self.signer)
        };
        let num_txs = rng.random_range(0..5);
        // Total fee for all transactions in this block.
        let signer_balance_decrease = Self::single_tx_cost() * U256::from(num_txs);
        let transactions: Vec<Recovered<_>> = (0..num_txs)
            .map(|_| {
                let tx = mock_tx(self.signer_build_account_info.nonce);
                self.signer_build_account_info.nonce += 1;
                // NOTE(review): this subtracts the *total* decrease once per
                // transaction (i.e. num_txs times overall), while the state root
                // below subtracts it only once — confirm which is intended.
                self.signer_build_account_info.balance -= signer_balance_decrease;
                tx
            })
            .collect();
        // One receipt per transaction with linearly increasing cumulative gas.
        let receipts = transactions
            .iter()
            .enumerate()
            .map(|(idx, tx)| {
                Receipt {
                    tx_type: tx.tx_type(),
                    success: true,
                    cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS,
                    ..Default::default()
                }
                .into_with_bloom()
            })
            .collect::<Vec<_>>();
        // 10^18 wei, matching the balance the builder is constructed with in `default()`.
        let initial_signer_balance = U256::from(10).pow(U256::from(18));
        let header = Header {
            number,
            parent_hash,
            gas_used: transactions.len() as u64 * MIN_TRANSACTION_GAS,
            mix_hash: B256::random(),
            gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
            base_fee_per_gas: Some(INITIAL_BASE_FEE),
            transactions_root: calculate_transaction_root(
                &transactions.clone().into_iter().map(|tx| tx.into_inner()).collect::<Vec<_>>(),
            ),
            receipts_root: calculate_receipt_root(&receipts),
            beneficiary: Address::random(),
            // State root over the signer account only: the block's total fee is
            // deducted once here.
            state_root: state_root_unhashed(HashMap::from([(
                self.signer,
                Account {
                    balance: initial_signer_balance - signer_balance_decrease,
                    nonce: num_txs,
                    ..Default::default()
                }
                .into_trie_account(EMPTY_ROOT_HASH),
            )])),
            // use the number as the timestamp so it is monotonically increasing
            timestamp: number +
                EthereumHardfork::Cancun.activation_timestamp(self.chain_spec.chain).unwrap(),
            withdrawals_root: Some(calculate_withdrawals_root(&[])),
            blob_gas_used: Some(0),
            excess_blob_gas: Some(0),
            parent_beacon_block_root: Some(B256::random()),
            ..Default::default()
        };
        let block = SealedBlock::from_sealed_parts(
            SealedHeader::seal_slow(header),
            BlockBody {
                transactions: transactions.into_iter().map(|tx| tx.into_inner()).collect(),
                ommers: Vec::new(),
                withdrawals: Some(vec![].into()),
            },
        );
        // All transactions were signed by `self.signer`, so recovery uses one
        // sender entry per transaction.
        RecoveredBlock::try_recover_sealed_with_senders(block, vec![self.signer; num_txs as usize])
            .unwrap()
    }
    /// Creates a fork chain with the given base block.
    ///
    /// Each generated block uses the previous block's hash as its parent, starting
    /// from `base_block`; the base block itself is not part of the returned chain.
    pub fn create_fork(
        &mut self,
        base_block: &SealedBlock<Block>,
        length: u64,
    ) -> Vec<RecoveredBlock<Block>> {
        let mut fork = Vec::with_capacity(length as usize);
        let mut parent = base_block.clone();
        for _ in 0..length {
            let block = self.generate_random_block(parent.number + 1, parent.hash());
            parent = block.clone_sealed_block();
            fork.push(block);
        }
        fork
    }
    /// Gets an [`ExecutedBlockWithTrieUpdates`] with [`BlockNumber`], receipts and parent hash.
    ///
    /// The attached execution outcome carries an empty bundle state, an empty
    /// hashed post-state, and empty trie updates; only the receipts and the first
    /// block number are meaningful.
    fn get_executed_block(
        &mut self,
        block_number: BlockNumber,
        receipts: Vec<Vec<Receipt>>,
        parent_hash: B256,
    ) -> ExecutedBlockWithTrieUpdates {
        let block_with_senders = self.generate_random_block(block_number, parent_hash);
        let (block, senders) = block_with_senders.split_sealed();
        ExecutedBlockWithTrieUpdates::new(
            Arc::new(RecoveredBlock::new_sealed(block, senders)),
            Arc::new(ExecutionOutcome::new(
                BundleState::default(),
                receipts,
                block_number,
                vec![Requests::default()],
            )),
            Arc::new(HashedPostState::default()),
            ExecutedTrieUpdates::empty(),
        )
    }
    /// Generates an [`ExecutedBlockWithTrieUpdates`] that includes the given receipts.
    ///
    /// The block number is chosen at random.
    pub fn get_executed_block_with_receipts(
        &mut self,
        receipts: Vec<Vec<Receipt>>,
        parent_hash: B256,
    ) -> ExecutedBlockWithTrieUpdates {
        let number = rand::rng().random::<u64>();
        self.get_executed_block(number, receipts, parent_hash)
    }
    /// Generates an [`ExecutedBlockWithTrieUpdates`] with the given [`BlockNumber`].
    pub fn get_executed_block_with_number(
        &mut self,
        block_number: BlockNumber,
        parent_hash: B256,
    ) -> ExecutedBlockWithTrieUpdates {
        self.get_executed_block(block_number, vec![vec![]], parent_hash)
    }
    /// Generates a range of executed blocks with ascending block numbers.
    ///
    /// Each block's parent hash is the hash of the previously generated block; the
    /// first block's parent hash is the zero hash.
    pub fn get_executed_blocks(
        &mut self,
        range: Range<u64>,
    ) -> impl Iterator<Item = ExecutedBlockWithTrieUpdates> + '_ {
        let mut parent_hash = B256::default();
        range.map(move |number| {
            let current_parent_hash = parent_hash;
            let block = self.get_executed_block_with_number(number, current_parent_hash);
            parent_hash = block.recovered_block().hash();
            block
        })
    }
    /// Returns the execution outcome for a block created with this builder.
    /// In order to properly include the bundle state, the signer balance is
    /// updated.
    pub fn get_execution_outcome(
        &mut self,
        block: RecoveredBlock<reth_ethereum_primitives::Block>,
    ) -> ExecutionOutcome {
        let num_txs = block.body().transactions.len() as u64;
        let single_cost = Self::single_tx_cost();
        // Deduct one transaction cost per transaction from the tracked
        // execute-side balance.
        let mut final_balance = self.signer_execute_account_info.balance;
        for _ in 0..num_txs {
            final_balance -= single_cost;
        }
        let final_nonce = self.signer_execute_account_info.nonce + num_txs;
        let receipts = block
            .body()
            .transactions
            .iter()
            .enumerate()
            .map(|(idx, tx)| Receipt {
                tx_type: tx.tx_type(),
                success: true,
                cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS,
                ..Default::default()
            })
            .collect::<Vec<_>>();
        let bundle_state = BundleState::builder(block.number..=block.number)
            .state_present_account_info(
                self.signer,
                AccountInfo { nonce: final_nonce, balance: final_balance, ..Default::default() },
            )
            .build();
        // Persist the post-execution account view so subsequent calls continue
        // from this state.
        self.signer_execute_account_info.balance = final_balance;
        self.signer_execute_account_info.nonce = final_nonce;
        // NOTE(review): the outcome is first created with an empty receipt list and
        // then passed through `with_receipts` — presumably `with_receipts` replaces
        // rather than appends; confirm against its implementation.
        let execution_outcome =
            ExecutionOutcome::new(bundle_state, vec![vec![]], block.number, Vec::new());
        execution_outcome.with_receipts(vec![receipts])
    }
}
impl TestBlockBuilder {
    /// Convenience constructor for a builder over Ethereum primitives.
    pub fn eth() -> Self {
        Default::default()
    }
}
/// A test `ChainEventSubscriptions`
#[derive(Clone, Debug, Default)]
pub struct TestCanonStateSubscriptions<N: NodePrimitives = reth_ethereum_primitives::EthPrimitives>
{
    // Senders for every subscriber created via `subscribe_to_canonical_state`;
    // shared behind `Arc<Mutex<..>>` so clones of this type broadcast to the same set.
    canon_notif_tx: Arc<Mutex<Vec<Sender<CanonStateNotification<N>>>>>,
}
impl TestCanonStateSubscriptions {
    /// Adds new block commit to the queue that can be consumed with
    /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`].
    ///
    /// Senders whose receivers have been dropped are removed from the list.
    pub fn add_next_commit(&self, new: Arc<Chain>) {
        let event = CanonStateNotification::Commit { new };
        // `lock().unwrap()` yields the guard directly; the previous
        // `.lock().as_mut().unwrap()` took a needless `&mut` through the
        // `LockResult` before unwrapping.
        self.canon_notif_tx.lock().unwrap().retain(|tx| tx.send(event.clone()).is_ok())
    }
    /// Adds reorg to the queue that can be consumed with
    /// [`TestCanonStateSubscriptions::subscribe_to_canonical_state`].
    ///
    /// `old` is the unwound chain, `new` the chain that replaced it. Senders whose
    /// receivers have been dropped are removed from the list.
    pub fn add_next_reorg(&self, old: Arc<Chain>, new: Arc<Chain>) {
        let event = CanonStateNotification::Reorg { old, new };
        self.canon_notif_tx.lock().unwrap().retain(|tx| tx.send(event.clone()).is_ok())
    }
}
// The test subscription type is fixed to Ethereum primitives.
impl NodePrimitivesProvider for TestCanonStateSubscriptions {
    type Primitives = EthPrimitives;
}
impl CanonStateSubscriptions for TestCanonStateSubscriptions {
    /// Sets up a broadcast channel with a buffer size of 100.
    ///
    /// The sending half is stored so `add_next_commit` / `add_next_reorg` can
    /// broadcast to this subscriber; the receiving half is returned to the caller.
    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications {
        let (canon_notif_tx, canon_notif_rx) = broadcast::channel(100);
        // Same fix as in the inherent impl: unwrap the `LockResult` directly
        // instead of going through `as_mut()`.
        self.canon_notif_tx.lock().unwrap().push(canon_notif_tx);
        canon_notif_rx
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/memory_overlay.rs | crates/chain-state/src/memory_overlay.rs | use super::ExecutedBlockWithTrieUpdates;
use alloy_consensus::BlockHeader;
use alloy_primitives::{keccak256, Address, BlockNumber, Bytes, StorageKey, B256};
use reth_errors::ProviderResult;
use reth_primitives_traits::{Account, Bytecode, NodePrimitives};
use reth_storage_api::{
AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
StateProvider, StateRootProvider, StorageRootProvider,
};
use reth_trie::{
updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
MultiProofTargets, StorageMultiProof, TrieInput,
};
use revm_database::BundleState;
use std::sync::OnceLock;
use alloy_primitives::FlaggedStorage;
/// A state provider that stores references to in-memory blocks along with their state as well as a
/// reference of the historical state provider for fallback lookups.
#[expect(missing_debug_implementations)]
pub struct MemoryOverlayStateProviderRef<
    'a,
    N: NodePrimitives = reth_ethereum_primitives::EthPrimitives,
> {
    /// Historical state provider for state lookups that are not found in memory blocks.
    pub(crate) historical: Box<dyn StateProvider + 'a>,
    /// The collection of executed parent blocks. Expected order is newest to oldest.
    pub(crate) in_memory: Vec<ExecutedBlockWithTrieUpdates<N>>,
    /// Lazy-loaded in-memory trie data.
    // Populated on first use by `trie_input()`; building it aggregates the hashed
    // state and trie updates of all in-memory blocks, so it is deferred until a
    // trie-related query actually needs it.
    pub(crate) trie_input: OnceLock<TrieInput>,
}
/// A state provider that stores references to in-memory blocks along with their state as well as
/// the historical state provider for fallback lookups.
// `'static` means the boxed historical provider must own its data rather than
// borrow it for a shorter lifetime.
pub type MemoryOverlayStateProvider<N> = MemoryOverlayStateProviderRef<'static, N>;
impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
    /// Create new memory overlay state provider.
    ///
    /// ## Arguments
    ///
    /// - `in_memory` - the collection of executed ancestor blocks in reverse.
    /// - `historical` - a historical state provider for the latest ancestor block stored in the
    ///   database.
    pub fn new(
        historical: Box<dyn StateProvider + 'a>,
        in_memory: Vec<ExecutedBlockWithTrieUpdates<N>>,
    ) -> Self {
        Self { historical, in_memory, trie_input: OnceLock::new() }
    }
    /// Turn this state provider into a state provider
    pub fn boxed(self) -> Box<dyn StateProvider + 'a> {
        Box::new(self)
    }
    /// Return lazy-loaded trie state aggregated from in-memory blocks.
    fn trie_input(&self) -> &TrieInput {
        self.trie_input.get_or_init(|| {
            // Iterate oldest-to-newest so later blocks overlay earlier ones.
            let per_block = self
                .in_memory
                .iter()
                .rev()
                .map(|b| (b.hashed_state.as_ref(), b.trie.as_ref()));
            TrieInput::from_blocks(per_block)
        })
    }
}
impl<N: NodePrimitives> BlockHashReader for MemoryOverlayStateProviderRef<'_, N> {
    /// Returns the hash of block `number`, checking the in-memory blocks first and
    /// falling back to the historical provider.
    fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
        for block in &self.in_memory {
            if block.recovered_block().number() == number {
                return Ok(Some(block.recovered_block().hash()));
            }
        }
        self.historical.block_hash(number)
    }
    /// Returns the canonical hashes for blocks in `start..end` (end exclusive),
    /// combining historical hashes with the in-memory overlay.
    fn canonical_hashes_range(
        &self,
        start: BlockNumber,
        end: BlockNumber,
    ) -> ProviderResult<Vec<B256>> {
        let range = start..end;
        let mut earliest_block_number = None;
        let mut in_memory_hashes = Vec::new();
        for block in &self.in_memory {
            if range.contains(&block.recovered_block().number()) {
                in_memory_hashes.push(block.recovered_block().hash());
                // Last match wins: with `in_memory` ordered newest-to-oldest (see the
                // struct doc), this ends up holding the earliest in-memory number.
                earliest_block_number = Some(block.recovered_block().number());
            }
        }
        // `self.in_memory` is expected newest-to-oldest (struct doc), so the hashes
        // were collected in descending order; reverse to ascending before appending
        // after the (ascending) historical hashes.
        // NOTE(review): the previous comment here claimed the opposite ordering
        // (oldest-to-newest); the reverse+append below is only consistent with
        // newest-to-oldest — confirm the invariant at the call sites.
        in_memory_hashes.reverse();
        let mut hashes =
            self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?;
        hashes.append(&mut in_memory_hashes);
        Ok(hashes)
    }
}
impl<N: NodePrimitives> AccountReader for MemoryOverlayStateProviderRef<'_, N> {
    /// Looks the account up in the in-memory overlay blocks first; falls back to
    /// the historical provider when no in-memory block has it.
    fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
        match self.in_memory.iter().find_map(|block| block.execution_output.account(address)) {
            Some(account) => Ok(account),
            None => self.historical.basic_account(address),
        }
    }
}
impl<N: NodePrimitives> StateRootProvider for MemoryOverlayStateProviderRef<'_, N> {
    /// Computes the state root for `state` applied on top of the in-memory overlay.
    fn state_root(&self, state: HashedPostState) -> ProviderResult<B256> {
        self.state_root_from_nodes(TrieInput::from_state(state))
    }
    /// Prepends the lazily-aggregated in-memory trie input to the caller's input
    /// before delegating to the historical provider.
    fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult<B256> {
        input.prepend_self(self.trie_input().clone());
        self.historical.state_root_from_nodes(input)
    }
    /// Like [`Self::state_root`], but also returns the resulting trie updates.
    fn state_root_with_updates(
        &self,
        state: HashedPostState,
    ) -> ProviderResult<(B256, TrieUpdates)> {
        self.state_root_from_nodes_with_updates(TrieInput::from_state(state))
    }
    /// Like [`Self::state_root_from_nodes`], but also returns the resulting trie updates.
    fn state_root_from_nodes_with_updates(
        &self,
        mut input: TrieInput,
    ) -> ProviderResult<(B256, TrieUpdates)> {
        input.prepend_self(self.trie_input().clone());
        self.historical.state_root_from_nodes_with_updates(input)
    }
}
impl<N: NodePrimitives> MemoryOverlayStateProviderRef<'_, N> {
    /// Returns the in-memory hashed storage for `address` extended with the
    /// caller-provided overlay `storage`.
    ///
    /// Shared by all [`StorageRootProvider`] methods below, which previously
    /// repeated this lookup-clone-extend sequence verbatim.
    fn merged_hashed_storage(&self, address: Address, storage: &HashedStorage) -> HashedStorage {
        let state = &self.trie_input().state;
        let mut hashed_storage =
            state.storages.get(&keccak256(address)).cloned().unwrap_or_default();
        hashed_storage.extend(storage);
        hashed_storage
    }
}
impl<N: NodePrimitives> StorageRootProvider for MemoryOverlayStateProviderRef<'_, N> {
    // TODO: Currently this does not reuse available in-memory trie nodes.
    /// Computes the storage root of `address` with the in-memory storage overlay applied.
    fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult<B256> {
        let hashed_storage = self.merged_hashed_storage(address, &storage);
        self.historical.storage_root(address, hashed_storage)
    }
    // TODO: Currently this does not reuse available in-memory trie nodes.
    /// Generates a storage proof for `slot` of `address` with the overlay applied.
    fn storage_proof(
        &self,
        address: Address,
        slot: B256,
        storage: HashedStorage,
    ) -> ProviderResult<reth_trie::StorageProof> {
        let hashed_storage = self.merged_hashed_storage(address, &storage);
        self.historical.storage_proof(address, slot, hashed_storage)
    }
    // TODO: Currently this does not reuse available in-memory trie nodes.
    /// Generates a storage multiproof for `slots` of `address` with the overlay applied.
    fn storage_multiproof(
        &self,
        address: Address,
        slots: &[B256],
        storage: HashedStorage,
    ) -> ProviderResult<StorageMultiProof> {
        let hashed_storage = self.merged_hashed_storage(address, &storage);
        self.historical.storage_multiproof(address, slots, hashed_storage)
    }
}
impl<N: NodePrimitives> StateProofProvider for MemoryOverlayStateProviderRef<'_, N> {
    /// Generates an account proof, prepending the in-memory trie input to the
    /// caller's input before delegating to the historical provider.
    fn proof(
        &self,
        mut input: TrieInput,
        address: Address,
        slots: &[B256],
    ) -> ProviderResult<AccountProof> {
        input.prepend_self(self.trie_input().clone());
        self.historical.proof(input, address, slots)
    }
    /// Generates a multiproof for the given targets with the overlay input prepended.
    fn multiproof(
        &self,
        mut input: TrieInput,
        targets: MultiProofTargets,
    ) -> ProviderResult<MultiProof> {
        input.prepend_self(self.trie_input().clone());
        self.historical.multiproof(input, targets)
    }
    /// Generates a state witness for `target` with the overlay input prepended.
    fn witness(&self, mut input: TrieInput, target: HashedPostState) -> ProviderResult<Vec<Bytes>> {
        input.prepend_self(self.trie_input().clone());
        self.historical.witness(input, target)
    }
}
impl<N: NodePrimitives> HashedPostStateProvider for MemoryOverlayStateProviderRef<'_, N> {
    /// Delegates hashing of the bundle state to the historical provider; the
    /// in-memory overlay contributes nothing here.
    fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState {
        self.historical.hashed_post_state(bundle_state)
    }
}
impl<N: NodePrimitives> StateProvider for MemoryOverlayStateProviderRef<'_, N> {
    /// Reads a storage slot, preferring the in-memory overlay blocks and falling
    /// back to the historical provider.
    fn storage(
        &self,
        address: Address,
        storage_key: StorageKey,
    ) -> ProviderResult<Option<FlaggedStorage>> {
        let overlay_value = self
            .in_memory
            .iter()
            .find_map(|block| block.execution_output.storage(&address, storage_key.into()));
        match overlay_value {
            Some(value) => Ok(Some(value)),
            None => self.historical.storage(address, storage_key),
        }
    }
}
impl<N: NodePrimitives> BytecodeReader for MemoryOverlayStateProviderRef<'_, N> {
    /// Resolves bytecode by hash, preferring contracts deployed in the in-memory
    /// overlay blocks over the historical provider.
    fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
        if let Some(contract) =
            self.in_memory.iter().find_map(|block| block.execution_output.bytecode(code_hash))
        {
            return Ok(Some(contract));
        }
        self.historical.bytecode_by_hash(code_hash)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/in_memory.rs | crates/chain-state/src/in_memory.rs | //! Types for tracking the canonical chain state in memory.
use crate::{
CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications,
ChainInfoTracker, MemoryOverlayStateProvider,
};
use alloy_consensus::{transaction::TransactionMeta, BlockHeader};
use alloy_eips::{BlockHashOrNumber, BlockNumHash};
use alloy_primitives::{map::HashMap, BlockNumber, TxHash, B256};
use parking_lot::RwLock;
use reth_chainspec::ChainInfo;
use reth_ethereum_primitives::EthPrimitives;
use reth_execution_types::{Chain, ExecutionOutcome};
use reth_metrics::{metrics::Gauge, Metrics};
use reth_primitives_traits::{
BlockBody as _, IndexedTx, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader,
SignedTransaction,
};
use reth_storage_api::StateProviderBox;
use reth_trie::{updates::TrieUpdates, HashedPostState};
use std::{collections::BTreeMap, sync::Arc, time::Instant};
use tokio::sync::{broadcast, watch};
/// Size of the broadcast channel used to notify canonical state events.
// Passed as the buffer to `tokio::sync::broadcast::channel` wherever notification
// channels are created in this module.
const CANON_STATE_NOTIFICATION_CHANNEL_SIZE: usize = 256;
/// Metrics for the in-memory state.
// All three gauges are refreshed together by `InMemoryState::update_metrics`.
#[derive(Metrics)]
#[metrics(scope = "blockchain_tree.in_mem_state")]
pub(crate) struct InMemoryStateMetrics {
    /// The block number of the earliest block in the in-memory state.
    pub(crate) earliest_block: Gauge,
    /// The block number of the latest block in the in-memory state.
    pub(crate) latest_block: Gauge,
    /// The number of blocks in the in-memory state.
    pub(crate) num_blocks: Gauge,
}
/// Container type for in memory state data of the canonical chain.
///
/// This tracks blocks and their state that haven't been persisted to disk yet but are part of the
/// canonical chain that can be traced back to a canonical block on disk.
///
/// # Locking behavior on state updates
///
/// All update calls must acquire all locks at once before modifying state to ensure the internal
/// state remains consistent. This prevents readers from observing partially updated state where
/// the numbers and blocks maps are out of sync.
/// Update functions ensure that the numbers write lock is always acquired first, because lookup by
/// numbers first read the numbers map and then the blocks map.
/// By acquiring the numbers lock first, we ensure that read-only lookups don't deadlock updates.
/// This holds, because only lookup by number functions need to acquire the numbers lock first to
/// get the block hash.
#[derive(Debug, Default)]
pub(crate) struct InMemoryState<N: NodePrimitives = EthPrimitives> {
    /// All canonical blocks that are not on disk yet.
    blocks: RwLock<HashMap<B256, Arc<BlockState<N>>>>,
    /// Mapping of block numbers to block hashes.
    numbers: RwLock<BTreeMap<u64, B256>>,
    /// The pending block that has not yet been made canonical.
    // A `watch` sender so consumers can both read the current value (`borrow`) and
    // be notified when it changes; updated via `send_modify`.
    pending: watch::Sender<Option<BlockState<N>>>,
    /// Metrics for the in-memory state.
    metrics: InMemoryStateMetrics,
}
impl<N: NodePrimitives> InMemoryState<N> {
    /// Creates a new in-memory state from the given blocks, number index and
    /// optional pending block, and initializes the metrics from them.
    pub(crate) fn new(
        blocks: HashMap<B256, Arc<BlockState<N>>>,
        numbers: BTreeMap<u64, B256>,
        pending: Option<BlockState<N>>,
    ) -> Self {
        let (pending, _) = watch::channel(pending);
        let this = Self {
            blocks: RwLock::new(blocks),
            numbers: RwLock::new(numbers),
            pending,
            metrics: Default::default(),
        };
        this.update_metrics();
        this
    }
    /// Update the metrics for the in-memory state.
    ///
    /// # Locking behavior
    ///
    /// This tries to acquire a read lock. Drop any write locks before calling this.
    pub(crate) fn update_metrics(&self) {
        let numbers = self.numbers.read();
        // `BTreeMap` keeps keys sorted, so first/last key are the earliest/latest blocks.
        if let Some((earliest_block_number, _)) = numbers.first_key_value() {
            self.metrics.earliest_block.set(*earliest_block_number as f64);
        }
        if let Some((latest_block_number, _)) = numbers.last_key_value() {
            self.metrics.latest_block.set(*latest_block_number as f64);
        }
        self.metrics.num_blocks.set(numbers.len() as f64);
    }
    /// Returns the state for a given block hash.
    pub(crate) fn state_by_hash(&self, hash: B256) -> Option<Arc<BlockState<N>>> {
        self.blocks.read().get(&hash).cloned()
    }
    /// Returns the state for a given block number.
    // Reads the numbers lock first, then the blocks lock — the order the struct
    // docs require for lookups by number.
    pub(crate) fn state_by_number(&self, number: u64) -> Option<Arc<BlockState<N>>> {
        let hash = self.hash_by_number(number)?;
        self.state_by_hash(hash)
    }
    /// Returns the hash for a specific block number
    pub(crate) fn hash_by_number(&self, number: u64) -> Option<B256> {
        self.numbers.read().get(&number).copied()
    }
    /// Returns the current chain head state.
    pub(crate) fn head_state(&self) -> Option<Arc<BlockState<N>>> {
        let hash = *self.numbers.read().last_key_value()?.1;
        self.state_by_hash(hash)
    }
    /// Returns the pending state corresponding to the current head plus one,
    /// from the payload received in newPayload that does not have a FCU yet.
    pub(crate) fn pending_state(&self) -> Option<BlockState<N>> {
        self.pending.borrow().clone()
    }
    /// Number of in-memory blocks; test-only helper.
    #[cfg(test)]
    fn block_count(&self) -> usize {
        self.blocks.read().len()
    }
}
/// Inner type to provide in memory state. It includes a chain tracker to be
/// advanced internally by the tree.
// Shared behind an `Arc` by `CanonicalInMemoryState`, so all clones observe the
// same chain info, blocks and notification channel.
#[derive(Debug)]
pub(crate) struct CanonicalInMemoryStateInner<N: NodePrimitives> {
    /// Tracks certain chain information, such as the canonical head, safe head, and finalized
    /// head.
    pub(crate) chain_info_tracker: ChainInfoTracker<N>,
    /// Tracks blocks at the tip of the chain that have not been persisted to disk yet.
    pub(crate) in_memory_state: InMemoryState<N>,
    /// A broadcast stream that emits events when the canonical chain is updated.
    pub(crate) canon_state_notification_sender: CanonStateNotificationSender<N>,
}
impl<N: NodePrimitives> CanonicalInMemoryStateInner<N> {
    /// Clears all entries in the in memory state.
    fn clear(&self) {
        {
            // acquire locks, starting with the numbers lock
            // (matching the lock-ordering invariant documented on `InMemoryState`)
            let mut numbers = self.in_memory_state.numbers.write();
            let mut blocks = self.in_memory_state.blocks.write();
            numbers.clear();
            blocks.clear();
            self.in_memory_state.pending.send_modify(|p| {
                p.take();
            });
        }
        // Write guards are dropped at the end of the block above; `update_metrics`
        // takes a read lock, so it must run outside that scope.
        self.in_memory_state.update_metrics();
    }
}
/// A recovered pending block paired with the receipts its execution produced.
type PendingBlockAndReceipts<N> =
    (RecoveredBlock<<N as NodePrimitives>::Block>, Vec<reth_primitives_traits::ReceiptTy<N>>);
/// This type is responsible for providing the blocks, receipts, and state for
/// all canonical blocks not on disk yet and keeps track of the block range that
/// is in memory.
#[derive(Debug, Clone)]
pub struct CanonicalInMemoryState<N: NodePrimitives = EthPrimitives> {
    // `Arc`-shared inner state: cloning this handle is cheap and all clones
    // operate on the same chain view.
    pub(crate) inner: Arc<CanonicalInMemoryStateInner<N>>,
}
impl<N: NodePrimitives> CanonicalInMemoryState<N> {
/// Create a new in-memory state with the given blocks, numbers, pending state, and optional
/// finalized header.
pub fn new(
blocks: HashMap<B256, Arc<BlockState<N>>>,
numbers: BTreeMap<u64, B256>,
pending: Option<BlockState<N>>,
finalized: Option<SealedHeader<N::BlockHeader>>,
safe: Option<SealedHeader<N::BlockHeader>>,
) -> Self {
let in_memory_state = InMemoryState::new(blocks, numbers, pending);
let header = in_memory_state.head_state().map_or_else(SealedHeader::default, |state| {
state.block_ref().recovered_block().clone_sealed_header()
});
let chain_info_tracker = ChainInfoTracker::new(header, finalized, safe);
let (canon_state_notification_sender, _) =
broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE);
Self {
inner: Arc::new(CanonicalInMemoryStateInner {
chain_info_tracker,
in_memory_state,
canon_state_notification_sender,
}),
}
}
/// Create an empty state.
pub fn empty() -> Self {
Self::new(HashMap::default(), BTreeMap::new(), None, None, None)
}
/// Create a new in memory state with the given local head and finalized header
/// if it exists.
pub fn with_head(
head: SealedHeader<N::BlockHeader>,
finalized: Option<SealedHeader<N::BlockHeader>>,
safe: Option<SealedHeader<N::BlockHeader>>,
) -> Self {
let chain_info_tracker = ChainInfoTracker::new(head, finalized, safe);
let in_memory_state = InMemoryState::default();
let (canon_state_notification_sender, _) =
broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE);
let inner = CanonicalInMemoryStateInner {
chain_info_tracker,
in_memory_state,
canon_state_notification_sender,
};
Self { inner: Arc::new(inner) }
}
/// Returns the block hash corresponding to the given number.
pub fn hash_by_number(&self, number: u64) -> Option<B256> {
self.inner.in_memory_state.hash_by_number(number)
}
/// Returns the header corresponding to the given hash.
pub fn header_by_hash(&self, hash: B256) -> Option<SealedHeader<N::BlockHeader>> {
self.state_by_hash(hash)
.map(|block| block.block_ref().recovered_block().clone_sealed_header())
}
/// Clears all entries in the in memory state.
pub fn clear_state(&self) {
self.inner.clear()
}
/// Updates the pending block with the given block.
///
/// Note: This assumes that the parent block of the pending block is canonical.
pub fn set_pending_block(&self, pending: ExecutedBlockWithTrieUpdates<N>) {
// fetch the state of the pending block's parent block
let parent = self.state_by_hash(pending.recovered_block().parent_hash());
let pending = BlockState::with_parent(pending, parent);
self.inner.in_memory_state.pending.send_modify(|p| {
p.replace(pending);
});
self.inner.in_memory_state.update_metrics();
}
/// Append new blocks to the in memory state.
///
/// This removes all reorged blocks and appends the new blocks to the tracked chain and connects
/// them to their parent blocks.
fn update_blocks<I, R>(&self, new_blocks: I, reorged: R)
where
I: IntoIterator<Item = ExecutedBlockWithTrieUpdates<N>>,
R: IntoIterator<Item = ExecutedBlock<N>>,
{
{
// acquire locks, starting with the numbers lock
let mut numbers = self.inner.in_memory_state.numbers.write();
let mut blocks = self.inner.in_memory_state.blocks.write();
// we first remove the blocks from the reorged chain
for block in reorged {
let hash = block.recovered_block().hash();
let number = block.recovered_block().number();
blocks.remove(&hash);
numbers.remove(&number);
}
// insert the new blocks
for block in new_blocks {
let parent = blocks.get(&block.recovered_block().parent_hash()).cloned();
let block_state = BlockState::with_parent(block, parent);
let hash = block_state.hash();
let number = block_state.number();
// append new blocks
blocks.insert(hash, Arc::new(block_state));
numbers.insert(number, hash);
}
// remove the pending state
self.inner.in_memory_state.pending.send_modify(|p| {
p.take();
});
}
self.inner.in_memory_state.update_metrics();
}
/// Update the in memory state with the given chain update.
pub fn update_chain(&self, new_chain: NewCanonicalChain<N>) {
match new_chain {
NewCanonicalChain::Commit { new } => {
self.update_blocks(new, vec![]);
}
NewCanonicalChain::Reorg { new, old } => {
self.update_blocks(new, old);
}
}
}
/// Removes blocks from the in memory state that are persisted to the given height.
///
/// This will update the links between blocks and remove all blocks that are [..
/// `persisted_height`].
pub fn remove_persisted_blocks(&self, persisted_num_hash: BlockNumHash) {
// if the persisted hash is not in the canonical in memory state, do nothing, because it
// means canonical blocks were not actually persisted.
//
// This can happen if the persistence task takes a long time, while a reorg is happening.
{
if self.inner.in_memory_state.blocks.read().get(&persisted_num_hash.hash).is_none() {
// do nothing
return
}
}
{
// acquire locks, starting with the numbers lock
let mut numbers = self.inner.in_memory_state.numbers.write();
let mut blocks = self.inner.in_memory_state.blocks.write();
let BlockNumHash { number: persisted_height, hash: _ } = persisted_num_hash;
// clear all numbers
numbers.clear();
// drain all blocks and only keep the ones that are not persisted (below the persisted
// height)
let mut old_blocks = blocks
.drain()
.filter(|(_, b)| b.block_ref().recovered_block().number() > persisted_height)
.map(|(_, b)| b.block.clone())
.collect::<Vec<_>>();
// sort the blocks by number so we can insert them back in natural order (low -> high)
old_blocks.sort_unstable_by_key(|block| block.recovered_block().number());
// re-insert the blocks in natural order and connect them to their parent blocks
for block in old_blocks {
let parent = blocks.get(&block.recovered_block().parent_hash()).cloned();
let block_state = BlockState::with_parent(block, parent);
let hash = block_state.hash();
let number = block_state.number();
// append new blocks
blocks.insert(hash, Arc::new(block_state));
numbers.insert(number, hash);
}
// also shift the pending state if it exists
self.inner.in_memory_state.pending.send_modify(|p| {
if let Some(p) = p.as_mut() {
p.parent = blocks.get(&p.block_ref().recovered_block().parent_hash()).cloned();
}
});
}
self.inner.in_memory_state.update_metrics();
}
/// Returns in memory state corresponding the given hash.
pub fn state_by_hash(&self, hash: B256) -> Option<Arc<BlockState<N>>> {
self.inner.in_memory_state.state_by_hash(hash)
}
/// Returns in memory state corresponding the block number.
pub fn state_by_number(&self, number: u64) -> Option<Arc<BlockState<N>>> {
self.inner.in_memory_state.state_by_number(number)
}
/// Returns the in memory head state.
pub fn head_state(&self) -> Option<Arc<BlockState<N>>> {
self.inner.in_memory_state.head_state()
}
/// Returns the in memory pending state.
pub fn pending_state(&self) -> Option<BlockState<N>> {
self.inner.in_memory_state.pending_state()
}
/// Returns the in memory pending `BlockNumHash`.
pub fn pending_block_num_hash(&self) -> Option<BlockNumHash> {
self.inner
.in_memory_state
.pending_state()
.map(|state| BlockNumHash { number: state.number(), hash: state.hash() })
}
/// Returns the current `ChainInfo`.
pub fn chain_info(&self) -> ChainInfo {
self.inner.chain_info_tracker.chain_info()
}
/// Returns the latest canonical block number.
pub fn get_canonical_block_number(&self) -> u64 {
self.inner.chain_info_tracker.get_canonical_block_number()
}
/// Returns the `BlockNumHash` of the safe head.
pub fn get_safe_num_hash(&self) -> Option<BlockNumHash> {
self.inner.chain_info_tracker.get_safe_num_hash()
}
/// Returns the `BlockNumHash` of the finalized head.
pub fn get_finalized_num_hash(&self) -> Option<BlockNumHash> {
self.inner.chain_info_tracker.get_finalized_num_hash()
}
/// Hook for new fork choice update.
pub fn on_forkchoice_update_received(&self) {
self.inner.chain_info_tracker.on_forkchoice_update_received();
}
/// Returns the timestamp of the last received update.
pub fn last_received_update_timestamp(&self) -> Option<Instant> {
self.inner.chain_info_tracker.last_forkchoice_update_received_at()
}
/// Canonical head setter.
pub fn set_canonical_head(&self, header: SealedHeader<N::BlockHeader>) {
self.inner.chain_info_tracker.set_canonical_head(header);
}
/// Safe head setter.
pub fn set_safe(&self, header: SealedHeader<N::BlockHeader>) {
self.inner.chain_info_tracker.set_safe(header);
}
/// Finalized head setter.
pub fn set_finalized(&self, header: SealedHeader<N::BlockHeader>) {
self.inner.chain_info_tracker.set_finalized(header);
}
/// Canonical head getter.
pub fn get_canonical_head(&self) -> SealedHeader<N::BlockHeader> {
self.inner.chain_info_tracker.get_canonical_head()
}
/// Finalized header getter.
pub fn get_finalized_header(&self) -> Option<SealedHeader<N::BlockHeader>> {
self.inner.chain_info_tracker.get_finalized_header()
}
/// Safe header getter.
pub fn get_safe_header(&self) -> Option<SealedHeader<N::BlockHeader>> {
self.inner.chain_info_tracker.get_safe_header()
}
/// Returns the `SealedHeader` corresponding to the pending state.
pub fn pending_sealed_header(&self) -> Option<SealedHeader<N::BlockHeader>> {
self.pending_state().map(|h| h.block_ref().recovered_block().clone_sealed_header())
}
/// Returns the `Header` corresponding to the pending state.
pub fn pending_header(&self) -> Option<N::BlockHeader> {
self.pending_sealed_header().map(|sealed_header| sealed_header.unseal())
}
/// Returns the `SealedBlock` corresponding to the pending state.
pub fn pending_block(&self) -> Option<SealedBlock<N::Block>> {
self.pending_state()
.map(|block_state| block_state.block_ref().recovered_block().sealed_block().clone())
}
/// Returns the `RecoveredBlock` corresponding to the pending state.
pub fn pending_recovered_block(&self) -> Option<RecoveredBlock<N::Block>>
where
N::SignedTx: SignedTransaction,
{
self.pending_state().map(|block_state| block_state.block_ref().recovered_block().clone())
}
/// Returns a tuple with the `SealedBlock` corresponding to the pending
/// state and a vector of its `Receipt`s.
pub fn pending_block_and_receipts(&self) -> Option<PendingBlockAndReceipts<N>> {
self.pending_state().map(|block_state| {
(
block_state.block_ref().recovered_block().clone(),
block_state.executed_block_receipts(),
)
})
}
/// Subscribe to new blocks events.
pub fn subscribe_canon_state(&self) -> CanonStateNotifications<N> {
self.inner.canon_state_notification_sender.subscribe()
}
/// Subscribe to new safe block events.
pub fn subscribe_safe_block(&self) -> watch::Receiver<Option<SealedHeader<N::BlockHeader>>> {
self.inner.chain_info_tracker.subscribe_safe_block()
}
/// Subscribe to new finalized block events.
pub fn subscribe_finalized_block(
&self,
) -> watch::Receiver<Option<SealedHeader<N::BlockHeader>>> {
self.inner.chain_info_tracker.subscribe_finalized_block()
}
/// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles.
pub fn notify_canon_state(&self, event: CanonStateNotification<N>) {
self.inner.canon_state_notification_sender.send(event).ok();
}
/// Return state provider with reference to in-memory blocks that overlay database state.
///
/// This merges the state of all blocks that are part of the chain that the requested block is
/// the head of. This includes all blocks that connect back to the canonical block on disk.
pub fn state_provider(
&self,
hash: B256,
historical: StateProviderBox,
) -> MemoryOverlayStateProvider<N> {
let in_memory = if let Some(state) = self.state_by_hash(hash) {
state.chain().map(|block_state| block_state.block()).collect()
} else {
Vec::new()
};
MemoryOverlayStateProvider::new(historical, in_memory)
}
/// Returns an iterator over all __canonical blocks__ in the in-memory state, from newest to
/// oldest (highest to lowest).
///
/// This iterator contains a snapshot of the in-memory state at the time of the call.
pub fn canonical_chain(&self) -> impl Iterator<Item = Arc<BlockState<N>>> {
self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter())
}
/// Returns [`SignedTransaction`] type for the given `TxHash` if found.
pub fn transaction_by_hash(&self, hash: TxHash) -> Option<N::SignedTx> {
for block_state in self.canonical_chain() {
if let Some(tx) =
block_state.block_ref().recovered_block().body().transaction_by_hash(&hash)
{
return Some(tx.clone())
}
}
None
}
/// Returns a tuple with [`SignedTransaction`] type and [`TransactionMeta`] for the
/// given [`TxHash`] if found.
pub fn transaction_by_hash_with_meta(
&self,
tx_hash: TxHash,
) -> Option<(N::SignedTx, TransactionMeta)> {
for block_state in self.canonical_chain() {
if let Some(indexed) = block_state.find_indexed(tx_hash) {
return Some((indexed.tx().clone(), indexed.meta()));
}
}
None
}
}
/// State after applying the given block, this block is part of the canonical chain that partially
/// stored in memory and can be traced back to a canonical block on disk.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct BlockState<N: NodePrimitives = EthPrimitives> {
/// The executed block that determines the state after this block has been executed.
block: ExecutedBlockWithTrieUpdates<N>,
/// The block's parent block if it exists.
parent: Option<Arc<BlockState<N>>>,
}
impl<N: NodePrimitives> BlockState<N> {
/// [`BlockState`] constructor.
pub const fn new(block: ExecutedBlockWithTrieUpdates<N>) -> Self {
Self { block, parent: None }
}
/// [`BlockState`] constructor with parent.
pub const fn with_parent(
block: ExecutedBlockWithTrieUpdates<N>,
parent: Option<Arc<Self>>,
) -> Self {
Self { block, parent }
}
/// Returns the hash and block of the on disk block this state can be traced back to.
pub fn anchor(&self) -> BlockNumHash {
let mut current = self;
while let Some(parent) = ¤t.parent {
current = parent;
}
current.block.recovered_block().parent_num_hash()
}
/// Returns the executed block that determines the state.
pub fn block(&self) -> ExecutedBlockWithTrieUpdates<N> {
self.block.clone()
}
/// Returns a reference to the executed block that determines the state.
pub const fn block_ref(&self) -> &ExecutedBlockWithTrieUpdates<N> {
&self.block
}
/// Returns the hash of executed block that determines the state.
pub fn hash(&self) -> B256 {
self.block.recovered_block().hash()
}
/// Returns the block number of executed block that determines the state.
pub fn number(&self) -> u64 {
self.block.recovered_block().number()
}
/// Returns the state root after applying the executed block that determines
/// the state.
pub fn state_root(&self) -> B256 {
self.block.recovered_block().state_root()
}
/// Returns the `Receipts` of executed block that determines the state.
pub fn receipts(&self) -> &Vec<Vec<N::Receipt>> {
&self.block.execution_outcome().receipts
}
/// Returns a vector of `Receipt` of executed block that determines the state.
/// We assume that the `Receipts` in the executed block `ExecutionOutcome`
/// has only one element corresponding to the executed block associated to
/// the state.
pub fn executed_block_receipts(&self) -> Vec<N::Receipt> {
let receipts = self.receipts();
debug_assert!(
receipts.len() <= 1,
"Expected at most one block's worth of receipts, found {}",
receipts.len()
);
receipts.first().cloned().unwrap_or_default()
}
/// Returns a vector of __parent__ `BlockStates`.
///
/// The block state order in the output vector is newest to oldest (highest to lowest):
/// `[5,4,3,2,1]`
///
/// Note: This does not include self.
pub fn parent_state_chain(&self) -> Vec<&Self> {
let mut parents = Vec::new();
let mut current = self.parent.as_deref();
while let Some(parent) = current {
parents.push(parent);
current = parent.parent.as_deref();
}
parents
}
/// Returns a vector of `BlockStates` representing the entire in memory chain.
/// The block state order in the output vector is newest to oldest (highest to lowest),
/// including self as the first element.
pub fn chain(&self) -> impl Iterator<Item = &Self> {
std::iter::successors(Some(self), |state| state.parent.as_deref())
}
/// Appends the parent chain of this [`BlockState`] to the given vector.
pub fn append_parent_chain<'a>(&'a self, chain: &mut Vec<&'a Self>) {
chain.extend(self.parent_state_chain());
}
/// Returns an iterator over the atomically captured chain of in memory blocks.
///
/// This yields the blocks from newest to oldest (highest to lowest).
pub fn iter(self: Arc<Self>) -> impl Iterator<Item = Arc<Self>> {
std::iter::successors(Some(self), |state| state.parent.clone())
}
/// Return state provider with reference to in-memory blocks that overlay database state.
///
/// This merges the state of all blocks that are part of the chain that the this block is
/// the head of. This includes all blocks that connect back to the canonical block on disk.
pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider<N> {
let in_memory = self.chain().map(|block_state| block_state.block()).collect();
MemoryOverlayStateProvider::new(historical, in_memory)
}
/// Tries to find a block by [`BlockHashOrNumber`] in the chain ending at this block.
pub fn block_on_chain(&self, hash_or_num: BlockHashOrNumber) -> Option<&Self> {
self.chain().find(|block| match hash_or_num {
BlockHashOrNumber::Hash(hash) => block.hash() == hash,
BlockHashOrNumber::Number(number) => block.number() == number,
})
}
/// Tries to find a transaction by [`TxHash`] in the chain ending at this block.
pub fn transaction_on_chain(&self, hash: TxHash) -> Option<N::SignedTx> {
self.chain().find_map(|block_state| {
block_state.block_ref().recovered_block().body().transaction_by_hash(&hash).cloned()
})
}
/// Tries to find a transaction with meta by [`TxHash`] in the chain ending at this block.
pub fn transaction_meta_on_chain(
&self,
tx_hash: TxHash,
) -> Option<(N::SignedTx, TransactionMeta)> {
self.chain().find_map(|block_state| {
block_state.find_indexed(tx_hash).map(|indexed| (indexed.tx().clone(), indexed.meta()))
})
}
/// Finds a transaction by hash and returns it with its index and block context.
pub fn find_indexed(&self, tx_hash: TxHash) -> Option<IndexedTx<'_, N::Block>> {
self.block_ref().recovered_block().find_indexed(tx_hash)
}
}
/// Represents an executed block stored in-memory.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
/// Recovered Block
pub recovered_block: Arc<RecoveredBlock<N::Block>>,
/// Block's execution outcome.
pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
/// Block's hashed state.
pub hashed_state: Arc<HashedPostState>,
}
impl<N: NodePrimitives> Default for ExecutedBlock<N> {
fn default() -> Self {
Self {
recovered_block: Default::default(),
execution_output: Default::default(),
hashed_state: Default::default(),
}
}
}
impl<N: NodePrimitives> ExecutedBlock<N> {
/// Returns a reference to an inner [`SealedBlock`]
#[inline]
pub fn sealed_block(&self) -> &SealedBlock<N::Block> {
self.recovered_block.sealed_block()
}
/// Returns a reference to [`RecoveredBlock`]
#[inline]
pub fn recovered_block(&self) -> &RecoveredBlock<N::Block> {
&self.recovered_block
}
/// Returns a reference to the block's execution outcome
#[inline]
pub fn execution_outcome(&self) -> &ExecutionOutcome<N::Receipt> {
&self.execution_output
}
/// Returns a reference to the hashed state result of the execution outcome
#[inline]
pub fn hashed_state(&self) -> &HashedPostState {
&self.hashed_state
}
/// Returns a [`BlockNumber`] of the block.
#[inline]
pub fn block_number(&self) -> BlockNumber {
self.recovered_block.header().number()
}
}
/// Trie updates that result from calculating the state root for the block.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ExecutedTrieUpdates {
/// Trie updates present. State root was calculated, and the trie updates can be applied to the
/// database.
Present(Arc<TrieUpdates>),
/// Trie updates missing. State root was calculated, but the trie updates cannot be applied to
/// the current database state. To apply the updates, the state root must be recalculated, and
/// new trie updates must be generated.
///
/// This can happen when processing fork chain blocks that are building on top of the
/// historical database state. Since we don't store the historical trie state, we cannot
/// generate the trie updates for it.
Missing,
}
impl ExecutedTrieUpdates {
/// Creates a [`ExecutedTrieUpdates`] with present but empty trie updates.
pub fn empty() -> Self {
Self::Present(Arc::default())
}
/// Sets the trie updates to the provided value as present.
pub fn set_present(&mut self, updates: Arc<TrieUpdates>) {
*self = Self::Present(updates);
}
/// Takes the present trie updates, leaving the state as missing.
pub fn take_present(&mut self) -> Option<Arc<TrieUpdates>> {
match self {
Self::Present(updates) => {
let updates = core::mem::take(updates);
*self = Self::Missing;
Some(updates)
}
Self::Missing => None,
}
}
/// Returns a reference to the trie updates if present.
#[allow(clippy::missing_const_for_fn)] // false positive
pub fn as_ref(&self) -> Option<&TrieUpdates> {
match self {
Self::Present(updates) => Some(updates),
Self::Missing => None,
}
}
/// Returns `true` if the trie updates are present.
pub const fn is_present(&self) -> bool {
matches!(self, Self::Present(_))
}
/// Returns `true` if the trie updates are missing.
pub const fn is_missing(&self) -> bool {
matches!(self, Self::Missing)
}
}
/// An [`ExecutedBlock`] with its [`TrieUpdates`].
///
/// We store it as separate type because [`TrieUpdates`] are only available for blocks stored in
/// memory and can't be obtained for canonical persisted blocks.
#[derive(
Clone, Debug, PartialEq, Eq, derive_more::Deref, derive_more::DerefMut, derive_more::Into,
)]
pub struct ExecutedBlockWithTrieUpdates<N: NodePrimitives = EthPrimitives> {
/// Inner [`ExecutedBlock`].
#[deref]
#[deref_mut]
#[into]
pub block: ExecutedBlock<N>,
/// Trie updates that result from calculating the state root for the block.
///
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/chain-state/src/notifications.rs | crates/chain-state/src/notifications.rs | //! Canonical chain state notification trait and types.
use alloy_eips::eip2718::Encodable2718;
use derive_more::{Deref, DerefMut};
use reth_execution_types::{BlockReceipts, Chain};
use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader};
use reth_storage_api::NodePrimitivesProvider;
use std::{
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
use tokio::sync::{broadcast, watch};
use tokio_stream::{
wrappers::{BroadcastStream, WatchStream},
Stream,
};
use tracing::debug;
/// Type alias for a receiver that receives [`CanonStateNotification`]
pub type CanonStateNotifications<N = reth_ethereum_primitives::EthPrimitives> =
broadcast::Receiver<CanonStateNotification<N>>;
/// Type alias for a sender that sends [`CanonStateNotification`]
pub type CanonStateNotificationSender<N = reth_ethereum_primitives::EthPrimitives> =
broadcast::Sender<CanonStateNotification<N>>;
/// A type that allows to register chain related event subscriptions.
pub trait CanonStateSubscriptions: NodePrimitivesProvider + Send + Sync {
/// Get notified when a new canonical chain was imported.
///
/// A canonical chain be one or more blocks, a reorg or a revert.
fn subscribe_to_canonical_state(&self) -> CanonStateNotifications<Self::Primitives>;
/// Convenience method to get a stream of [`CanonStateNotification`].
fn canonical_state_stream(&self) -> CanonStateNotificationStream<Self::Primitives> {
CanonStateNotificationStream {
st: BroadcastStream::new(self.subscribe_to_canonical_state()),
}
}
}
impl<T: CanonStateSubscriptions> CanonStateSubscriptions for &T {
fn subscribe_to_canonical_state(&self) -> CanonStateNotifications<Self::Primitives> {
(*self).subscribe_to_canonical_state()
}
fn canonical_state_stream(&self) -> CanonStateNotificationStream<Self::Primitives> {
(*self).canonical_state_stream()
}
}
/// A Stream of [`CanonStateNotification`].
#[derive(Debug)]
#[pin_project::pin_project]
pub struct CanonStateNotificationStream<N: NodePrimitives = reth_ethereum_primitives::EthPrimitives>
{
#[pin]
st: BroadcastStream<CanonStateNotification<N>>,
}
impl<N: NodePrimitives> Stream for CanonStateNotificationStream<N> {
type Item = CanonStateNotification<N>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
return match ready!(self.as_mut().project().st.poll_next(cx)) {
Some(Ok(notification)) => Poll::Ready(Some(notification)),
Some(Err(err)) => {
debug!(%err, "canonical state notification stream lagging behind");
continue
}
None => Poll::Ready(None),
}
}
}
}
/// A notification that is sent when a new block is imported, or an old block is reverted.
///
/// The notification contains at least one [`Chain`] with the imported segment. If some blocks were
/// reverted (e.g. during a reorg), the old chain is also returned.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(bound = ""))]
pub enum CanonStateNotification<N: NodePrimitives = reth_ethereum_primitives::EthPrimitives> {
/// The canonical chain was extended.
Commit {
/// The newly added chain segment.
new: Arc<Chain<N>>,
},
/// A chain segment was reverted or reorged.
///
/// - In the case of a reorg, the reverted blocks are present in `old`, and the new blocks are
/// present in `new`.
/// - In the case of a revert, the reverted blocks are present in `old`, and `new` is an empty
/// chain segment.
Reorg {
/// The chain segment that was reverted.
old: Arc<Chain<N>>,
/// The chain segment that was added on top of the canonical chain, minus the reverted
/// blocks.
///
/// In the case of a revert, not a reorg, this chain segment is empty.
new: Arc<Chain<N>>,
},
}
impl<N: NodePrimitives> CanonStateNotification<N> {
/// Get the chain segment that was reverted, if any.
pub fn reverted(&self) -> Option<Arc<Chain<N>>> {
match self {
Self::Commit { .. } => None,
Self::Reorg { old, .. } => Some(old.clone()),
}
}
/// Get the newly imported chain segment, if any.
pub fn committed(&self) -> Arc<Chain<N>> {
match self {
Self::Commit { new } | Self::Reorg { new, .. } => new.clone(),
}
}
/// Gets the new tip of the chain.
///
/// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least
/// 1 new block.
///
/// # Panics
///
/// If chain doesn't have any blocks.
pub fn tip(&self) -> &RecoveredBlock<N::Block> {
match self {
Self::Commit { new } | Self::Reorg { new, .. } => new.tip(),
}
}
/// Gets the new tip of the chain.
///
/// If the chain has no blocks, it returns `None`. Otherwise, it returns the new tip for
/// [`Self::Reorg`] and [`Self::Commit`] variants.
pub fn tip_checked(&self) -> Option<&RecoveredBlock<N::Block>> {
match self {
Self::Commit { new } | Self::Reorg { new, .. } => {
if new.is_empty() {
None
} else {
Some(new.tip())
}
}
}
}
/// Get receipts in the reverted and newly imported chain segments with their corresponding
/// block numbers and transaction hashes.
///
/// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted
/// chain segment.
pub fn block_receipts(&self) -> Vec<(BlockReceipts<N::Receipt>, bool)>
where
N::SignedTx: Encodable2718,
{
let mut receipts = Vec::new();
// get old receipts
if let Some(old) = self.reverted() {
receipts
.extend(old.receipts_with_attachment().into_iter().map(|receipt| (receipt, true)));
}
// get new receipts
receipts.extend(
self.committed().receipts_with_attachment().into_iter().map(|receipt| (receipt, false)),
);
receipts
}
}
/// Wrapper around a broadcast receiver that receives fork choice notifications.
#[derive(Debug, Deref, DerefMut)]
pub struct ForkChoiceNotifications<T = alloy_consensus::Header>(
pub watch::Receiver<Option<SealedHeader<T>>>,
);
/// A trait that allows to register to fork choice related events
/// and get notified when a new fork choice is available.
pub trait ForkChoiceSubscriptions: Send + Sync {
/// Block Header type.
type Header: Clone + Send + Sync + 'static;
/// Get notified when a new safe block of the chain is selected.
fn subscribe_safe_block(&self) -> ForkChoiceNotifications<Self::Header>;
/// Get notified when a new finalized block of the chain is selected.
fn subscribe_finalized_block(&self) -> ForkChoiceNotifications<Self::Header>;
/// Convenience method to get a stream of the new safe blocks of the chain.
fn safe_block_stream(&self) -> ForkChoiceStream<SealedHeader<Self::Header>> {
ForkChoiceStream::new(self.subscribe_safe_block().0)
}
/// Convenience method to get a stream of the new finalized blocks of the chain.
fn finalized_block_stream(&self) -> ForkChoiceStream<SealedHeader<Self::Header>> {
ForkChoiceStream::new(self.subscribe_finalized_block().0)
}
}
/// A stream for fork choice watch channels (pending, safe or finalized watchers)
#[derive(Debug)]
#[pin_project::pin_project]
pub struct ForkChoiceStream<T> {
#[pin]
st: WatchStream<Option<T>>,
}
impl<T: Clone + Sync + Send + 'static> ForkChoiceStream<T> {
/// Creates a new `ForkChoiceStream`
pub fn new(rx: watch::Receiver<Option<T>>) -> Self {
Self { st: WatchStream::from_changes(rx) }
}
}
impl<T: Clone + Sync + Send + 'static> Stream for ForkChoiceStream<T> {
type Item = T;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
match ready!(self.as_mut().project().st.poll_next(cx)) {
Some(Some(notification)) => return Poll::Ready(Some(notification)),
Some(None) => {}
None => return Poll::Ready(None),
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::{BlockBody, SignableTransaction, TxLegacy};
use alloy_primitives::{b256, Signature, B256};
use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType};
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::SealedBlock;
#[test]
fn test_commit_notification() {
let block: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
let block1_hash = B256::new([0x01; 32]);
let block2_hash = B256::new([0x02; 32]);
let mut block1 = block.clone();
block1.set_block_number(1);
block1.set_hash(block1_hash);
let mut block2 = block;
block2.set_block_number(2);
block2.set_hash(block2_hash);
let chain: Arc<Chain> = Arc::new(Chain::new(
vec![block1.clone(), block2.clone()],
ExecutionOutcome::default(),
None,
));
// Create a commit notification
let notification = CanonStateNotification::Commit { new: chain.clone() };
// Test that `committed` returns the correct chain
assert_eq!(notification.committed(), chain);
// Test that `reverted` returns None for `Commit`
assert!(notification.reverted().is_none());
// Test that `tip` returns the correct block
assert_eq!(*notification.tip(), block2);
}
#[test]
fn test_reorg_notification() {
let block: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
let block1_hash = B256::new([0x01; 32]);
let block2_hash = B256::new([0x02; 32]);
let block3_hash = B256::new([0x03; 32]);
let mut block1 = block.clone();
block1.set_block_number(1);
block1.set_hash(block1_hash);
let mut block2 = block.clone();
block2.set_block_number(2);
block2.set_hash(block2_hash);
let mut block3 = block;
block3.set_block_number(3);
block3.set_hash(block3_hash);
let old_chain: Arc<Chain> =
Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None));
let new_chain = Arc::new(Chain::new(
vec![block2.clone(), block3.clone()],
ExecutionOutcome::default(),
None,
));
// Create a reorg notification
let notification =
CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() };
// Test that `reverted` returns the old chain
assert_eq!(notification.reverted(), Some(old_chain));
// Test that `committed` returns the new chain
assert_eq!(notification.committed(), new_chain);
// Test that `tip` returns the tip of the new chain (last block in the new chain)
assert_eq!(*notification.tip(), block3);
}
#[test]
fn test_block_receipts_commit() {
// Create a default block instance for use in block definitions.
let mut body = BlockBody::<TransactionSigned>::default();
// Define unique hashes for two blocks to differentiate them in the chain.
let block1_hash = B256::new([0x01; 32]);
let block2_hash = B256::new([0x02; 32]);
// Create a default transaction to include in block1's transactions.
let tx = TxLegacy::default().into_signed(Signature::test_signature()).into();
body.transactions.push(tx);
let block = SealedBlock::<alloy_consensus::Block<TransactionSigned>>::from_sealed_parts(
SealedHeader::seal_slow(alloy_consensus::Header::default()),
body,
)
.try_recover()
.unwrap();
// Create a clone of the default block and customize it to act as block1.
let mut block1 = block.clone();
block1.set_block_number(1);
block1.set_hash(block1_hash);
// Clone the default block and customize it to act as block2.
let mut block2 = block;
block2.set_block_number(2);
block2.set_hash(block2_hash);
// Create a receipt for the transaction in block1.
let receipt1 = Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 12345,
logs: vec![],
success: true,
};
// Wrap the receipt in a `Receipts` structure, as expected in the `ExecutionOutcome`.
let receipts = vec![vec![receipt1.clone()]];
// Define an `ExecutionOutcome` with the created receipts.
let execution_outcome = ExecutionOutcome { receipts, ..Default::default() };
// Create a new chain segment with `block1` and `block2` and the execution outcome.
let new_chain: Arc<Chain> =
Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None));
// Create a commit notification containing the new chain segment.
let notification = CanonStateNotification::Commit { new: new_chain };
// Call `block_receipts` on the commit notification to retrieve block receipts.
let block_receipts = notification.block_receipts();
// Assert that only one receipt entry exists in the `block_receipts` list.
assert_eq!(block_receipts.len(), 1);
// Verify that the first entry matches block1's hash and transaction receipt.
assert_eq!(
block_receipts[0].0,
BlockReceipts {
block: block1.num_hash(),
timestamp: block1.timestamp,
tx_receipts: vec![(
// Transaction hash of a Transaction::default()
b256!("0x20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"),
receipt1
)]
}
);
// Assert that the receipt is from the committed segment (not reverted).
assert!(!block_receipts[0].1);
}
#[test]
fn test_block_receipts_reorg() {
// Define block1 for the old chain segment, which will be reverted.
let mut body = BlockBody::<TransactionSigned>::default();
body.transactions.push(TxLegacy::default().into_signed(Signature::test_signature()).into());
let mut old_block1 =
SealedBlock::<alloy_consensus::Block<TransactionSigned>>::from_sealed_parts(
SealedHeader::seal_slow(alloy_consensus::Header::default()),
body,
)
.try_recover()
.unwrap();
old_block1.set_block_number(1);
old_block1.set_hash(B256::new([0x01; 32]));
// Create a receipt for a transaction in the reverted block.
let old_receipt = Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 54321,
logs: vec![],
success: false,
};
let old_receipts = vec![vec![old_receipt.clone()]];
let old_execution_outcome =
ExecutionOutcome { receipts: old_receipts, ..Default::default() };
// Create an old chain segment to be reverted, containing `old_block1`.
let old_chain: Arc<Chain> =
Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None));
// Define block2 for the new chain segment, which will be committed.
let mut body = BlockBody::<TransactionSigned>::default();
body.transactions.push(TxLegacy::default().into_signed(Signature::test_signature()).into());
let mut new_block1 =
SealedBlock::<alloy_consensus::Block<TransactionSigned>>::from_sealed_parts(
SealedHeader::seal_slow(alloy_consensus::Header::default()),
body,
)
.try_recover()
.unwrap();
new_block1.set_block_number(2);
new_block1.set_hash(B256::new([0x02; 32]));
// Create a receipt for a transaction in the new committed block.
let new_receipt = Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 12345,
logs: vec![],
success: true,
};
let new_receipts = vec![vec![new_receipt.clone()]];
let new_execution_outcome =
ExecutionOutcome { receipts: new_receipts, ..Default::default() };
// Create a new chain segment to be committed, containing `new_block1`.
let new_chain = Arc::new(Chain::new(vec![new_block1.clone()], new_execution_outcome, None));
// Create a reorg notification with both reverted (old) and committed (new) chain segments.
let notification = CanonStateNotification::Reorg { old: old_chain, new: new_chain };
// Retrieve receipts from both old (reverted) and new (committed) segments.
let block_receipts = notification.block_receipts();
// Assert there are two receipt entries, one from each chain segment.
assert_eq!(block_receipts.len(), 2);
// Verify that the first entry matches old_block1 and its receipt from the reverted segment.
assert_eq!(
block_receipts[0].0,
BlockReceipts {
block: old_block1.num_hash(),
timestamp: old_block1.timestamp,
tx_receipts: vec![(
// Transaction hash of a Transaction::default()
b256!("0x20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"),
old_receipt
)]
}
);
// Confirm this is from the reverted segment.
assert!(block_receipts[0].1);
// Verify that the second entry matches new_block1 and its receipt from the committed
// segment.
assert_eq!(
block_receipts[1].0,
BlockReceipts {
block: new_block1.num_hash(),
timestamp: new_block1.timestamp,
tx_receipts: vec![(
// Transaction hash of a Transaction::default()
b256!("0x20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"),
new_receipt
)]
}
);
// Confirm this is from the committed segment.
assert!(!block_receipts[1].1);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/static-file/src/lib.rs | crates/static-file/static-file/src/lib.rs | //! Static file producer implementation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
pub mod segments;
mod static_file_producer;
pub use static_file_producer::{
StaticFileProducer, StaticFileProducerInner, StaticFileProducerResult,
StaticFileProducerWithResult,
};
// Re-export for convenience.
pub use reth_static_file_types::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/static-file/src/static_file_producer.rs | crates/static-file/static-file/src/static_file_producer.rs | //! Support for producing static files.
use crate::{segments, segments::Segment, StaticFileProducerEvent};
use alloy_primitives::BlockNumber;
use parking_lot::Mutex;
use rayon::prelude::*;
use reth_codecs::Compact;
use reth_db_api::table::Value;
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider,
DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory,
};
use reth_prune_types::PruneModes;
use reth_stages_types::StageId;
use reth_static_file_types::{HighestStaticFiles, StaticFileTargets};
use reth_storage_errors::provider::ProviderResult;
use reth_tokio_util::{EventSender, EventStream};
use std::{
ops::{Deref, RangeInclusive},
sync::Arc,
time::Instant,
};
use tracing::{debug, trace};
/// Result of [`StaticFileProducerInner::run`] execution.
pub type StaticFileProducerResult = ProviderResult<StaticFileTargets>;
/// The [`StaticFileProducer`] instance itself with the result of [`StaticFileProducerInner::run`]
pub type StaticFileProducerWithResult<Provider> =
(StaticFileProducer<Provider>, StaticFileProducerResult);
/// Static File producer. It's a wrapper around [`StaticFileProducer`] that allows to share it
/// between threads.
#[derive(Debug)]
pub struct StaticFileProducer<Provider>(Arc<Mutex<StaticFileProducerInner<Provider>>>);
impl<Provider> StaticFileProducer<Provider> {
/// Creates a new [`StaticFileProducer`].
pub fn new(provider: Provider, prune_modes: PruneModes) -> Self {
Self(Arc::new(Mutex::new(StaticFileProducerInner::new(provider, prune_modes))))
}
}
impl<Provider> Clone for StaticFileProducer<Provider> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<Provider> Deref for StaticFileProducer<Provider> {
type Target = Arc<Mutex<StaticFileProducerInner<Provider>>>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// Static File producer routine. See [`StaticFileProducerInner::run`] for more detailed
/// description.
#[derive(Debug)]
pub struct StaticFileProducerInner<Provider> {
/// Provider factory
provider: Provider,
/// Pruning configuration for every part of the data that can be pruned. Set by user, and
/// needed in [`StaticFileProducerInner`] to prevent attempting to move prunable data to static
/// files. See [`StaticFileProducerInner::get_static_file_targets`].
prune_modes: PruneModes,
event_sender: EventSender<StaticFileProducerEvent>,
}
impl<Provider> StaticFileProducerInner<Provider> {
fn new(provider: Provider, prune_modes: PruneModes) -> Self {
Self { provider, prune_modes, event_sender: Default::default() }
}
}
impl<Provider> StaticFileProducerInner<Provider>
where
Provider: StaticFileProviderFactory + DatabaseProviderFactory<Provider: ChainStateBlockReader>,
{
/// Returns the last finalized block number on disk.
pub fn last_finalized_block(&self) -> ProviderResult<Option<BlockNumber>> {
self.provider.database_provider_ro()?.last_finalized_block_number()
}
}
impl<Provider> StaticFileProducerInner<Provider>
where
Provider: StaticFileProviderFactory
+ DatabaseProviderFactory<
Provider: StaticFileProviderFactory<
Primitives: NodePrimitives<
SignedTx: Value + Compact,
BlockHeader: Value + Compact,
Receipt: Value + Compact,
>,
> + StageCheckpointReader
+ BlockReader,
>,
{
/// Listen for events on the `static_file_producer`.
pub fn events(&self) -> EventStream<StaticFileProducerEvent> {
self.event_sender.new_listener()
}
/// Run the `static_file_producer`.
///
/// For each [Some] target in [`StaticFileTargets`], initializes a corresponding [Segment] and
/// runs it with the provided block range using [`reth_provider::providers::StaticFileProvider`]
/// and a read-only database transaction from [`DatabaseProviderFactory`]. All segments are run
/// in parallel.
///
/// NOTE: it doesn't delete the data from database, and the actual deleting (aka pruning) logic
/// lives in the `prune` crate.
pub fn run(&self, targets: StaticFileTargets) -> StaticFileProducerResult {
// If there are no targets, do not produce any static files and return early
if !targets.any() {
return Ok(targets)
}
debug_assert!(targets.is_contiguous_to_highest_static_files(
self.provider.static_file_provider().get_highest_static_files()
));
self.event_sender.notify(StaticFileProducerEvent::Started { targets: targets.clone() });
debug!(target: "static_file", ?targets, "StaticFileProducer started");
let start = Instant::now();
let mut segments =
Vec::<(Box<dyn Segment<Provider::Provider>>, RangeInclusive<BlockNumber>)>::new();
if let Some(block_range) = targets.transactions.clone() {
segments.push((Box::new(segments::Transactions), block_range));
}
if let Some(block_range) = targets.headers.clone() {
segments.push((Box::new(segments::Headers), block_range));
}
if let Some(block_range) = targets.receipts.clone() {
segments.push((Box::new(segments::Receipts), block_range));
}
segments.par_iter().try_for_each(|(segment, block_range)| -> ProviderResult<()> {
debug!(target: "static_file", segment = %segment.segment(), ?block_range, "StaticFileProducer segment");
let start = Instant::now();
// Create a new database transaction on every segment to prevent long-lived read-only
// transactions
let provider = self.provider.database_provider_ro()?.disable_long_read_transaction_safety();
segment.copy_to_static_files(provider, block_range.clone())?;
let elapsed = start.elapsed(); // TODO(alexey): track in metrics
debug!(target: "static_file", segment = %segment.segment(), ?block_range, ?elapsed, "Finished StaticFileProducer segment");
Ok(())
})?;
self.provider.static_file_provider().commit()?;
for (segment, block_range) in segments {
self.provider
.static_file_provider()
.update_index(segment.segment(), Some(*block_range.end()))?;
}
let elapsed = start.elapsed(); // TODO(alexey): track in metrics
debug!(target: "static_file", ?targets, ?elapsed, "StaticFileProducer finished");
self.event_sender
.notify(StaticFileProducerEvent::Finished { targets: targets.clone(), elapsed });
Ok(targets)
}
/// Copies data from database to static files according to
/// [stage checkpoints](reth_stages_types::StageCheckpoint).
///
/// Returns highest block numbers for all static file segments.
pub fn copy_to_static_files(&self) -> ProviderResult<HighestStaticFiles> {
let provider = self.provider.database_provider_ro()?;
let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies]
.into_iter()
.map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number)))
.collect::<Result<Vec<_>, _>>()?;
let highest_static_files = HighestStaticFiles {
headers: stages_checkpoints[0],
receipts: stages_checkpoints[1],
transactions: stages_checkpoints[2],
};
let targets = self.get_static_file_targets(highest_static_files)?;
self.run(targets)?;
Ok(highest_static_files)
}
/// Returns a static file targets at the provided finalized block numbers per segment.
/// The target is determined by the check against highest `static_files` using
/// [`reth_provider::providers::StaticFileProvider::get_highest_static_files`].
pub fn get_static_file_targets(
&self,
finalized_block_numbers: HighestStaticFiles,
) -> ProviderResult<StaticFileTargets> {
let highest_static_files = self.provider.static_file_provider().get_highest_static_files();
let targets = StaticFileTargets {
headers: finalized_block_numbers.headers.and_then(|finalized_block_number| {
self.get_static_file_target(highest_static_files.headers, finalized_block_number)
}),
// StaticFile receipts only if they're not pruned according to the user configuration
receipts: if self.prune_modes.receipts.is_none() &&
self.prune_modes.receipts_log_filter.is_empty()
{
finalized_block_numbers.receipts.and_then(|finalized_block_number| {
self.get_static_file_target(
highest_static_files.receipts,
finalized_block_number,
)
})
} else {
None
},
transactions: finalized_block_numbers.transactions.and_then(|finalized_block_number| {
self.get_static_file_target(
highest_static_files.transactions,
finalized_block_number,
)
}),
};
trace!(
target: "static_file",
?finalized_block_numbers,
?highest_static_files,
?targets,
any = %targets.any(),
"StaticFile targets"
);
Ok(targets)
}
fn get_static_file_target(
&self,
highest_static_file: Option<BlockNumber>,
finalized_block_number: BlockNumber,
) -> Option<RangeInclusive<BlockNumber>> {
let range = highest_static_file.map_or(0, |block| block + 1)..=finalized_block_number;
(!range.is_empty()).then_some(range)
}
}
#[cfg(test)]
mod tests {
use crate::static_file_producer::{
StaticFileProducer, StaticFileProducerInner, StaticFileTargets,
};
use alloy_primitives::{B256, U256};
use assert_matches::assert_matches;
use reth_db_api::{database::Database, transaction::DbTx};
use reth_provider::{
providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, ProviderError,
ProviderFactory, StaticFileProviderFactory,
};
use reth_prune_types::PruneModes;
use reth_stages::test_utils::{StorageKind, TestStageDB};
use reth_static_file_types::{HighestStaticFiles, StaticFileSegment};
use reth_testing_utils::generators::{
self, random_block_range, random_receipt, BlockRangeParams,
};
use std::{sync::mpsc::channel, time::Duration};
use tempfile::TempDir;
fn setup() -> (ProviderFactory<MockNodeTypesWithDB>, TempDir) {
let mut rng = generators::rng();
let db = TestStageDB::default();
let blocks = random_block_range(
&mut rng,
0..=3,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks");
// Unwind headers from static_files and manually insert them into the database, so we're
// able to check that static_file_producer works
let static_file_provider = db.factory.static_file_provider();
let mut static_file_writer = static_file_provider
.latest_writer(StaticFileSegment::Headers)
.expect("get static file writer for headers");
static_file_writer.prune_headers(blocks.len() as u64).unwrap();
static_file_writer.commit().expect("prune headers");
let tx = db.factory.db_ref().tx_mut().expect("init tx");
for block in &blocks {
TestStageDB::insert_header(None, &tx, block.sealed_header(), U256::ZERO)
.expect("insert block header");
}
tx.commit().expect("commit tx");
let mut receipts = Vec::new();
for block in &blocks {
for transaction in &block.body().transactions {
receipts.push((
receipts.len() as u64,
random_receipt(&mut rng, transaction, Some(0), None),
));
}
}
db.insert_receipts(receipts).expect("insert receipts");
let provider_factory = db.factory;
(provider_factory, db.temp_static_files_dir)
}
#[test]
fn run() {
let (provider_factory, _temp_static_files_dir) = setup();
let static_file_producer =
StaticFileProducerInner::new(provider_factory.clone(), PruneModes::default());
let targets = static_file_producer
.get_static_file_targets(HighestStaticFiles {
headers: Some(1),
receipts: Some(1),
transactions: Some(1),
})
.expect("get static file targets");
assert_eq!(
targets,
StaticFileTargets {
headers: Some(0..=1),
receipts: Some(0..=1),
transactions: Some(0..=1)
}
);
assert_matches!(static_file_producer.run(targets), Ok(_));
assert_eq!(
provider_factory.static_file_provider().get_highest_static_files(),
HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) }
);
let targets = static_file_producer
.get_static_file_targets(HighestStaticFiles {
headers: Some(3),
receipts: Some(3),
transactions: Some(3),
})
.expect("get static file targets");
assert_eq!(
targets,
StaticFileTargets {
headers: Some(2..=3),
receipts: Some(2..=3),
transactions: Some(2..=3)
}
);
assert_matches!(static_file_producer.run(targets), Ok(_));
assert_eq!(
provider_factory.static_file_provider().get_highest_static_files(),
HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) }
);
let targets = static_file_producer
.get_static_file_targets(HighestStaticFiles {
headers: Some(4),
receipts: Some(4),
transactions: Some(4),
})
.expect("get static file targets");
assert_eq!(
targets,
StaticFileTargets {
headers: Some(4..=4),
receipts: Some(4..=4),
transactions: Some(4..=4)
}
);
assert_matches!(
static_file_producer.run(targets),
Err(ProviderError::BlockBodyIndicesNotFound(4))
);
assert_eq!(
provider_factory.static_file_provider().get_highest_static_files(),
HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) }
);
}
/// Tests that a cloneable [`StaticFileProducer`] type is not susceptible to any race condition.
#[test]
fn only_one() {
let (provider_factory, _temp_static_files_dir) = setup();
let static_file_producer = StaticFileProducer::new(provider_factory, PruneModes::default());
let (tx, rx) = channel();
for i in 0..5 {
let producer = static_file_producer.clone();
let tx = tx.clone();
std::thread::spawn(move || {
let locked_producer = producer.lock();
if i == 0 {
// Let other threads spawn as well.
std::thread::sleep(Duration::from_millis(100));
}
let targets = locked_producer
.get_static_file_targets(HighestStaticFiles {
headers: Some(1),
receipts: Some(1),
transactions: Some(1),
})
.expect("get static file targets");
assert_matches!(locked_producer.run(targets.clone()), Ok(_));
tx.send(targets).unwrap();
});
}
drop(tx);
let mut only_one = Some(());
for target in rx {
// Only the first spawn should have any meaningful target.
assert!(only_one.take().is_some_and(|_| target.any()) || !target.any())
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/static-file/src/segments/headers.rs | crates/static-file/static-file/src/segments/headers.rs | use crate::segments::Segment;
use alloy_primitives::BlockNumber;
use reth_codecs::Compact;
use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderResult;
use std::ops::RangeInclusive;
/// Static File segment responsible for [`StaticFileSegment::Headers`] part of data.
#[derive(Debug, Default)]
pub struct Headers;
impl<Provider> Segment<Provider> for Headers
where
Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact + Value>>
+ DBProvider,
{
fn segment(&self) -> StaticFileSegment {
StaticFileSegment::Headers
}
fn copy_to_static_files(
&self,
provider: Provider,
block_range: RangeInclusive<BlockNumber>,
) -> ProviderResult<()> {
let static_file_provider = provider.static_file_provider();
let mut static_file_writer =
static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?;
let mut headers_cursor = provider
.tx_ref()
.cursor_read::<tables::Headers<<Provider::Primitives as NodePrimitives>::BlockHeader>>(
)?;
let headers_walker = headers_cursor.walk_range(block_range.clone())?;
let mut header_td_cursor =
provider.tx_ref().cursor_read::<tables::HeaderTerminalDifficulties>()?;
let header_td_walker = header_td_cursor.walk_range(block_range.clone())?;
let mut canonical_headers_cursor =
provider.tx_ref().cursor_read::<tables::CanonicalHeaders>()?;
let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?;
for ((header_entry, header_td_entry), canonical_header_entry) in
headers_walker.zip(header_td_walker).zip(canonical_headers_walker)
{
let (header_block, header) = header_entry?;
let (header_td_block, header_td) = header_td_entry?;
let (canonical_header_block, canonical_header) = canonical_header_entry?;
debug_assert_eq!(header_block, header_td_block);
debug_assert_eq!(header_td_block, canonical_header_block);
static_file_writer.append_header(&header, header_td.0, &canonical_header)?;
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/static-file/src/segments/transactions.rs | crates/static-file/static-file/src/segments/transactions.rs | use crate::segments::Segment;
use alloy_primitives::BlockNumber;
use reth_codecs::Compact;
use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use std::ops::RangeInclusive;
/// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data.
#[derive(Debug, Default)]
pub struct Transactions;
impl<Provider> Segment<Provider> for Transactions
where
Provider: StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value + Compact>>
+ DBProvider
+ BlockReader,
{
fn segment(&self) -> StaticFileSegment {
StaticFileSegment::Transactions
}
/// Write transactions from database table [`tables::Transactions`] to static files with segment
/// [`StaticFileSegment::Transactions`] for the provided block range.
fn copy_to_static_files(
&self,
provider: Provider,
block_range: RangeInclusive<BlockNumber>,
) -> ProviderResult<()> {
let static_file_provider = provider.static_file_provider();
let mut static_file_writer = static_file_provider
.get_writer(*block_range.start(), StaticFileSegment::Transactions)?;
for block in block_range {
static_file_writer.increment_block(block)?;
let block_body_indices = provider
.block_body_indices(block)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(block))?;
let mut transactions_cursor = provider.tx_ref().cursor_read::<tables::Transactions<
<Provider::Primitives as NodePrimitives>::SignedTx,
>>()?;
let transactions_walker =
transactions_cursor.walk_range(block_body_indices.tx_num_range())?;
for entry in transactions_walker {
let (tx_number, transaction) = entry?;
static_file_writer.append_transaction(tx_number, &transaction)?;
}
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/static-file/src/segments/mod.rs | crates/static-file/static-file/src/segments/mod.rs | //! `StaticFile` segment implementations and utilities.
mod transactions;
pub use transactions::Transactions;
mod headers;
pub use headers::Headers;
mod receipts;
pub use receipts::Receipts;
use alloy_primitives::BlockNumber;
use reth_provider::StaticFileProviderFactory;
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderResult;
use std::ops::RangeInclusive;
/// A segment represents moving some portion of the data to static files.
pub trait Segment<Provider: StaticFileProviderFactory>: Send + Sync {
/// Returns the [`StaticFileSegment`].
fn segment(&self) -> StaticFileSegment;
/// Move data to static files for the provided block range.
/// [`StaticFileProvider`](reth_provider::providers::StaticFileProvider) will handle
/// the management of and writing to files.
fn copy_to_static_files(
&self,
provider: Provider,
block_range: RangeInclusive<BlockNumber>,
) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/static-file/src/segments/receipts.rs | crates/static-file/static-file/src/segments/receipts.rs | use crate::segments::Segment;
use alloy_primitives::BlockNumber;
use reth_codecs::Compact;
use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use std::ops::RangeInclusive;
/// Static File segment responsible for [`StaticFileSegment::Receipts`] part of data.
#[derive(Debug, Default)]
pub struct Receipts;
impl<Provider> Segment<Provider> for Receipts
where
Provider: StaticFileProviderFactory<Primitives: NodePrimitives<Receipt: Value + Compact>>
+ DBProvider
+ BlockReader,
{
fn segment(&self) -> StaticFileSegment {
StaticFileSegment::Receipts
}
fn copy_to_static_files(
&self,
provider: Provider,
block_range: RangeInclusive<BlockNumber>,
) -> ProviderResult<()> {
let static_file_provider = provider.static_file_provider();
let mut static_file_writer =
static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Receipts)?;
for block in block_range {
static_file_writer.increment_block(block)?;
let block_body_indices = provider
.block_body_indices(block)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(block))?;
let mut receipts_cursor = provider
.tx_ref()
.cursor_read::<tables::Receipts<<Provider::Primitives as NodePrimitives>::Receipt>>(
)?;
let receipts_walker = receipts_cursor.walk_range(block_body_indices.tx_num_range())?;
static_file_writer.append_receipts(
receipts_walker.map(|result| result.map_err(ProviderError::from)),
)?;
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/types/src/event.rs | crates/static-file/types/src/event.rs | use crate::StaticFileTargets;
use core::time::Duration;
/// An event emitted by the static file producer.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum StaticFileProducerEvent {
/// Emitted when static file producer started running.
Started {
/// Targets that will be moved to static files
targets: StaticFileTargets,
},
/// Emitted when static file producer finished running.
Finished {
/// Targets that were moved to static files
targets: StaticFileTargets,
/// Time it took to run the static file producer
elapsed: Duration,
},
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/types/src/lib.rs | crates/static-file/types/src/lib.rs | //! Commonly used types for static file usage.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
mod compression;
mod event;
mod segment;
use alloy_primitives::BlockNumber;
pub use compression::Compression;
use core::ops::RangeInclusive;
pub use event::StaticFileProducerEvent;
pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFileSegment};
/// Default static file block count.
pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000;
/// Highest static file block numbers, per data segment.
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)]
pub struct HighestStaticFiles {
/// Highest static file block of headers, inclusive.
/// If [`None`], no static file is available.
pub headers: Option<BlockNumber>,
/// Highest static file block of receipts, inclusive.
/// If [`None`], no static file is available.
pub receipts: Option<BlockNumber>,
/// Highest static file block of transactions, inclusive.
/// If [`None`], no static file is available.
pub transactions: Option<BlockNumber>,
}
impl HighestStaticFiles {
/// Returns the highest static file if it exists for a segment
pub const fn highest(&self, segment: StaticFileSegment) -> Option<BlockNumber> {
match segment {
StaticFileSegment::Headers => self.headers,
StaticFileSegment::Transactions => self.transactions,
StaticFileSegment::Receipts => self.receipts,
}
}
/// Returns a mutable reference to a static file segment
pub const fn as_mut(&mut self, segment: StaticFileSegment) -> &mut Option<BlockNumber> {
match segment {
StaticFileSegment::Headers => &mut self.headers,
StaticFileSegment::Transactions => &mut self.transactions,
StaticFileSegment::Receipts => &mut self.receipts,
}
}
/// Returns an iterator over all static file segments
fn iter(&self) -> impl Iterator<Item = Option<BlockNumber>> {
[self.headers, self.transactions, self.receipts].into_iter()
}
/// Returns the minimum block of all segments.
pub fn min_block_num(&self) -> Option<u64> {
self.iter().flatten().min()
}
/// Returns the maximum block of all segments.
pub fn max_block_num(&self) -> Option<u64> {
self.iter().flatten().max()
}
}
/// Static File targets, per data segment, measured in [`BlockNumber`].
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct StaticFileTargets {
/// Targeted range of headers.
pub headers: Option<RangeInclusive<BlockNumber>>,
/// Targeted range of receipts.
pub receipts: Option<RangeInclusive<BlockNumber>>,
/// Targeted range of transactions.
pub transactions: Option<RangeInclusive<BlockNumber>>,
}
impl StaticFileTargets {
/// Returns `true` if any of the targets are [Some].
pub const fn any(&self) -> bool {
self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some()
}
/// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the
/// highest static file.
pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool {
[
(self.headers.as_ref(), static_files.headers),
(self.receipts.as_ref(), static_files.receipts),
(self.transactions.as_ref(), static_files.transactions),
]
.iter()
.all(|(target_block_range, highest_static_file_block)| {
target_block_range.is_none_or(|target_block_range| {
*target_block_range.start() ==
highest_static_file_block
.map_or(0, |highest_static_file_block| highest_static_file_block + 1)
})
})
}
}
/// Each static file has a fixed number of blocks. This gives out the range where the requested
/// block is positioned. Used for segment filename.
pub const fn find_fixed_range(
block: BlockNumber,
blocks_per_static_file: u64,
) -> SegmentRangeInclusive {
let start = (block / blocks_per_static_file) * blocks_per_static_file;
SegmentRangeInclusive::new(start, start + blocks_per_static_file - 1)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_highest_static_files_highest() {
let files =
HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None };
// Test for headers segment
assert_eq!(files.highest(StaticFileSegment::Headers), Some(100));
// Test for receipts segment
assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200));
// Test for transactions segment
assert_eq!(files.highest(StaticFileSegment::Transactions), None);
}
#[test]
fn test_highest_static_files_as_mut() {
let mut files = HighestStaticFiles::default();
// Modify headers value
*files.as_mut(StaticFileSegment::Headers) = Some(150);
assert_eq!(files.headers, Some(150));
// Modify receipts value
*files.as_mut(StaticFileSegment::Receipts) = Some(250);
assert_eq!(files.receipts, Some(250));
// Modify transactions value
*files.as_mut(StaticFileSegment::Transactions) = Some(350);
assert_eq!(files.transactions, Some(350));
}
#[test]
fn test_highest_static_files_min() {
let files =
HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None };
// Minimum value among the available segments
assert_eq!(files.min_block_num(), Some(100));
let empty_files = HighestStaticFiles::default();
// No values, should return None
assert_eq!(empty_files.min_block_num(), None);
}
#[test]
fn test_highest_static_files_max() {
let files =
HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) };
// Maximum value among the available segments
assert_eq!(files.max_block_num(), Some(500));
let empty_files = HighestStaticFiles::default();
// No values, should return None
assert_eq!(empty_files.max_block_num(), None);
}
#[test]
fn test_find_fixed_range() {
// Test with default block size
let block: BlockNumber = 600_000;
let range = find_fixed_range(block, DEFAULT_BLOCKS_PER_STATIC_FILE);
assert_eq!(range.start(), 500_000);
assert_eq!(range.end(), 999_999);
// Test with a custom block size
let block: BlockNumber = 1_200_000;
let range = find_fixed_range(block, 1_000_000);
assert_eq!(range.start(), 1_000_000);
assert_eq!(range.end(), 1_999_999);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/types/src/compression.rs | crates/static-file/types/src/compression.rs | use strum::AsRefStr;
/// Static File compression types.
#[derive(Debug, Copy, Clone, Default, AsRefStr)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
pub enum Compression {
/// LZ4 compression algorithm.
#[strum(serialize = "lz4")]
Lz4,
/// Zstandard (Zstd) compression algorithm.
#[strum(serialize = "zstd")]
Zstd,
/// Zstandard (Zstd) compression algorithm with a dictionary.
#[strum(serialize = "zstd-dict")]
ZstdWithDictionary,
/// No compression.
#[strum(serialize = "uncompressed")]
#[default]
Uncompressed,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/static-file/types/src/segment.rs | crates/static-file/types/src/segment.rs | use crate::{BlockNumber, Compression};
use alloc::{
format,
string::{String, ToString},
};
use alloy_primitives::TxNumber;
use core::{ops::RangeInclusive, str::FromStr};
use derive_more::Display;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumString};
#[derive(
Debug,
Copy,
Clone,
Eq,
PartialEq,
Hash,
Ord,
PartialOrd,
Deserialize,
Serialize,
EnumString,
AsRefStr,
Display,
)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
/// Segment of the data that can be moved to static files.
pub enum StaticFileSegment {
#[strum(serialize = "headers")]
/// Static File segment responsible for the `CanonicalHeaders`, `Headers`,
/// `HeaderTerminalDifficulties` tables.
Headers,
#[strum(serialize = "transactions")]
/// Static File segment responsible for the `Transactions` table.
Transactions,
#[strum(serialize = "receipts")]
/// Static File segment responsible for the `Receipts` table.
Receipts,
}
impl StaticFileSegment {
/// Returns the segment as a string.
pub const fn as_str(&self) -> &'static str {
match self {
Self::Headers => "headers",
Self::Transactions => "transactions",
Self::Receipts => "receipts",
}
}
/// Returns an iterator over all segments.
pub fn iter() -> impl Iterator<Item = Self> {
// The order of segments is significant and must be maintained to ensure correctness.
[Self::Headers, Self::Transactions, Self::Receipts].into_iter()
}
/// Returns the default configuration of the segment.
pub const fn config(&self) -> SegmentConfig {
SegmentConfig { compression: Compression::Lz4 }
}
/// Returns the number of columns for the segment
pub const fn columns(&self) -> usize {
match self {
Self::Headers => 3,
Self::Transactions | Self::Receipts => 1,
}
}
/// Returns the default file name for the provided segment and range.
pub fn filename(&self, block_range: &SegmentRangeInclusive) -> String {
// ATTENTION: if changing the name format, be sure to reflect those changes in
// [`Self::parse_filename`].
format!("static_file_{}_{}_{}", self.as_ref(), block_range.start(), block_range.end())
}
/// Returns file name for the provided segment and range, alongside filters, compression.
pub fn filename_with_configuration(
&self,
compression: Compression,
block_range: &SegmentRangeInclusive,
) -> String {
let prefix = self.filename(block_range);
let filters_name = "none".to_string();
// ATTENTION: if changing the name format, be sure to reflect those changes in
// [`Self::parse_filename`.]
format!("{prefix}_{}_{}", filters_name, compression.as_ref())
}
/// Parses a filename into a `StaticFileSegment` and its expected block range.
///
/// The filename is expected to follow the format:
/// "`static_file`_{segment}_{`block_start`}_{`block_end`}". This function checks
/// for the correct prefix ("`static_file`"), and then parses the segment and the inclusive
/// ranges for blocks. It ensures that the start of each range is less than or equal to the
/// end.
///
/// # Returns
/// - `Some((segment, block_range))` if parsing is successful and all conditions are met.
/// - `None` if any condition fails, such as an incorrect prefix, parsing error, or invalid
/// range.
///
/// # Note
/// This function is tightly coupled with the naming convention defined in [`Self::filename`].
/// Any changes in the filename format in `filename` should be reflected here.
pub fn parse_filename(name: &str) -> Option<(Self, SegmentRangeInclusive)> {
let mut parts = name.split('_');
if !(parts.next() == Some("static") && parts.next() == Some("file")) {
return None
}
let segment = Self::from_str(parts.next()?).ok()?;
let (block_start, block_end) = (parts.next()?.parse().ok()?, parts.next()?.parse().ok()?);
if block_start > block_end {
return None
}
Some((segment, SegmentRangeInclusive::new(block_start, block_end)))
}
/// Returns `true` if the segment is `StaticFileSegment::Headers`.
pub const fn is_headers(&self) -> bool {
matches!(self, Self::Headers)
}
/// Returns `true` if the segment is `StaticFileSegment::Receipts`.
pub const fn is_receipts(&self) -> bool {
matches!(self, Self::Receipts)
}
/// Returns `true` if a segment row is linked to a transaction.
pub const fn is_tx_based(&self) -> bool {
matches!(self, Self::Receipts | Self::Transactions)
}
/// Returns `true` if a segment row is linked to a block.
pub const fn is_block_based(&self) -> bool {
matches!(self, Self::Headers)
}
}
/// A segment header that contains information common to all segments. Used for storage.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone)]
pub struct SegmentHeader {
/// Defines the expected block range for a static file segment. This attribute is crucial for
/// scenarios where the file contains no data, allowing for a representation beyond a
/// simple `start..=start` range. It ensures clarity in differentiating between an empty file
/// and a file with a single block numbered 0.
expected_block_range: SegmentRangeInclusive,
/// Block range of data on the static file segment
block_range: Option<SegmentRangeInclusive>,
/// Transaction range of data of the static file segment
tx_range: Option<SegmentRangeInclusive>,
/// Segment type
segment: StaticFileSegment,
}
impl SegmentHeader {
/// Returns [`SegmentHeader`].
pub const fn new(
expected_block_range: SegmentRangeInclusive,
block_range: Option<SegmentRangeInclusive>,
tx_range: Option<SegmentRangeInclusive>,
segment: StaticFileSegment,
) -> Self {
Self { expected_block_range, block_range, tx_range, segment }
}
/// Returns the static file segment kind.
pub const fn segment(&self) -> StaticFileSegment {
self.segment
}
/// Returns the block range.
pub const fn block_range(&self) -> Option<&SegmentRangeInclusive> {
self.block_range.as_ref()
}
/// Returns the transaction range.
pub const fn tx_range(&self) -> Option<&SegmentRangeInclusive> {
self.tx_range.as_ref()
}
/// The expected block start of the segment.
pub const fn expected_block_start(&self) -> BlockNumber {
self.expected_block_range.start()
}
/// The expected block end of the segment.
pub const fn expected_block_end(&self) -> BlockNumber {
self.expected_block_range.end()
}
/// Returns the first block number of the segment.
pub fn block_start(&self) -> Option<BlockNumber> {
self.block_range.as_ref().map(|b| b.start())
}
/// Returns the last block number of the segment.
pub fn block_end(&self) -> Option<BlockNumber> {
self.block_range.as_ref().map(|b| b.end())
}
/// Returns the first transaction number of the segment.
pub fn tx_start(&self) -> Option<TxNumber> {
self.tx_range.as_ref().map(|t| t.start())
}
/// Returns the last transaction number of the segment.
pub fn tx_end(&self) -> Option<TxNumber> {
self.tx_range.as_ref().map(|t| t.end())
}
/// Number of transactions.
pub fn tx_len(&self) -> Option<u64> {
self.tx_range.as_ref().map(|r| (r.end() + 1) - r.start())
}
/// Number of blocks.
pub fn block_len(&self) -> Option<u64> {
self.block_range.as_ref().map(|r| (r.end() + 1) - r.start())
}
/// Increments block end range depending on segment
pub const fn increment_block(&mut self) -> BlockNumber {
if let Some(block_range) = &mut self.block_range {
block_range.end += 1;
block_range.end
} else {
self.block_range = Some(SegmentRangeInclusive::new(
self.expected_block_start(),
self.expected_block_start(),
));
self.expected_block_start()
}
}
/// Increments tx end range depending on segment
pub const fn increment_tx(&mut self) {
if self.segment.is_tx_based() {
if let Some(tx_range) = &mut self.tx_range {
tx_range.end += 1;
} else {
self.tx_range = Some(SegmentRangeInclusive::new(0, 0));
}
}
}
/// Removes `num` elements from end of tx or block range.
pub const fn prune(&mut self, num: u64) {
if self.segment.is_block_based() {
if let Some(range) = &mut self.block_range {
if num > range.end - range.start {
self.block_range = None;
} else {
range.end = range.end.saturating_sub(num);
}
};
} else if let Some(range) = &mut self.tx_range {
if num > range.end - range.start {
self.tx_range = None;
} else {
range.end = range.end.saturating_sub(num);
}
}
}
/// Sets a new `block_range`.
pub const fn set_block_range(&mut self, block_start: BlockNumber, block_end: BlockNumber) {
if let Some(block_range) = &mut self.block_range {
block_range.start = block_start;
block_range.end = block_end;
} else {
self.block_range = Some(SegmentRangeInclusive::new(block_start, block_end))
}
}
/// Sets a new `tx_range`.
pub const fn set_tx_range(&mut self, tx_start: TxNumber, tx_end: TxNumber) {
if let Some(tx_range) = &mut self.tx_range {
tx_range.start = tx_start;
tx_range.end = tx_end;
} else {
self.tx_range = Some(SegmentRangeInclusive::new(tx_start, tx_end))
}
}
/// Returns the row offset which depends on whether the segment is block or transaction based.
pub fn start(&self) -> Option<u64> {
if self.segment.is_block_based() {
return self.block_start()
}
self.tx_start()
}
}
/// Configuration used on the segment.
#[derive(Debug, Clone, Copy)]
pub struct SegmentConfig {
/// Compression used on the segment
pub compression: Compression,
}
/// Helper type to handle segment transaction and block INCLUSIVE ranges.
///
/// They can be modified on a hot loop, which makes the `std::ops::RangeInclusive` a poor fit.
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone, Copy)]
pub struct SegmentRangeInclusive {
start: u64,
end: u64,
}
impl SegmentRangeInclusive {
/// Creates a new [`SegmentRangeInclusive`]
pub const fn new(start: u64, end: u64) -> Self {
Self { start, end }
}
/// Start of the inclusive range
pub const fn start(&self) -> u64 {
self.start
}
/// End of the inclusive range
pub const fn end(&self) -> u64 {
self.end
}
}
impl core::fmt::Display for SegmentRangeInclusive {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}..={}", self.start, self.end)
}
}
impl From<RangeInclusive<u64>> for SegmentRangeInclusive {
fn from(value: RangeInclusive<u64>) -> Self {
Self { start: *value.start(), end: *value.end() }
}
}
impl From<&SegmentRangeInclusive> for RangeInclusive<u64> {
fn from(value: &SegmentRangeInclusive) -> Self {
value.start()..=value.end()
}
}
impl From<SegmentRangeInclusive> for RangeInclusive<u64> {
fn from(value: SegmentRangeInclusive) -> Self {
(&value).into()
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::hex;
use reth_nippy_jar::NippyJar;
#[test]
fn test_filename() {
let test_vectors = [
(StaticFileSegment::Headers, 2..=30, "static_file_headers_2_30", None),
(StaticFileSegment::Receipts, 30..=300, "static_file_receipts_30_300", None),
(
StaticFileSegment::Transactions,
1_123_233..=11_223_233,
"static_file_transactions_1123233_11223233",
None,
),
(
StaticFileSegment::Headers,
2..=30,
"static_file_headers_2_30_none_lz4",
Some(Compression::Lz4),
),
(
StaticFileSegment::Headers,
2..=30,
"static_file_headers_2_30_none_zstd",
Some(Compression::Zstd),
),
(
StaticFileSegment::Headers,
2..=30,
"static_file_headers_2_30_none_zstd-dict",
Some(Compression::ZstdWithDictionary),
),
];
for (segment, block_range, filename, compression) in test_vectors {
let block_range: SegmentRangeInclusive = block_range.into();
if let Some(compression) = compression {
assert_eq!(
segment.filename_with_configuration(compression, &block_range),
filename
);
} else {
assert_eq!(segment.filename(&block_range), filename);
}
assert_eq!(StaticFileSegment::parse_filename(filename), Some((segment, block_range)));
}
assert_eq!(StaticFileSegment::parse_filename("static_file_headers_2"), None);
assert_eq!(StaticFileSegment::parse_filename("static_file_headers_"), None);
// roundtrip test
let dummy_range = SegmentRangeInclusive::new(123, 1230);
for segment in StaticFileSegment::iter() {
let filename = segment.filename(&dummy_range);
assert_eq!(Some((segment, dummy_range)), StaticFileSegment::parse_filename(&filename));
}
}
#[test]
fn test_segment_config_backwards() {
let headers = hex!(
"010000000000000000000000000000001fa10700000000000100000000000000001fa10700000000000000000000030000000000000020a107000000000001010000004a02000000000000"
);
let transactions = hex!(
"010000000000000000000000000000001fa10700000000000100000000000000001fa107000000000001000000000000000034a107000000000001000000010000000000000035a1070000000000004010000000000000"
);
let receipts = hex!(
"010000000000000000000000000000001fa10700000000000100000000000000000000000000000000000200000001000000000000000000000000000000000000000000000000"
);
{
let headers = NippyJar::<SegmentHeader>::load_from_reader(&headers[..]).unwrap();
assert_eq!(
&SegmentHeader {
expected_block_range: SegmentRangeInclusive::new(0, 499999),
block_range: Some(SegmentRangeInclusive::new(0, 499999)),
tx_range: None,
segment: StaticFileSegment::Headers,
},
headers.user_header()
);
}
{
let transactions =
NippyJar::<SegmentHeader>::load_from_reader(&transactions[..]).unwrap();
assert_eq!(
&SegmentHeader {
expected_block_range: SegmentRangeInclusive::new(0, 499999),
block_range: Some(SegmentRangeInclusive::new(0, 499999)),
tx_range: Some(SegmentRangeInclusive::new(0, 500020)),
segment: StaticFileSegment::Transactions,
},
transactions.user_header()
);
}
{
let receipts = NippyJar::<SegmentHeader>::load_from_reader(&receipts[..]).unwrap();
assert_eq!(
&SegmentHeader {
expected_block_range: SegmentRangeInclusive::new(0, 499999),
block_range: Some(SegmentRangeInclusive::new(0, 0)),
tx_range: None,
segment: StaticFileSegment::Receipts,
},
receipts.user_header()
);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/evm/src/config.rs | crates/seismic/evm/src/config.rs | //! Helpers for configuring the SeismicSpecId for the evm
use crate::Header;
use alloy_consensus::BlockHeader;
use reth_chainspec::ChainSpec as SeismicChainSpec;
use seismic_revm::SeismicSpecId;
/// Map the latest active hardfork at the given header to a revm [`SeismicSpecId`].
pub fn revm_spec(chain_spec: &SeismicChainSpec, header: &Header) -> SeismicSpecId {
revm_spec_by_timestamp_seismic(&chain_spec, header.timestamp())
}
/// Map the latest active hardfork at the given timestamp or block number to a revm
/// [`SeismicSpecId`].
///
/// For now our only hardfork is MERCURY, so we only return MERCURY
fn revm_spec_by_timestamp_seismic(
_chain_spec: &SeismicChainSpec,
_timestamp: u64,
) -> SeismicSpecId {
SeismicSpecId::MERCURY
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/evm/src/lib.rs | crates/seismic/evm/src/lib.rs | //! EVM config for vanilla seismic.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::{borrow::Cow, sync::Arc};
use alloy_consensus::{BlockHeader, Header};
use alloy_eips::{eip1559::INITIAL_BASE_FEE, Decodable2718};
use alloy_evm::{eth::EthBlockExecutionCtx, EvmFactory};
use alloy_primitives::{Bytes, U256};
use alloy_rpc_types_engine::ExecutionData;
pub use alloy_seismic_evm::{block::SeismicBlockExecutorFactory, SeismicEvm, SeismicEvmFactory};
use build::SeismicBlockAssembler;
use core::fmt::Debug;
use reth_chainspec::{ChainSpec, EthChainSpec};
use reth_ethereum_forks::EthereumHardfork;
use reth_evm::{
ConfigureEngineEvm, ConfigureEvm, EvmEnv, EvmEnvFor, ExecutableTxIterator, ExecutionCtxFor,
NextBlockEnvAttributes,
};
use reth_primitives_traits::{SealedBlock, SealedHeader, SignedTransaction, TxTy};
use reth_seismic_primitives::{SeismicBlock, SeismicPrimitives};
use reth_storage_errors::any::AnyError;
use revm::{
context::{BlockEnv, CfgEnv},
context_interface::block::BlobExcessGasAndPrice,
};
use seismic_revm::SeismicSpecId;
use std::convert::Infallible;
mod receipts;
pub use receipts::*;
mod build;
pub mod config;
use config::revm_spec;
/// Seismic EVM configuration.
#[derive(Debug, Clone)]
pub struct SeismicEvmConfig {
/// Inner [`SeismicBlockExecutorFactory`].
pub executor_factory:
SeismicBlockExecutorFactory<SeismicRethReceiptBuilder, Arc<ChainSpec>, SeismicEvmFactory>,
/// Seismic block assembler.
pub block_assembler: SeismicBlockAssembler<ChainSpec>,
}
impl SeismicEvmConfig {
/// Creates a new Seismic EVM configuration with the given chain spec and purpose keys.
pub fn new(
chain_spec: Arc<ChainSpec>,
purpose_keys: &'static seismic_enclave::GetPurposeKeysResponse,
) -> Self {
SeismicEvmConfig::new_with_evm_factory(
chain_spec,
SeismicEvmFactory::new_with_purpose_keys(purpose_keys),
purpose_keys,
)
}
/// Creates a new Ethereum EVM configuration with the given chain spec and EVM factory.
pub fn new_with_evm_factory(
chain_spec: Arc<ChainSpec>,
evm_factory: SeismicEvmFactory,
purpose_keys: &'static seismic_enclave::GetPurposeKeysResponse,
) -> Self {
Self {
block_assembler: SeismicBlockAssembler::new(chain_spec.clone()),
executor_factory: SeismicBlockExecutorFactory::new(
SeismicRethReceiptBuilder::default(),
chain_spec,
evm_factory,
purpose_keys,
),
}
}
/// Returns the chain spec associated with this configuration.
pub const fn chain_spec(&self) -> &Arc<ChainSpec> {
self.executor_factory.spec()
}
/// Sets the extra data for the block assembler.
pub fn with_extra_data(mut self, extra_data: Bytes) -> Self {
self.block_assembler.extra_data = extra_data;
self
}
/// Creates an EVM with the pre-fetched purpose keys
pub fn evm_with_env_and_live_key<DB>(
&self,
db: DB,
evm_env: EvmEnv<SeismicSpecId>,
) -> SeismicEvm<DB, revm::inspector::NoOpInspector>
where
DB: alloy_evm::Database,
{
self.executor_factory.evm_factory().create_evm(db, evm_env)
}
}
impl ConfigureEvm for SeismicEvmConfig {
type Primitives = SeismicPrimitives;
type Error = Infallible;
type NextBlockEnvCtx = NextBlockEnvAttributes;
type BlockExecutorFactory =
SeismicBlockExecutorFactory<SeismicRethReceiptBuilder, Arc<ChainSpec>, SeismicEvmFactory>;
type BlockAssembler = SeismicBlockAssembler<ChainSpec>;
fn block_executor_factory(&self) -> &Self::BlockExecutorFactory {
&self.executor_factory
}
fn block_assembler(&self) -> &Self::BlockAssembler {
&self.block_assembler
}
fn evm_env(&self, header: &Header) -> EvmEnv<SeismicSpecId> {
// TODO: use the correct spec id
let spec = SeismicSpecId::MERCURY;
// configure evm env based on parent block
let cfg_env = CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec);
let block_env = BlockEnv {
number: U256::from(header.number()),
beneficiary: header.beneficiary(),
timestamp: U256::from(header.timestamp()),
difficulty: U256::ZERO,
prevrandao: header.mix_hash(), /* Seismic genesis spec (Mercury) starts after Paris,
* so we always use header.mix_hash() */
gas_limit: header.gas_limit(),
basefee: header.base_fee_per_gas().unwrap_or_default(),
// EIP-4844 excess blob gas of this block, introduced in Cancun
blob_excess_gas_and_price: header.excess_blob_gas.map(|excess_blob_gas| {
BlobExcessGasAndPrice::new_with_spec(excess_blob_gas, spec.into_eth_spec())
}),
};
EvmEnv { cfg_env, block_env }
}
fn next_evm_env(
&self,
parent: &Header,
attributes: &NextBlockEnvAttributes,
) -> Result<EvmEnv<SeismicSpecId>, Self::Error> {
let spec_id = revm_spec(self.chain_spec(), parent);
// configure evm env based on parent block
let cfg = CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec_id);
// if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is
// cancun now, we need to set the excess blob gas to the default value(0)
let blob_excess_gas_and_price = parent
.maybe_next_block_excess_blob_gas(
self.chain_spec().blob_params_at_timestamp(attributes.timestamp_seconds()),
)
.map(|gas| BlobExcessGasAndPrice::new_with_spec(gas, spec_id.into_eth_spec()));
let mut basefee = parent.next_block_base_fee(
self.chain_spec().base_fee_params_at_timestamp(attributes.timestamp_seconds()),
);
let mut gas_limit = attributes.gas_limit;
// If we are on the London fork boundary, we need to multiply the parent's gas limit by the
// elasticity multiplier to get the new gas limit.
if self.chain_spec().fork(EthereumHardfork::London).transitions_at_block(parent.number + 1)
{
let elasticity_multiplier = self
.chain_spec()
.base_fee_params_at_timestamp(attributes.timestamp_seconds())
.elasticity_multiplier;
// multiply the gas limit by the elasticity multiplier
gas_limit *= elasticity_multiplier as u64;
// set the base fee to the initial base fee from the EIP-1559 spec
basefee = Some(INITIAL_BASE_FEE)
}
let block_env = BlockEnv {
number: U256::from(parent.number + 1),
beneficiary: attributes.suggested_fee_recipient,
// When timestamp-in-seconds is disabled, EVM should use milliseconds
timestamp: U256::from(attributes.timestamp),
difficulty: U256::ZERO,
prevrandao: Some(attributes.prev_randao),
gas_limit,
// calculate basefee based on parent block's gas usage
basefee: basefee.unwrap_or_default(),
// calculate excess gas based on parent block's blob gas usage
blob_excess_gas_and_price,
};
Ok((cfg, block_env).into())
}
fn context_for_block<'a>(
&self,
block: &'a SealedBlock<SeismicBlock>,
) -> EthBlockExecutionCtx<'a> {
EthBlockExecutionCtx {
parent_hash: block.header().parent_hash,
parent_beacon_block_root: block.header().parent_beacon_block_root,
ommers: &block.body().ommers,
withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
}
}
fn context_for_next_block(
&self,
parent: &SealedHeader,
attributes: Self::NextBlockEnvCtx,
) -> EthBlockExecutionCtx<'_> {
EthBlockExecutionCtx {
parent_hash: parent.hash(),
parent_beacon_block_root: attributes.parent_beacon_block_root,
ommers: &[],
withdrawals: attributes.withdrawals.map(Cow::Owned),
}
}
/// Override to use pre-fetched live RNG key
fn evm_with_env<DB: alloy_evm::Database>(
&self,
db: DB,
evm_env: EvmEnv<SeismicSpecId>,
) -> SeismicEvm<DB, revm::inspector::NoOpInspector> {
self.evm_with_env_and_live_key(db, evm_env)
}
}
impl ConfigureEngineEvm<ExecutionData> for SeismicEvmConfig {
fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor<Self> {
// Create a temporary header with the payload information to determine the spec
let temp_header = Header {
number: payload.payload.block_number(),
timestamp: payload.payload.timestamp(),
gas_limit: payload.payload.gas_limit(),
beneficiary: payload.payload.fee_recipient(),
..Default::default()
};
let spec_id = revm_spec(self.chain_spec(), &temp_header);
let cfg_env =
CfgEnv::new().with_chain_id(self.chain_spec().chain().id()).with_spec(spec_id);
let blob_excess_gas_and_price = payload
.payload
.blob_gas_used()
.map(|_gas| BlobExcessGasAndPrice::new_with_spec(0, spec_id.into_eth_spec()));
let block_env = BlockEnv {
number: U256::from(payload.payload.block_number()),
beneficiary: payload.payload.fee_recipient(),
timestamp: U256::from(payload.payload.timestamp()),
difficulty: U256::ZERO,
prevrandao: Some(payload.payload.prev_randao()),
gas_limit: payload.payload.gas_limit(),
basefee: payload.payload.saturated_base_fee_per_gas(),
blob_excess_gas_and_price,
};
(cfg_env, block_env).into()
}
fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self> {
EthBlockExecutionCtx {
parent_hash: payload.payload.parent_hash(),
parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(),
ommers: &[],
withdrawals: payload.payload.withdrawals().map(|w| Cow::Owned(w.clone().into())),
}
}
fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator<Self> {
payload.payload.transactions().clone().into_iter().map(|tx| {
let mut tx_data = tx.as_ref();
let tx = TxTy::<Self::Primitives>::decode_2718(&mut tx_data).map_err(AnyError::new)?;
let signer = tx.try_recover().map_err(AnyError::new)?;
Ok::<_, AnyError>(tx.with_signer(signer))
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::{Header, Receipt};
use alloy_eips::eip7685::Requests;
use alloy_evm::Evm;
use alloy_primitives::{bytes, map::HashMap, Address, LogData, B256};
use reth_chainspec::ChainSpec;
use reth_evm::execute::ProviderError;
use reth_execution_types::{
AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit,
};
use reth_primitives_traits::{Account, RecoveredBlock};
use reth_seismic_chainspec::SEISMIC_MAINNET;
use reth_seismic_primitives::{SeismicBlock, SeismicPrimitives, SeismicReceipt};
use revm::{
database::{BundleState, CacheDB},
database_interface::EmptyDBTyped,
handler::PrecompileProvider,
inspector::NoOpInspector,
precompile::u64_to_address,
primitives::Log,
state::AccountInfo,
};
use seismic_alloy_genesis::Genesis;
use seismic_enclave::{
get_unsecure_sample_schnorrkel_keypair, get_unsecure_sample_secp256k1_pk,
get_unsecure_sample_secp256k1_sk, GetPurposeKeysResponse,
};
use std::sync::Arc;
fn test_evm_config() -> SeismicEvmConfig {
// Get mock purpose keys for testing
let mock_keys = Box::leak(Box::new(get_mock_keys()));
SeismicEvmConfig::new(SEISMIC_MAINNET.clone(), mock_keys)
}
fn get_mock_keys() -> GetPurposeKeysResponse {
GetPurposeKeysResponse {
tx_io_sk: get_unsecure_sample_secp256k1_sk(),
tx_io_pk: get_unsecure_sample_secp256k1_pk(),
snapshot_key_bytes: [0u8; 32],
rng_keypair: get_unsecure_sample_schnorrkel_keypair(),
}
}
#[test]
fn test_fill_cfg_and_block_env() {
// Create a default header
let header = Header::default();
// Build the ChainSpec for Ethereum mainnet, activating London, Paris, and Shanghai
// hardforks
let chain_spec = ChainSpec::builder()
.chain(0.into())
.genesis(Genesis::default())
.london_activated()
.paris_activated()
.shanghai_activated()
.build();
// Use the `SeismicEvmConfig` to create the `cfg_env` and `block_env` based on the
// ChainSpec, Header, and total difficulty
let mock_keys = Box::leak(Box::new(get_mock_keys()));
let EvmEnv { cfg_env, .. } =
SeismicEvmConfig::new(Arc::new(chain_spec.clone()), mock_keys).evm_env(&header);
// Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the
// ChainSpec
assert_eq!(cfg_env.chain_id, chain_spec.chain().id());
}
#[test]
fn test_seismic_evm_with_env_default_spec() {
// Setup the EVM with test config and environment
let evm_config = test_evm_config(); // Provides SeismicEvm config with Seismic mainnet spec
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
let evm_env = EvmEnv::default();
let evm: SeismicEvm<_, NoOpInspector> = evm_config.evm_with_env(db, evm_env.clone());
let precompiles = evm.precompiles().clone();
// Check that the EVM environment is correctly set
assert_eq!(evm.cfg, evm_env.cfg_env);
assert_eq!(evm.cfg.spec, SeismicSpecId::MERCURY);
// Check that the expected number of precompiles is set
let precompile_addresses =
[u64_to_address(101), u64_to_address(102), u64_to_address(103), u64_to_address(104)];
for &addr in &precompile_addresses {
let is_contained = precompiles.contains(&addr);
assert!(
is_contained,
"Expected Precompile at address for RETH evm generation {addr:?}"
);
}
}
#[test]
fn test_evm_with_env_custom_cfg() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
// Create a custom configuration environment with a chain ID of 111
let cfg = CfgEnv::new().with_chain_id(111).with_spec(SeismicSpecId::default());
let evm_env = EvmEnv { cfg_env: cfg.clone(), ..Default::default() };
let evm = evm_config.evm_with_env(db, evm_env);
// Check that the EVM environment is initialized with the custom environment
assert_eq!(evm.cfg, cfg);
}
#[test]
fn test_evm_with_env_custom_block_and_tx() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
// Create customs block and tx env
let block = BlockEnv {
basefee: 1000,
gas_limit: 10_000_000,
number: U256::from(42),
..Default::default()
};
let evm_env = EvmEnv { block_env: block, ..Default::default() };
let evm = evm_config.evm_with_env(db, evm_env.clone());
// Verify that the block and transaction environments are set correctly
assert_eq!(evm.block, evm_env.block_env);
}
#[test]
fn test_evm_with_spec_id() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
let evm_env = EvmEnv {
cfg_env: CfgEnv::new().with_spec(SeismicSpecId::MERCURY),
..Default::default()
};
let evm = evm_config.evm_with_env(db, evm_env.clone());
assert_eq!(evm.cfg, evm_env.cfg_env);
}
#[test]
fn test_evm_with_env_and_default_inspector() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
let evm_env = EvmEnv { cfg_env: Default::default(), ..Default::default() };
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
// Check that the EVM environment is set to default values
assert_eq!(*evm.block(), evm_env.block_env);
assert_eq!(evm.cfg, evm_env.cfg_env);
}
#[test]
fn test_evm_with_env_inspector_and_custom_cfg() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
let cfg = CfgEnv::new().with_chain_id(111).with_spec(SeismicSpecId::MERCURY);
let block = BlockEnv::default();
let evm_env = EvmEnv { block_env: block, cfg_env: cfg.clone() };
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
// Check that the EVM environment is set with custom configuration
assert_eq!(evm.cfg, cfg);
assert_eq!(evm.block, evm_env.block_env);
}
#[test]
fn test_evm_with_env_inspector_and_custom_block_tx() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
// Create custom block and tx environment
let block = BlockEnv {
basefee: 1000,
gas_limit: 10_000_000,
number: U256::from(42),
..Default::default()
};
let evm_env = EvmEnv { block_env: block, ..Default::default() };
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
// Verify that the block and transaction environments are set correctly
assert_eq!(evm.block, evm_env.block_env);
}
#[test]
fn test_evm_with_env_inspector_and_spec_id() {
let evm_config = test_evm_config();
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
let evm_env = EvmEnv {
cfg_env: CfgEnv::new().with_spec(SeismicSpecId::MERCURY),
..Default::default()
};
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
// Check that the spec ID is set properly
assert_eq!(evm.cfg, evm_env.cfg_env);
assert_eq!(evm.block, evm_env.block_env);
}
#[test]
fn receipts_by_block_hash() {
// Create a default recovered block
let block: RecoveredBlock<SeismicBlock> = Default::default();
// Define block hashes for block1 and block2
let block1_hash = B256::new([0x01; 32]);
let block2_hash = B256::new([0x02; 32]);
// Clone the default block into block1 and block2
let mut block1 = block.clone();
let mut block2 = block;
// Set the hashes of block1 and block2
block1.set_block_number(10);
block1.set_hash(block1_hash);
block2.set_block_number(11);
block2.set_hash(block2_hash);
// Create a random receipt object, receipt1
let receipt1 = SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
});
// Create another random receipt object, receipt2
let receipt2 = SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 1325345,
logs: vec![],
status: true.into(),
});
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![receipt1.clone()], vec![receipt2]];
// Create an ExecutionOutcome object with the created bundle, receipts, an empty requests
// vector, and first_block set to 10
let execution_outcome = ExecutionOutcome::<SeismicReceipt> {
bundle: Default::default(),
receipts,
requests: vec![],
first_block: 10,
};
// Create a Chain object with a BTreeMap of blocks mapped to their block numbers,
// including block1_hash and block2_hash, and the execution_outcome
let chain: Chain<SeismicPrimitives> =
Chain::new([block1, block2], execution_outcome.clone(), None);
// Assert that the proper receipt vector is returned for block1_hash
assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1]));
// Create an ExecutionOutcome object with a single receipt vector containing receipt1
let execution_outcome1 = ExecutionOutcome {
bundle: Default::default(),
receipts: vec![vec![receipt1]],
requests: vec![],
first_block: 10,
};
// Assert that the execution outcome at the first block contains only the first receipt
assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1));
// Assert that the execution outcome at the tip block contains the whole execution outcome
assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome));
}
#[test]
fn test_initialisation() {
// Create a new BundleState object with initial data
let bundle = BundleState::new(
vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())],
vec![vec![(Address::new([2; 20]), None, vec![])]],
vec![],
);
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
}))]];
// Create a Requests object with a vector of requests
let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: bundle.clone(),
receipts: receipts.clone(),
requests: requests.clone(),
first_block,
};
// Assert that creating a new ExecutionOutcome using the constructor matches exec_res
assert_eq!(
ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()),
exec_res
);
// Create a BundleStateInit object and insert initial data
let mut state_init: BundleStateInit = HashMap::default();
state_init
.insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default()));
// Create a HashMap for account reverts and insert initial data
let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default();
revert_inner.insert(Address::new([2; 20]), (None, vec![]));
// Create a RevertsInit object and insert the revert_inner data
let mut revert_init: RevertsInit = HashMap::default();
revert_init.insert(123, revert_inner);
// Assert that creating a new ExecutionOutcome using the new_init method matches
// exec_res
assert_eq!(
ExecutionOutcome::new_init(
state_init,
revert_init,
vec![],
receipts,
first_block,
requests,
),
exec_res
);
}
#[test]
fn test_block_number_to_index() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
}))]];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(),
receipts,
requests: vec![],
first_block,
};
// Test before the first block
assert_eq!(exec_res.block_number_to_index(12), None);
// Test after after the first block but index larger than receipts length
assert_eq!(exec_res.block_number_to_index(133), None);
// Test after the first block
assert_eq!(exec_res.block_number_to_index(123), Some(0));
}
#[test]
fn test_get_logs() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
})]];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(),
receipts,
requests: vec![],
first_block,
};
// Get logs for block number 123
let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect();
// Assert that the logs match the expected logs
assert_eq!(logs, vec![&Log::<LogData>::default()]);
}
#[test]
fn test_receipts_by_block() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
}))]];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(), // Default value for bundle
receipts, // Include the created receipts
requests: vec![], // Empty vector for requests
first_block, // Set the first block number
};
// Get receipts for block number 123 and convert the result into a vector
let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect();
// Assert that the receipts for block number 123 match the expected receipts
assert_eq!(
receipts_by_block,
vec![&Some(SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
}))]
);
}
#[test]
fn test_receipts_len() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
}))]];
// Create an empty Receipts object
let receipts_empty = vec![];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(), // Default value for bundle
receipts, // Include the created receipts
requests: vec![], // Empty vector for requests
first_block, // Set the first block number
};
// Assert that the length of receipts in exec_res is 1
assert_eq!(exec_res.len(), 1);
// Assert that exec_res is not empty
assert!(!exec_res.is_empty());
// Create a ExecutionOutcome object with an empty Receipts object
let exec_res_empty_receipts: ExecutionOutcome<SeismicReceipt> = ExecutionOutcome {
bundle: Default::default(), // Default value for bundle
receipts: receipts_empty, // Include the empty receipts
requests: vec![], // Empty vector for requests
first_block, // Set the first block number
};
// Assert that the length of receipts in exec_res_empty_receipts is 0
assert_eq!(exec_res_empty_receipts.len(), 0);
// Assert that exec_res_empty_receipts is empty
assert!(exec_res_empty_receipts.is_empty());
}
#[test]
fn test_revert_to() {
// Create a random receipt object
let receipt = SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
});
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]];
// Define the first block number
let first_block = 123;
// Create a request.
let request = bytes!("deadbeef");
// Create a vector of Requests containing the request.
let requests =
vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])];
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let mut exec_res =
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
// Assert that the revert_to method returns true when reverting to the initial block number.
assert!(exec_res.revert_to(123));
// Assert that the receipts are properly cut after reverting to the initial block number.
assert_eq!(exec_res.receipts, vec![vec![Some(receipt)]]);
// Assert that the requests are properly cut after reverting to the initial block number.
assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]);
// Assert that the revert_to method returns false when attempting to revert to a block
// number greater than the initial block number.
assert!(!exec_res.revert_to(133));
// Assert that the revert_to method returns false when attempting to revert to a block
// number less than the initial block number.
assert!(!exec_res.revert_to(10));
}
#[test]
fn test_extend_execution_outcome() {
// Create a Receipt object with specific attributes.
let receipt = SeismicReceipt::Legacy(Receipt {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
});
// Create a Receipts object containing the receipt.
let receipts = vec![vec![Some(receipt.clone())]];
// Create a request.
let request = bytes!("deadbeef");
// Create a vector of Requests containing the request.
let requests = vec![Requests::new(vec![request.clone()])];
// Define the initial block number.
let first_block = 123;
// Create an ExecutionOutcome object.
let mut exec_res =
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
// Extend the ExecutionOutcome object by itself.
exec_res.extend(exec_res.clone());
// Assert the extended ExecutionOutcome matches the expected outcome.
assert_eq!(
exec_res,
ExecutionOutcome {
bundle: Default::default(),
receipts: vec![vec![Some(receipt.clone())], vec![Some(receipt)]],
requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])],
first_block: 123,
}
);
}
#[test]
fn test_split_at_execution_outcome() {
// Create a random receipt object
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/evm/src/build.rs | crates/seismic/evm/src/build.rs | //! The SeismicBlockAssembler. Nearly identical to the EthBlockAssembler, except with seismic types
use alloc::sync::Arc;
use alloy_consensus::{
proofs, Block, BlockBody, BlockHeader, Header, Transaction, TxReceipt, EMPTY_OMMER_ROOT_HASH,
};
use alloy_eips::merge::BEACON_NONCE;
use alloy_evm::block::BlockExecutorFactory;
use alloy_primitives::{logs_bloom, Bytes};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_evm::{
eth::EthBlockExecutionCtx,
execute::{BlockAssembler, BlockAssemblerInput},
};
use reth_execution_errors::BlockExecutionError;
use reth_execution_types::BlockExecutionResult;
use reth_seismic_primitives::{SeismicReceipt, SeismicTransactionSigned};
/// Block builder for Seismic.
#[derive(Debug, Clone)]
pub struct SeismicBlockAssembler<ChainSpec> {
/// The chainspec.
pub chain_spec: Arc<ChainSpec>,
/// Extra data to use for the blocks.
pub extra_data: Bytes,
}
impl<ChainSpec> SeismicBlockAssembler<ChainSpec> {
/// Creates a new [`SeismicBlockAssembler`].
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec, extra_data: Default::default() }
}
}
impl<F, ChainSpec> BlockAssembler<F> for SeismicBlockAssembler<ChainSpec>
where
F: for<'a> BlockExecutorFactory<
ExecutionCtx<'a> = EthBlockExecutionCtx<'a>,
Transaction = SeismicTransactionSigned,
Receipt = SeismicReceipt,
>,
ChainSpec: EthChainSpec + EthereumHardforks,
{
type Block = Block<SeismicTransactionSigned>;
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, F>,
) -> Result<Block<SeismicTransactionSigned>, BlockExecutionError> {
let BlockAssemblerInput {
evm_env,
execution_ctx: ctx,
parent,
transactions,
output: BlockExecutionResult { receipts, requests, gas_used },
state_root,
..
} = input;
// EVM block env timestamp is in milliseconds when feature is disabled, seconds when enabled
let timestamp = evm_env.block_env.timestamp.saturating_to();
// For fork activation checks, we always need seconds
let timestamp_seconds =
if cfg!(feature = "timestamp-in-seconds") { timestamp } else { timestamp / 1000 };
let transactions_root = proofs::calculate_transaction_root(&transactions);
let receipts_root = SeismicReceipt::calculate_receipt_root_no_memo(receipts);
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| r.logs()));
let withdrawals = self
.chain_spec
.is_shanghai_active_at_timestamp(timestamp_seconds)
.then(|| ctx.withdrawals.map(|w| w.into_owned()).unwrap_or_default());
let withdrawals_root =
withdrawals.as_deref().map(|w| proofs::calculate_withdrawals_root(w));
let requests_hash = self
.chain_spec
.is_prague_active_at_timestamp(timestamp_seconds)
.then(|| requests.requests_hash());
let mut excess_blob_gas = None;
let mut blob_gas_used = None;
// only determine cancun fields when active
if self.chain_spec.is_cancun_active_at_timestamp(timestamp_seconds) {
blob_gas_used =
Some(transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum());
excess_blob_gas =
if self.chain_spec.is_cancun_active_at_timestamp(parent.timestamp_seconds()) {
parent.maybe_next_block_excess_blob_gas(
self.chain_spec.blob_params_at_timestamp(timestamp_seconds),
)
} else {
// for the first post-fork block, both parent.blob_gas_used and
// parent.excess_blob_gas are evaluated as 0
Some(
alloy_eips::eip7840::BlobParams::cancun()
.next_block_excess_blob_gas_osaka(0, 0, 0),
)
};
}
let header = Header {
parent_hash: ctx.parent_hash,
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: evm_env.block_env.beneficiary,
state_root,
transactions_root,
receipts_root,
withdrawals_root,
logs_bloom,
timestamp,
mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(),
nonce: BEACON_NONCE.into(),
base_fee_per_gas: Some(evm_env.block_env.basefee),
number: evm_env.block_env.number.saturating_to(),
gas_limit: evm_env.block_env.gas_limit,
difficulty: evm_env.block_env.difficulty,
gas_used: *gas_used,
extra_data: self.extra_data.clone(),
parent_beacon_block_root: ctx.parent_beacon_block_root,
blob_gas_used,
excess_blob_gas,
requests_hash,
};
Ok(Block {
header,
body: BlockBody { transactions, ommers: Default::default(), withdrawals },
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/evm/src/receipts.rs | crates/seismic/evm/src/receipts.rs | use alloy_consensus::{Eip658Value, Receipt};
use alloy_evm::eth::receipt_builder::{ReceiptBuilder, ReceiptBuilderCtx};
use reth_evm::Evm;
use reth_seismic_primitives::{SeismicReceipt, SeismicTransactionSigned};
use seismic_alloy_consensus::SeismicTxType;
/// A builder that operates on seismic-reth primitive types, specifically
/// [`SeismicTransactionSigned`] and [`SeismicReceipt`].
///
/// Why is this different than SeismicAlloyReceiptBuilder in seismic-evm? Can we reuse code?
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct SeismicRethReceiptBuilder;
impl ReceiptBuilder for SeismicRethReceiptBuilder {
type Transaction = SeismicTransactionSigned;
type Receipt = SeismicReceipt;
fn build_receipt<E: Evm>(
&self,
ctx: ReceiptBuilderCtx<'_, SeismicTransactionSigned, E>,
) -> Self::Receipt {
match ctx.tx.tx_type() {
ty => {
let receipt = Receipt {
status: Eip658Value::Eip658(ctx.result.is_success()),
cumulative_gas_used: ctx.cumulative_gas_used,
logs: ctx.result.into_logs(),
};
match ty {
SeismicTxType::Legacy => SeismicReceipt::Legacy(receipt),
SeismicTxType::Eip1559 => SeismicReceipt::Eip1559(receipt),
SeismicTxType::Eip2930 => SeismicReceipt::Eip2930(receipt),
SeismicTxType::Eip4844 => SeismicReceipt::Eip4844(receipt),
SeismicTxType::Eip7702 => SeismicReceipt::Eip7702(receipt),
SeismicTxType::Seismic => SeismicReceipt::Seismic(receipt),
}
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/reth/src/lib.rs | crates/seismic/reth/src/lib.rs | //! Seismic meta crate that provides access to commonly used reth dependencies.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(unused_crate_dependencies)]
/// Re-exported ethereum types
#[doc(inline)]
pub use reth_seismic_primitives::*;
/// Re-exported reth primitives
pub mod primitives {
#[doc(inline)]
pub use reth_primitives_traits::*;
}
/// Re-exported cli types
#[cfg(feature = "cli")]
pub use reth_seismic_cli as cli;
/// Re-exported from `reth_chainspec`
pub mod chainspec {
#[doc(inline)]
pub use reth_chainspec::*;
}
/// Re-exported evm types
#[cfg(feature = "evm")]
pub mod evm {
#[doc(inline)]
pub use reth_seismic_evm::*;
#[doc(inline)]
pub use reth_evm as primitives;
}
/// Re-exported reth network types
#[cfg(feature = "network")]
pub mod network {
#[doc(inline)]
pub use reth_network::*;
}
/// Re-exported reth provider types
#[cfg(feature = "provider")]
pub mod provider {
#[doc(inline)]
pub use reth_provider::*;
#[doc(inline)]
pub use reth_db as db;
}
/// Re-exported reth storage api types
#[cfg(feature = "storage-api")]
pub mod storage {
#[doc(inline)]
pub use reth_storage_api::*;
}
/// Re-exported ethereum node
#[cfg(feature = "node-api")]
pub mod node {
#[doc(inline)]
pub use reth_node_api as api;
#[cfg(feature = "node")]
pub use reth_seismic_node::*;
}
/// Re-exported reth trie types
#[cfg(feature = "trie")]
pub mod trie {
#[doc(inline)]
pub use reth_trie::*;
}
/// Re-exported rpc types
#[cfg(feature = "rpc")]
pub mod rpc {
#[doc(inline)]
pub use reth_rpc::*;
#[doc(inline)]
pub use reth_seismic_rpc::*;
#[doc(inline)]
pub use reth_rpc_api as api;
#[doc(inline)]
pub use reth_rpc_builder as builder;
#[doc(inline)]
pub use reth_rpc_eth_types as eth;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/txpool/src/lib.rs | crates/seismic/txpool/src/lib.rs | //! Seismic-Reth Transaction pool.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use reth_transaction_pool::{
CoinbaseTipOrdering, EthTransactionValidator, Pool, TransactionValidationTaskExecutor,
};
/// Type alias for default seismic transaction pool
pub type SeismicTransactionPool<Client, S, T = SeismicPooledTransaction> = Pool<
TransactionValidationTaskExecutor<EthTransactionValidator<Client, T>>,
CoinbaseTipOrdering<T>,
S,
>;
mod transaction;
pub use transaction::SeismicPooledTransaction;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/txpool/src/transaction.rs | crates/seismic/txpool/src/transaction.rs | use alloy_consensus::{transaction::Recovered, BlobTransactionValidationError, Typed2718};
use alloy_eips::{
eip2930::AccessList, eip7594::BlobTransactionSidecarVariant, eip7702::SignedAuthorization,
Encodable2718,
};
use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256};
use c_kzg::KzgSettings;
use core::fmt::Debug;
use reth_primitives_traits::{InMemorySize, SignedTransaction};
use reth_seismic_primitives::SeismicTransactionSigned;
use reth_transaction_pool::{
EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, PoolTransaction,
};
use seismic_alloy_consensus::SeismicTxEnvelope;
use std::sync::Arc;
/// Pool Transaction for Seismic.
#[derive(Debug, Clone, derive_more::Deref)]
pub struct SeismicPooledTransaction<Cons = SeismicTransactionSigned, Pooled = SeismicTxEnvelope> {
#[deref]
inner: EthPooledTransaction<Cons>,
/// The pooled transaction type.
_pd: core::marker::PhantomData<Pooled>,
}
impl<Cons: SignedTransaction, Pooled> SeismicPooledTransaction<Cons, Pooled> {
/// Create a new [`SeismicPooledTransaction`].
pub fn new(transaction: Recovered<Cons>, encoded_length: usize) -> Self {
Self {
inner: EthPooledTransaction::new(transaction, encoded_length),
_pd: core::marker::PhantomData,
}
}
}
impl<Cons, Pooled> PoolTransaction for SeismicPooledTransaction<Cons, Pooled>
where
Cons: SignedTransaction + From<Pooled>,
Pooled: SignedTransaction + TryFrom<Cons, Error: core::error::Error>,
{
type TryFromConsensusError = <Pooled as TryFrom<Cons>>::Error;
type Consensus = Cons;
type Pooled = Pooled;
fn hash(&self) -> &TxHash {
self.inner.transaction.tx_hash()
}
fn sender(&self) -> Address {
self.inner.transaction.signer()
}
fn sender_ref(&self) -> &Address {
self.inner.transaction.signer_ref()
}
fn cost(&self) -> &U256 {
&self.inner.cost
}
fn encoded_length(&self) -> usize {
self.inner.encoded_length
}
fn clone_into_consensus(&self) -> Recovered<Self::Consensus> {
self.inner.transaction().clone()
}
fn into_consensus(self) -> Recovered<Self::Consensus> {
self.inner.transaction
}
fn from_pooled(tx: Recovered<Self::Pooled>) -> Self {
let encoded_len = tx.encode_2718_len();
Self::new(tx.convert(), encoded_len)
}
}
impl<Cons: Typed2718, Pooled> Typed2718 for SeismicPooledTransaction<Cons, Pooled> {
fn ty(&self) -> u8 {
self.inner.ty()
}
}
impl<Cons: InMemorySize, Pooled> InMemorySize for SeismicPooledTransaction<Cons, Pooled> {
fn size(&self) -> usize {
self.inner.size()
}
}
impl<Cons, Pooled> alloy_consensus::Transaction for SeismicPooledTransaction<Cons, Pooled>
where
Cons: alloy_consensus::Transaction + SignedTransaction, // Ensure Cons has the methods
Pooled: Debug + Send + Sync + 'static, /* From Optimism example, for
* completeness */
{
fn chain_id(&self) -> Option<u64> {
self.inner.chain_id()
}
fn nonce(&self) -> u64 {
self.inner.nonce()
}
fn gas_limit(&self) -> u64 {
self.inner.gas_limit()
}
fn gas_price(&self) -> Option<u128> {
self.inner.gas_price()
}
fn max_fee_per_gas(&self) -> u128 {
self.inner.max_fee_per_gas()
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
self.inner.max_priority_fee_per_gas()
}
fn max_fee_per_blob_gas(&self) -> Option<u128> {
self.inner.max_fee_per_blob_gas()
}
fn value(&self) -> U256 {
self.inner.value()
}
fn input(&self) -> &Bytes {
self.inner.input()
}
fn access_list(&self) -> Option<&AccessList> {
self.inner.access_list()
}
fn blob_versioned_hashes(&self) -> Option<&[B256]> {
self.inner.blob_versioned_hashes()
}
fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
self.inner.authorization_list()
}
fn priority_fee_or_price(&self) -> u128 {
self.inner.priority_fee_or_price()
}
fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
self.inner.effective_gas_price(base_fee)
}
fn is_dynamic_fee(&self) -> bool {
self.inner.is_dynamic_fee()
}
fn kind(&self) -> TxKind {
self.inner.kind()
}
fn is_create(&self) -> bool {
self.inner.is_create()
}
}
impl<Cons, Pooled> EthPoolTransaction for SeismicPooledTransaction<Cons, Pooled>
where
Cons: SignedTransaction + From<Pooled>,
Pooled: SignedTransaction + TryFrom<Cons>,
<Pooled as TryFrom<Cons>>::Error: core::error::Error,
{
fn take_blob(&mut self) -> EthBlobTransactionSidecar {
EthBlobTransactionSidecar::None
}
fn try_into_pooled_eip4844(
self,
_sidecar: Arc<BlobTransactionSidecarVariant>,
) -> Option<Recovered<Self::Pooled>> {
None
}
fn try_from_eip4844(
_tx: Recovered<Self::Consensus>,
_sidecar: BlobTransactionSidecarVariant,
) -> Option<Self> {
None
}
fn validate_blob(
&self,
_sidecar: &BlobTransactionSidecarVariant,
_settings: &KzgSettings,
) -> Result<(), BlobTransactionValidationError> {
Err(BlobTransactionValidationError::NotBlobTransaction(self.ty()))
}
}
#[cfg(test)]
mod tests {
use crate::SeismicPooledTransaction;
use alloy_consensus::transaction::Recovered;
use alloy_eips::eip2718::Encodable2718;
use reth_primitives_traits::transaction::error::InvalidTransactionError;
use reth_provider::test_utils::MockEthProvider;
use reth_seismic_chainspec::SEISMIC_MAINNET;
use reth_seismic_primitives::test_utils::get_signed_seismic_tx;
use reth_transaction_pool::{
blobstore::InMemoryBlobStore, error::InvalidPoolTransactionError,
validate::EthTransactionValidatorBuilder, TransactionOrigin, TransactionValidationOutcome,
};
#[tokio::test]
async fn validate_seismic_transaction() {
// setup validator
let client = MockEthProvider::default().with_chain_spec(SEISMIC_MAINNET.clone());
let validator = EthTransactionValidatorBuilder::new(client)
.no_shanghai()
.no_cancun()
.build(InMemoryBlobStore::default());
// check that a SeismicTypedTransaction::Seismic is valid
let origin = TransactionOrigin::External;
let signer = Default::default();
let signed_seismic_tx = get_signed_seismic_tx();
let signed_recovered = Recovered::new_unchecked(signed_seismic_tx, signer);
let len = signed_recovered.encode_2718_len();
let pooled_tx: SeismicPooledTransaction =
SeismicPooledTransaction::new(signed_recovered, len);
let outcome = validator.validate_one(origin, pooled_tx);
match outcome {
TransactionValidationOutcome::Invalid(
_,
InvalidPoolTransactionError::Consensus(InvalidTransactionError::InsufficientFunds(
_,
)),
) => {
// expected since the client (MockEthProvider) state does not have funds for any
// accounts account balance is one of the last things checked in
// validate_one, so getting that far good news
}
_ => panic!("Did not get expected outcome, got: {:?}", outcome),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/hardforks/src/lib.rs | crates/seismic/hardforks/src/lib.rs | //! Seismic-Reth hard forks.
extern crate alloc;
use alloc::vec;
use alloy_primitives::uint;
use once_cell::sync::Lazy as LazyLock;
use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork};
/// Seismic hardfork enum
#[derive(Clone, Debug)]
#[allow(missing_docs)]
pub enum SeismicHardfork {
Mercury,
}
impl Hardfork for SeismicHardfork {
fn name(&self) -> &'static str {
match self {
Self::Mercury => "Mercury",
}
}
}
/// Mainnet hardforks
/// Based off EthereumHardfork::mainnet(),
/// with existing eth hardforks activated at block 0
pub static SEISMIC_MAINNET_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
(
EthereumHardfork::Paris.boxed(),
ForkCondition::TTD {
activation_block_number: 0,
fork_block: None,
total_difficulty: uint!(58_750_000_000_000_000_000_000_U256),
},
),
(EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)),
(EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)),
(EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)),
(SeismicHardfork::Mercury.boxed(), ForkCondition::Timestamp(0)),
])
});
/// Dev hardforks
pub static SEISMIC_DEV_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
(
EthereumHardfork::Paris.boxed(),
ForkCondition::TTD {
activation_block_number: 0,
fork_block: None,
total_difficulty: uint!(58_750_000_000_000_000_000_000_U256),
},
),
(EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)),
(EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)),
(EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)),
(SeismicHardfork::Mercury.boxed(), ForkCondition::Timestamp(0)),
])
});
#[cfg(test)]
mod tests {
use super::*;
use core::panic;
#[test]
fn check_ethereum_hardforks_at_zero() {
let eth_mainnet_forks = EthereumHardfork::mainnet();
let seismic_hardforks = SEISMIC_MAINNET_HARDFORKS.clone();
for eth_hf in eth_mainnet_forks {
let (fork, _) = eth_hf;
let lookup = seismic_hardforks.get(fork);
match lookup {
Some(condition) => {
if fork <= EthereumHardfork::Prague {
assert!(
condition.active_at_timestamp(0) || condition.active_at_block(0),
"Hardfork {} not active at timestamp 1",
fork
);
}
}
None => {
panic!("Hardfork {} not found in hardforks", fork);
}
}
}
}
#[test]
fn check_seismic_hardforks_at_zero() {
let seismic_hardforks = SEISMIC_MAINNET_HARDFORKS.clone();
assert!(
seismic_hardforks.get(SeismicHardfork::Mercury).is_some(),
"Missing hardfork mercury"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/node.rs | crates/seismic/node/src/node.rs | //! Seismic Node types config.
use crate::{
engine::{SeismicEngineTypes, SeismicEngineValidator},
txpool::SeismicTransactionPool,
};
use alloy_eips::merge::EPOCH_SLOTS;
use alloy_rpc_types_engine::ExecutionData;
use reth_chainspec::{ChainSpec, EthChainSpec};
use reth_consensus::{ConsensusError, FullConsensus};
use reth_engine_primitives::{NoopInvalidBlockHook, TreeConfig};
use reth_eth_wire_types::NewBlock;
use reth_evm::{
ConfigureEngineEvm, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes,
};
use reth_network::{NetworkHandle, NetworkPrimitives};
use reth_node_api::{AddOnsContext, FullNodeComponents, NodeAddOns, PrimitivesTy, TxTy};
use reth_node_builder::{
components::{
BasicPayloadServiceBuilder, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder,
NetworkBuilder, PayloadBuilderBuilder, PoolBuilder,
},
node::{FullNodeTypes, NodeTypes},
rpc::{
BasicEngineApiBuilder, BasicEngineValidator, BasicEngineValidatorBuilder, EngineApiBuilder,
EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, PayloadValidatorBuilder,
RethRpcAddOns, RethRpcMiddleware, RpcAddOns, RpcHandle, RpcModuleContainer,
},
BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig,
};
use reth_node_ethereum::consensus::EthBeaconConsensus;
use reth_payload_primitives::PayloadAttributesBuilder;
use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, EthStorage};
use reth_rpc::ValidationApi;
use reth_rpc_api::BlockSubmissionValidationApiServer;
use reth_rpc_builder::{config::RethRpcServerConfig, Identity};
use reth_rpc_eth_types::{
error::{api::FromEvmHalt, FromEvmError},
EthApiError,
};
use reth_rpc_server_types::RethRpcModule;
use reth_seismic_evm::SeismicEvmConfig;
use reth_seismic_payload_builder::SeismicBuilderConfig;
use reth_seismic_primitives::{SeismicPrimitives, SeismicReceipt, SeismicTransactionSigned};
use reth_seismic_rpc::{SeismicEthApiBuilder, SeismicEthApiError, SeismicRethWithSignable};
use reth_transaction_pool::{
blobstore::{DiskFileBlobStore, DiskFileBlobStoreConfig},
CoinbaseTipOrdering, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor,
};
use revm::context::TxEnv;
use seismic_alloy_consensus::SeismicTxEnvelope;
use std::{sync::Arc, time::SystemTime};
use crate::seismic_evm_config;
/// Storage implementation for Seismic.
pub type SeismicStorage = EthStorage<SeismicTransactionSigned>;
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
/// Type configuration for a regular Seismic node.
pub struct SeismicNode;
impl SeismicNode {
/// Returns the components for the given [`EnclaveArgs`].
pub fn components<Node>(
&self,
) -> ComponentsBuilder<
Node,
SeismicPoolBuilder,
BasicPayloadServiceBuilder<SeismicPayloadBuilder>,
SeismicNetworkBuilder,
SeismicExecutorBuilder,
SeismicConsensusBuilder,
>
where
Node: FullNodeTypes<
Types: NodeTypes<
Payload = SeismicEngineTypes,
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
>,
>,
{
ComponentsBuilder::default()
.node_types::<Node>()
.pool(SeismicPoolBuilder::default())
.executor(SeismicExecutorBuilder::default())
.payload(BasicPayloadServiceBuilder::<SeismicPayloadBuilder>::default())
.network(SeismicNetworkBuilder::default())
.executor(SeismicExecutorBuilder::default())
.consensus(SeismicConsensusBuilder::default())
}
/// Instantiates the [`ProviderFactoryBuilder`] for an opstack node.
///
/// # Open a Providerfactory in read-only mode from a datadir
///
/// See also: [`ProviderFactoryBuilder`] and
/// [`ReadOnlyConfig`](reth_provider::providers::ReadOnlyConfig).
///
/// ```no_run
/// use reth_chainspec::BASE_MAINNET;
/// use reth_seismic_node::SeismicNode;
///
/// let factory = SeismicNode::provider_factory_builder()
/// .open_read_only(BASE_MAINNET.clone(), "datadir")
/// .unwrap();
/// ```
///
/// # Open a Providerfactory manually with with all required components
///
/// ```no_run
/// use reth_chainspec::ChainSpecBuilder;
/// use reth_db::open_db_read_only;
/// use reth_provider::providers::StaticFileProvider;
/// use reth_seismic_node::SeismicNode;
/// use std::sync::Arc;
///
/// let factory = SeismicNode::provider_factory_builder()
/// .db(Arc::new(open_db_read_only("db", Default::default()).unwrap()))
/// .chainspec(ChainSpecBuilder::base_mainnet().build().into())
/// .static_file(StaticFileProvider::read_only("db/static_files", false).unwrap())
/// .build_provider_factory();
/// ```
pub fn provider_factory_builder() -> ProviderFactoryBuilder<Self> {
ProviderFactoryBuilder::default()
}
}
impl<N> Node<N> for SeismicNode
where
N: FullNodeTypes<
Types: NodeTypes<
Payload = SeismicEngineTypes,
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Storage = SeismicStorage,
>,
>,
{
type ComponentsBuilder = ComponentsBuilder<
N,
SeismicPoolBuilder,
BasicPayloadServiceBuilder<SeismicPayloadBuilder>,
SeismicNetworkBuilder,
SeismicExecutorBuilder,
SeismicConsensusBuilder,
>;
type AddOns = SeismicAddOns<
NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
SeismicEthApiBuilder<SeismicRethWithSignable>,
SeismicEngineValidatorBuilder,
BasicEngineApiBuilder<SeismicEngineValidatorBuilder>,
BasicEngineValidatorBuilder<SeismicEngineValidatorBuilder>,
Identity,
>;
fn components_builder(&self) -> Self::ComponentsBuilder {
Self::components(self)
}
fn add_ons(&self) -> Self::AddOns {
Self::AddOns::builder().build::<
NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
SeismicEthApiBuilder<SeismicRethWithSignable>,
SeismicEngineValidatorBuilder,
BasicEngineApiBuilder<SeismicEngineValidatorBuilder>,
BasicEngineValidatorBuilder<SeismicEngineValidatorBuilder>,
Identity
>()
}
}
impl NodeTypes for SeismicNode {
type Primitives = SeismicPrimitives;
type ChainSpec = ChainSpec;
type Storage = SeismicStorage;
type Payload = SeismicEngineTypes;
}
impl<N> DebugNode<N> for SeismicNode
where
N: FullNodeComponents<Types = Self>,
{
type RpcBlock = alloy_rpc_types_eth::Block<seismic_alloy_consensus::SeismicTxEnvelope>;
fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy<Self> {
let alloy_rpc_types_eth::Block { header, transactions, .. } = rpc_block;
reth_seismic_primitives::SeismicBlock {
header: header.inner,
body: reth_seismic_primitives::SeismicBlockBody {
transactions: transactions.into_transactions().map(Into::into).collect(),
..Default::default()
},
}
}
fn local_payload_attributes_builder(
chain_spec: &Self::ChainSpec,
) -> impl PayloadAttributesBuilder<
<<Self as reth_node_api::NodeTypes>::Payload as reth_node_api::PayloadTypes>::PayloadAttributes,
>{
reth_engine_local::LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone()))
}
}
/// Add-ons w.r.t. seismic
#[derive(Debug)]
pub struct SeismicAddOns<
N: FullNodeComponents,
EthB: EthApiBuilder<N> = SeismicEthApiBuilder<SeismicRethWithSignable>,
PVB = SeismicEngineValidatorBuilder,
EB = BasicEngineApiBuilder<SeismicEngineValidatorBuilder>,
EVB = BasicEngineValidatorBuilder<SeismicEngineValidatorBuilder>,
RpcMiddleware = Identity,
> {
inner: RpcAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>,
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> SeismicAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents,
EthB: EthApiBuilder<N>,
{
/// Build a [`SeismicAddOns`] using [`SeismicAddOnsBuilder`].
pub fn builder() -> SeismicAddOnsBuilder {
SeismicAddOnsBuilder::default()
}
}
/// A regular seismic evm and executor builder.
#[derive(Debug, Default, Clone)]
pub struct SeismicAddOnsBuilder {}
impl SeismicAddOnsBuilder {
/// Builds an instance of [`SeismicAddOns`].
pub fn build<N, EthB, PVB, EB, EVB, RpcMiddleware>(
self,
) -> SeismicAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Storage = SeismicStorage,
Payload = SeismicEngineTypes,
>,
Evm: ConfigureEvm<NextBlockEnvCtx = NextBlockEnvAttributes>,
>,
EthB: EthApiBuilder<N> + Default,
PVB: Default,
EB: Default,
EVB: Default,
RpcMiddleware: Default,
{
SeismicAddOns {
inner: RpcAddOns::new(
EthB::default(),
PVB::default(),
EB::default(),
EVB::default(),
RpcMiddleware::default(),
),
}
}
}
impl<N> Default for SeismicAddOns<N>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Storage = SeismicStorage,
Payload = SeismicEngineTypes,
>,
Evm: ConfigureEvm<NextBlockEnvCtx = NextBlockEnvAttributes>,
>,
SeismicEthApiBuilder<SeismicRethWithSignable>: EthApiBuilder<N>,
{
fn default() -> Self {
Self::builder().build::<
N,
SeismicEthApiBuilder<SeismicRethWithSignable>,
SeismicEngineValidatorBuilder,
BasicEngineApiBuilder<SeismicEngineValidatorBuilder>,
BasicEngineValidatorBuilder<SeismicEngineValidatorBuilder>,
Identity
>()
}
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> NodeAddOns<N>
for SeismicAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Storage = SeismicStorage,
Payload = SeismicEngineTypes,
>,
Evm: ConfigureEvm<NextBlockEnvCtx = NextBlockEnvAttributes>,
>,
EthB: EthApiBuilder<N>,
PVB: PayloadValidatorBuilder<N>,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N>,
RpcMiddleware: RethRpcMiddleware,
EthApiError: FromEvmError<N::Evm>,
SeismicEthApiError:
FromEvmError<N::Evm> + FromEvmHalt<<EvmFactoryFor<N::Evm> as EvmFactory>::HaltReason>,
EvmFactoryFor<N::Evm>: EvmFactory<Tx = seismic_revm::SeismicTransaction<TxEnv>>,
{
type Handle = RpcHandle<N, EthB::EthApi>;
async fn launch_add_ons(
self,
ctx: reth_node_api::AddOnsContext<'_, N>,
) -> eyre::Result<Self::Handle> {
let validation_api = ValidationApi::new(
ctx.node.provider().clone(),
Arc::new(ctx.node.consensus().clone()),
ctx.node.evm_config().clone(),
ctx.config.rpc.flashbots_config(),
Box::new(ctx.node.task_executor().clone()),
Arc::new(SeismicEngineValidator::new(ctx.config.chain.clone())),
);
self.inner
.launch_add_ons_with(ctx, move |container| {
let RpcModuleContainer { modules, .. } = container;
modules.merge_if_module_configured(
RethRpcModule::Flashbots,
validation_api.into_rpc(),
)?;
Ok(())
})
.await
}
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> RethRpcAddOns<N>
for SeismicAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Storage = SeismicStorage,
Payload = SeismicEngineTypes,
>,
Evm: ConfigureEvm<NextBlockEnvCtx = NextBlockEnvAttributes>,
>,
EthB: EthApiBuilder<N>,
PVB: PayloadValidatorBuilder<N>,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N>,
RpcMiddleware: RethRpcMiddleware,
EthApiError: FromEvmError<N::Evm>,
SeismicEthApiError: FromEvmError<N::Evm>,
EvmFactoryFor<N::Evm>: EvmFactory<Tx = seismic_revm::SeismicTransaction<TxEnv>>,
{
type EthApi = EthB::EthApi;
fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks<N, Self::EthApi> {
self.inner.hooks_mut()
}
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> EngineValidatorAddOn<N>
for SeismicAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Storage = SeismicStorage,
Payload = SeismicEngineTypes,
>,
Evm: ConfigureEvm<NextBlockEnvCtx = NextBlockEnvAttributes>
+ ConfigureEngineEvm<ExecutionData>,
>,
EthB: EthApiBuilder<N>,
PVB: PayloadValidatorBuilder<N>,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N> + Send,
RpcMiddleware: Send,
{
type ValidatorBuilder = EVB;
fn engine_validator_builder(&self) -> Self::ValidatorBuilder {
EngineValidatorAddOn::engine_validator_builder(&self.inner)
}
}
/// A regular seismic evm and executor builder.
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct SeismicExecutorBuilder;
impl<Node> ExecutorBuilder<Node> for SeismicExecutorBuilder
where
Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = SeismicPrimitives>>,
{
type EVM = SeismicEvmConfig;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let purpose_keys = crate::purpose_keys::get_purpose_keys();
let evm_config = seismic_evm_config(ctx.chain_spec(), purpose_keys);
Ok(evm_config)
}
}
/// A basic ethereum transaction pool.
///
/// This contains various settings that can be configured and take precedence over the node's
/// config.
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct SeismicPoolBuilder;
impl<Node> PoolBuilder<Node> for SeismicPoolBuilder
where
Node: FullNodeTypes<
Types: NodeTypes<
Payload = SeismicEngineTypes,
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
>,
>,
// T: EthPoolTransaction<Consensus = TxTy<Node::Types>>
// + MaybeConditionalTransaction
// + MaybeInteropTransaction,
{
type Pool = SeismicTransactionPool<Node::Provider, DiskFileBlobStore>;
async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
let data_dir = ctx.config().datadir();
let pool_config = ctx.pool_config();
let blob_cache_size = if let Some(blob_cache_size) = pool_config.blob_cache_size {
blob_cache_size
} else {
// get the current blob params for the current timestamp
let current_timestamp =
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_secs();
let blob_params = ctx
.chain_spec()
.blob_params_at_timestamp(current_timestamp)
.unwrap_or(ctx.chain_spec().blob_params.cancun);
// Derive the blob cache size from the target blob count, to auto scale it by
// multiplying it with the slot count for 2 epochs: 384 for pectra
(blob_params.target_blob_count * EPOCH_SLOTS * 2) as u32
};
let custom_config =
DiskFileBlobStoreConfig::default().with_max_cached_entries(blob_cache_size);
let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), custom_config)?;
let head_timestamp_seconds = if cfg!(feature = "timestamp-in-seconds") {
ctx.head().timestamp
} else {
ctx.head().timestamp / 1000
};
let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone())
.with_head_timestamp(head_timestamp_seconds)
.kzg_settings(ctx.kzg_settings()?)
.with_local_transactions_config(pool_config.local_transactions_config.clone())
.with_additional_tasks(ctx.config().txpool.additional_validation_tasks)
.build_with_tasks(ctx.task_executor().clone(), blob_store.clone());
let transaction_pool = reth_transaction_pool::Pool::new(
validator,
CoinbaseTipOrdering::default(),
blob_store,
pool_config,
);
// info!(target: "reth::cli", "Transaction pool initialized");
let transactions_path = data_dir.txpool_transactions();
// spawn txpool maintenance task
{
let pool = transaction_pool.clone();
let chain_events = ctx.provider().canonical_state_stream();
let client = ctx.provider().clone();
let transactions_backup_config =
reth_transaction_pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(transactions_path);
ctx.task_executor().spawn_critical_with_graceful_shutdown_signal(
"local transactions backup task",
|shutdown| {
reth_transaction_pool::maintain::backup_local_transactions_task(
shutdown,
pool.clone(),
transactions_backup_config,
)
},
);
// spawn the maintenance task
ctx.task_executor().spawn_critical(
"txpool maintenance task",
reth_transaction_pool::maintain::maintain_transaction_pool_future(
client,
pool,
chain_events,
ctx.task_executor().clone(),
reth_transaction_pool::maintain::MaintainPoolConfig {
max_tx_lifetime: transaction_pool.config().max_queued_lifetime,
..Default::default()
},
),
);
// debug!(target: "reth::cli", "Spawned txpool maintenance task");
}
Ok(transaction_pool)
}
}
/// A basic seismic payload service builder
#[derive(Debug, Default, Clone)]
pub struct SeismicPayloadBuilder;
impl SeismicPayloadBuilder {
/// A helper method initializing [`reth_ethereum_payload_builder::EthereumPayloadBuilder`]
/// with the given EVM config.
pub fn build<Types, Node, Evm, Pool>(
self,
evm_config: Evm,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<reth_seismic_payload_builder::SeismicPayloadBuilder<Pool, Node::Provider, Evm>>
where
Node: FullNodeTypes<
Types: NodeTypes<
Payload = SeismicEngineTypes,
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
>,
>,
Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
+ Unpin
+ 'static,
Evm: ConfigureEvm<Primitives = PrimitivesTy<Node::Types>>,
// Txs: SeismicPayloadTransactions<Pool::Transaction>,
{
let conf = ctx.payload_builder_config();
let chain = ctx.chain_spec().chain();
let gas_limit = conf.gas_limit_for(chain);
Ok(reth_seismic_payload_builder::SeismicPayloadBuilder::new(
ctx.provider().clone(),
pool,
evm_config,
SeismicBuilderConfig::new().with_gas_limit(gas_limit),
))
}
}
impl<Node, Pool> PayloadBuilderBuilder<Node, Pool, SeismicEvmConfig> for SeismicPayloadBuilder
where
Node: FullNodeTypes<
Types: NodeTypes<
Payload = SeismicEngineTypes,
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
>,
>,
Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
+ Unpin
+ 'static,
{
type PayloadBuilder =
reth_seismic_payload_builder::SeismicPayloadBuilder<Pool, Node::Provider, SeismicEvmConfig>;
async fn build_payload_builder(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
evm_config: SeismicEvmConfig,
) -> eyre::Result<Self::PayloadBuilder> {
let conf = ctx.payload_builder_config();
let chain = ctx.chain_spec().chain();
let gas_limit = conf.gas_limit_for(chain);
let payload_builder = reth_seismic_payload_builder::SeismicPayloadBuilder::new(
ctx.provider().clone(),
pool,
evm_config,
SeismicBuilderConfig::new().with_gas_limit(gas_limit),
);
Ok(payload_builder)
}
}
/// A basic ethereum payload service.
#[derive(Debug, Default, Clone, Copy)]
pub struct SeismicNetworkBuilder {
// TODO add closure to modify network
}
impl<Node, Pool> NetworkBuilder<Node, Pool> for SeismicNetworkBuilder
where
Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = SeismicPrimitives>>,
Pool: TransactionPool<
Transaction: PoolTransaction<Consensus = TxTy<Node::Types>, Pooled = SeismicTxEnvelope>, /* equiv to op_alloy_consensus::OpPooledTransaction>, */
> + Unpin
+ 'static,
{
type Network = NetworkHandle<SeismicNetworkPrimitives>;
async fn build_network(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
) -> eyre::Result<NetworkHandle<SeismicNetworkPrimitives>> {
let network = ctx.network_builder().await?;
let handle = ctx.start_network(network, pool);
// info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking
// initialized");
Ok(handle)
}
}
/// A basic seismic consensus builder.
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct SeismicConsensusBuilder;
impl<Node> ConsensusBuilder<Node> for SeismicConsensusBuilder
where
Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = SeismicPrimitives>>,
{
type Consensus = Arc<dyn FullConsensus<SeismicPrimitives, Error = ConsensusError>>;
async fn build_consensus(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Consensus> {
Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec())))
}
}
/// Builder for [`EthereumEngineValidator`].
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct SeismicEngineValidatorBuilder;
impl<Node, Types> EngineValidatorBuilder<Node> for SeismicEngineValidatorBuilder
where
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Payload = SeismicEngineTypes,
>,
Node: FullNodeComponents<Types = Types>,
Node::Evm: ConfigureEngineEvm<ExecutionData>,
{
type EngineValidator = BasicEngineValidator<Node::Provider, Node::Evm, SeismicEngineValidator>;
async fn build_tree_validator(
self,
ctx: &AddOnsContext<'_, Node>,
tree_config: TreeConfig,
) -> eyre::Result<Self::EngineValidator> {
let seismic_validator = SeismicEngineValidator::new(ctx.config.chain.clone());
Ok(BasicEngineValidator::new(
ctx.node.provider().clone(),
Arc::new(ctx.node.consensus().clone()),
ctx.node.evm_config().clone(),
seismic_validator,
tree_config,
Box::new(NoopInvalidBlockHook::default()),
))
}
}
impl<Node> PayloadValidatorBuilder<Node> for SeismicEngineValidatorBuilder
where
Node: FullNodeComponents<
Types: NodeTypes<
ChainSpec = ChainSpec,
Primitives = SeismicPrimitives,
Payload = SeismicEngineTypes,
>,
>,
{
type Validator = SeismicEngineValidator;
async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result<Self::Validator> {
Ok(SeismicEngineValidator::new(ctx.config.chain.clone()))
}
}
/// Network primitive types used by Seismic network.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub struct SeismicNetworkPrimitives;
impl NetworkPrimitives for SeismicNetworkPrimitives {
type BlockHeader = alloy_consensus::Header;
type BlockBody = alloy_consensus::BlockBody<SeismicTransactionSigned>;
type Block = alloy_consensus::Block<SeismicTransactionSigned>;
type BroadcastedTransaction = SeismicTransactionSigned;
type PooledTransaction = SeismicTxEnvelope;
type Receipt = SeismicReceipt;
type NewBlockPayload = NewBlock<Self::Block>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/evm.rs | crates/seismic/node/src/evm.rs | //! Ethereum EVM support
#[doc(inline)]
pub use reth_evm::execute::BasicBlockExecutorProvider;
#[doc(inline)]
pub use reth_evm_ethereum::execute::EthExecutorProvider;
#[doc(inline)]
pub use reth_evm_ethereum::{EthEvm, SeismicEvmConfig};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/engine.rs | crates/seismic/node/src/engine.rs | //! Ethereum specific engine API types and impls.
use std::sync::Arc;
use alloy_rpc_types_engine::{ExecutionData, ExecutionPayload, ExecutionPayloadEnvelopeV5};
pub use alloy_rpc_types_engine::{
ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4,
ExecutionPayloadV1, PayloadAttributes,
};
use reth_chainspec::ChainSpec;
use reth_engine_primitives::{EngineTypes, PayloadValidator};
use reth_ethereum_payload_builder::EthereumExecutionPayloadValidator;
use reth_node_api::{
validate_execution_requests, validate_version_specific_fields, EngineApiMessageVersion,
EngineApiValidator, EngineObjectValidationError, NewPayloadError, PayloadOrAttributes,
};
use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes};
use reth_payload_primitives::{BuiltPayload, PayloadTypes};
use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock};
use reth_seismic_primitives::SeismicPrimitives;
/// The types used in the default mainnet ethereum beacon consensus engine.
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
#[non_exhaustive]
pub struct SeismicEngineTypes<T: PayloadTypes = SeismicPayloadTypes> {
_marker: core::marker::PhantomData<T>,
}
impl<
T: PayloadTypes<
ExecutionData = ExecutionData,
BuiltPayload: BuiltPayload<
Primitives: NodePrimitives<Block = reth_seismic_primitives::SeismicBlock>,
>,
>,
> PayloadTypes for SeismicEngineTypes<T>
{
type ExecutionData = T::ExecutionData;
type BuiltPayload = T::BuiltPayload;
type PayloadAttributes = T::PayloadAttributes;
type PayloadBuilderAttributes = T::PayloadBuilderAttributes;
fn block_to_payload(
block: SealedBlock<
<<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block,
>,
) -> Self::ExecutionData {
let (payload, sidecar) =
ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block());
ExecutionData { payload, sidecar }
}
}
impl<T> EngineTypes for SeismicEngineTypes<T>
where
T: PayloadTypes<ExecutionData = ExecutionData>,
T::BuiltPayload: BuiltPayload<Primitives: NodePrimitives<Block = reth_seismic_primitives::SeismicBlock>>
+ TryInto<ExecutionPayloadV1>
+ TryInto<ExecutionPayloadEnvelopeV2>
+ TryInto<ExecutionPayloadEnvelopeV3>
+ TryInto<ExecutionPayloadEnvelopeV4>
+ TryInto<ExecutionPayloadEnvelopeV5>,
{
type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1;
type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2;
type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3;
type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4;
type ExecutionPayloadEnvelopeV5 = ExecutionPayloadEnvelopeV5;
}
/// A default payload type for [`EthEngineTypes`]
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
#[non_exhaustive]
pub struct SeismicPayloadTypes;
impl PayloadTypes for SeismicPayloadTypes {
type BuiltPayload = EthBuiltPayload<SeismicPrimitives>;
type PayloadAttributes = PayloadAttributes;
type PayloadBuilderAttributes = EthPayloadBuilderAttributes;
type ExecutionData = ExecutionData;
fn block_to_payload(
block: SealedBlock<
<<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block,
>,
) -> Self::ExecutionData {
let (payload, sidecar) =
ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block());
ExecutionData { payload, sidecar }
}
}
/// Validator for the ethereum engine API.
#[derive(Debug, Clone)]
pub struct SeismicEngineValidator {
inner: EthereumExecutionPayloadValidator<ChainSpec>,
}
impl SeismicEngineValidator {
/// Instantiates a new validator.
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { inner: EthereumExecutionPayloadValidator::new(chain_spec) }
}
/// Returns the chain spec used by the validator.
#[inline]
fn chain_spec(&self) -> &ChainSpec {
self.inner.chain_spec()
}
}
impl PayloadValidator<SeismicEngineTypes> for SeismicEngineValidator {
type Block = reth_seismic_primitives::SeismicBlock;
fn ensure_well_formed_payload(
&self,
payload: ExecutionData,
) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> {
let sealed_block = self.inner.ensure_well_formed_payload(payload)?;
sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into()))
}
}
impl<Types> EngineApiValidator<Types> for SeismicEngineValidator
where
Types: PayloadTypes<PayloadAttributes = PayloadAttributes, ExecutionData = ExecutionData>,
{
fn validate_version_specific_fields(
&self,
version: EngineApiMessageVersion,
payload_or_attrs: PayloadOrAttributes<'_, ExecutionData, PayloadAttributes>,
) -> Result<(), EngineObjectValidationError> {
payload_or_attrs
.execution_requests()
.map(|requests| validate_execution_requests(requests))
.transpose()?;
validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs)
}
fn ensure_well_formed_attributes(
&self,
version: EngineApiMessageVersion,
attributes: &PayloadAttributes,
) -> Result<(), EngineObjectValidationError> {
validate_version_specific_fields(
self.chain_spec(),
version,
PayloadOrAttributes::<ExecutionData, PayloadAttributes>::PayloadAttributes(attributes),
)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/lib.rs | crates/seismic/node/src/lib.rs | //! Standalone crate for Seismic-specific Reth configuration and builder types.
//!
//! # features
//! - `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
// #![cfg_attr(not(feature = "std"), no_std)]
pub mod args;
pub mod engine;
pub mod node;
pub mod purpose_keys;
pub use reth_seismic_txpool as txpool;
pub mod utils;
pub use reth_seismic_payload_builder::SeismicPayloadBuilder;
pub use reth_seismic_evm::*;
use reth_chainspec::ChainSpec;
use std::sync::Arc;
/// Creates a Seismic EVM configuration with the given chain spec and purpose keys.
pub fn seismic_evm_config(
spec: Arc<ChainSpec>,
purpose_keys: &'static seismic_enclave::GetPurposeKeysResponse,
) -> SeismicEvmConfig {
SeismicEvmConfig::new(spec, purpose_keys)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/args.rs | crates/seismic/node/src/args.rs | //! clap [Args](clap::Args) for RPC related arguments.
use std::net::{IpAddr, Ipv4Addr};
use clap::Args;
/// Parameters for configuring the enclave more granularity via CLI
#[derive(Debug, Clone, Args, PartialEq, Eq, Copy)]
#[command(next_help_heading = "Enclave")]
pub struct EnclaveArgs {
/// Auth server address to listen on
#[arg(long = "enclave.endpoint-addr", default_value_t = IpAddr::V4(Ipv4Addr::UNSPECIFIED))]
pub enclave_server_addr: IpAddr,
/// Auth server port to listen on
#[arg(long = "enclave.endpoint-port", default_value_t = 7878)]
pub enclave_server_port: u16,
/// Spin up mock server for testing purpose
#[arg(long = "enclave.mock-server", action = clap::ArgAction::SetTrue)]
pub mock_server: bool,
/// Enclave client timeout
#[arg(long = "enclave.timeout", default_value_t = 5)]
pub enclave_timeout: u64,
}
impl Default for EnclaveArgs {
fn default() -> Self {
Self {
enclave_server_addr: IpAddr::V4(Ipv4Addr::UNSPECIFIED),
enclave_server_port: 7878,
mock_server: false,
enclave_timeout: 5,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use clap::{Args, Parser};
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_enclave_args_parser() {
let args = CommandParser::<EnclaveArgs>::parse_from(["reth node"]).args;
let addr = args.enclave_server_addr;
let port = args.enclave_server_port;
let mock = args.mock_server;
assert_eq!(port, 7878);
assert_eq!(addr, IpAddr::V4(Ipv4Addr::UNSPECIFIED));
assert_eq!(mock, false);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/utils.rs | crates/seismic/node/src/utils.rs | //! test utils for the e2e tests
/// Test utils for the seismic rpc api
pub mod test_utils {
use alloy_primitives::Address;
use alloy_rpc_types::{Block, Header, Transaction, TransactionReceipt};
use jsonrpsee::http_client::HttpClient;
use reth_rpc_eth_api::EthApiClient;
use reth_seismic_chainspec::SEISMIC_DEV;
use seismic_alloy_rpc_types::SeismicTransactionRequest;
use serde_json::Value;
use std::{path::PathBuf, process::Stdio};
use tokio::{
io::{AsyncBufReadExt, AsyncWriteExt, BufReader},
process::Command,
sync::mpsc,
};
pub use reth_seismic_primitives::test_utils::{
client_decrypt, client_encrypt, get_ciphertext, get_client_io_sk, get_encryption_nonce,
get_network_public_key, get_plaintext, get_seismic_elements, get_seismic_tx,
get_signed_seismic_tx, get_signed_seismic_tx_bytes, get_signed_seismic_tx_encoding,
get_signed_seismic_tx_typed_data, get_signing_private_key, get_unsigned_seismic_tx_request,
get_unsigned_seismic_tx_typed_data, get_wrong_private_key, sign_seismic_tx, sign_tx,
};
// use reth_seismic_evm::engine::SeismicEngineValidator;
/// Seismic reth test command
#[derive(Debug)]
pub struct SeismicRethTestCommand();
impl SeismicRethTestCommand {
/// Run the seismic reth test command
pub async fn run(tx: mpsc::Sender<()>, mut shutdown_rx: mpsc::Receiver<()>) {
let output = Command::new("cargo")
.arg("metadata")
.arg("--format-version=1")
.output()
.await
.unwrap();
let metadata: Value = serde_json::from_slice(&output.stdout).unwrap();
let workspace_root = metadata.get("workspace_root").unwrap().as_str().unwrap();
println!("Workspace root: {}", workspace_root);
let mut child = Command::new("cargo")
.arg("run")
.arg("--bin")
.arg("seismic-reth") // Specify the binary name
.arg("--")
.arg("node")
.arg("--datadir")
.arg(SeismicRethTestCommand::data_dir().to_str().unwrap())
.arg("--dev")
.arg("--dev.block-max-transactions")
.arg("1")
.arg("--enclave.mock-server")
.arg("-vvvv")
.arg("--disable-discovery")
.current_dir(workspace_root)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.expect("Failed to start the binary");
tokio::spawn(async move {
let stdout = child.stdout.as_mut().expect("Failed to capture stdout");
let stderr = child.stderr.as_mut().expect("Failed to capture stderr");
let mut stdout_reader = BufReader::new(stdout);
let mut stderr_reader = BufReader::new(stderr);
let mut stdout_line = String::new();
let mut stderr_line = String::new();
let mut sent = false;
std::panic::set_hook(Box::new(|info| {
eprintln!("β PANIC DETECTED: {:?}", info);
}));
loop {
tokio::select! {
result = stdout_reader.read_line(&mut stdout_line) => {
if result.unwrap() == 0 {
eprintln!("π STDOUT reached EOF! Breaking loop.");
break;
}
eprint!("{}", stdout_line);
if stdout_line.contains("Starting consensus engine") && !sent {
eprintln!("π Reth server is ready!");
let _ = tx.send(()).await;
sent = true;
}
stdout_line.clear();
tokio::io::stdout().flush().await.unwrap();
}
result = stderr_reader.read_line(&mut stderr_line) => {
if result.unwrap() == 0 {
eprintln!("π STDERR reached EOF! Breaking loop.");
break;
}
eprint!("{}", stderr_line);
stderr_line.clear();
}
Some(_) = shutdown_rx.recv() => {
eprintln!("π Shutdown signal received! Breaking loop.");
break;
}
}
}
println!("β
Exiting loop.");
child.kill().await.unwrap();
println!("β
Killed child process.");
});
}
/// Get the data directory for the seismic reth test command
pub fn data_dir() -> PathBuf {
static TEMP_DIR: once_cell::sync::Lazy<tempfile::TempDir> =
once_cell::sync::Lazy::new(|| tempfile::tempdir().unwrap());
TEMP_DIR.path().to_path_buf()
}
/// Get the chain id for the seismic reth test command
pub fn chain_id() -> u64 {
SEISMIC_DEV.chain().into()
}
/// Get the url for the seismic reth test command
pub fn url() -> String {
format!("http://127.0.0.1:8545")
}
}
/// Get the nonce from the client
pub async fn get_nonce(client: &HttpClient, address: Address) -> u64 {
let nonce = EthApiClient::<
SeismicTransactionRequest,
Transaction,
Block,
TransactionReceipt,
Header,
>::transaction_count(client, address, None)
.await
.unwrap();
nonce.wrapping_to::<u64>()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/src/purpose_keys.rs | crates/seismic/node/src/purpose_keys.rs | //! Global storage for purpose keys fetched from enclave on boot.
//!
//! This module provides thread-safe access to purpose keys that are fetched once
//! during node startup and then used throughout the application lifetime.
use seismic_enclave::GetPurposeKeysResponse;
use std::sync::OnceLock;
/// Global storage for purpose keys.
/// These keys are fetched once from the enclave during node startup.
static PURPOSE_KEYS: OnceLock<GetPurposeKeysResponse> = OnceLock::new();
/// Initialize the global purpose keys.
/// This should be called once during node startup, after the enclave is booted.
///
/// # Panics
/// Panics if called more than once.
pub fn init_purpose_keys(keys: GetPurposeKeysResponse) {
PURPOSE_KEYS.set(keys).expect("Purpose keys already initialized");
}
/// Get a reference to the purpose keys.
///
/// # Panics
/// Panics if the keys haven't been initialized yet.
pub fn get_purpose_keys() -> &'static GetPurposeKeysResponse {
PURPOSE_KEYS.get().expect("Purpose keys not initialized")
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/dev.rs | crates/seismic/node/tests/e2e/dev.rs | use alloy_eips::eip2718::Encodable2718;
use alloy_genesis::Genesis;
use alloy_primitives::{b256, hex};
use futures::StreamExt;
use reth_chainspec::ChainSpec;
use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeTypes};
use reth_node_builder::{
rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle,
};
use reth_node_core::args::DevArgs;
use reth_node_ethereum::{node::EthereumAddOns, EthereumNode};
use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions};
use reth_rpc_eth_api::helpers::EthTransactions;
use reth_tasks::TaskManager;
use std::sync::Arc;
#[tokio::test(flavor = "multi_thread")]
async fn can_run_dev_node() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let tasks = TaskManager::current();
let exec = tasks.executor();
let node_config = NodeConfig::test()
.with_chain(custom_chain())
.with_dev(DevArgs { dev: true, ..Default::default() });
let NodeHandle { node, .. } = NodeBuilder::new(node_config.clone())
.testing_node(exec.clone())
.with_types_and_provider::<EthereumNode, BlockchainProvider<_>>()
.with_components(EthereumNode::components())
.with_add_ons(EthereumAddOns::default())
.launch_with_fn(|builder| {
let launcher = EngineNodeLauncher::new(
builder.task_executor().clone(),
builder.config().datadir(),
Default::default(),
);
builder.launch_with(launcher)
})
.await?;
assert_chain_advances(node).await;
Ok(())
}
async fn assert_chain_advances<N, AddOns>(node: FullNode<N, AddOns>)
where
N: FullNodeComponents<Provider: CanonStateSubscriptions>,
AddOns: RethRpcAddOns<N, EthApi: EthTransactions>,
N::Types: NodeTypes<Primitives: FullNodePrimitives>,
{
let mut notifications = node.provider.canonical_state_stream();
// submit tx through rpc
let raw_tx = hex!("02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090");
let eth_api = node.rpc_registry.eth_api();
let hash = eth_api.send_raw_transaction(raw_tx.into()).await.unwrap();
let expected = b256!("0xb1c6512f4fc202c04355fbda66755e0e344b152e633010e8fd75ecec09b63398");
assert_eq!(hash, expected);
println!("submitted transaction: {hash}");
let head = notifications.next().await.unwrap();
let tx = &head.tip().body().transactions()[0];
assert_eq!(tx.trie_hash(), hash);
println!("mined transaction: {hash}");
}
fn custom_chain() -> Arc<ChainSpec> {
let custom_genesis = r#"
{
"nonce": "0x42",
"timestamp": "0x0",
"extraData": "0x5343",
"gasLimit": "0x13880",
"difficulty": "0x400000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0x6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b": {
"balance": "0x4a47e3c12448f4ad000000"
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"config": {
"ethash": {},
"chainId": 2600,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true,
"shanghaiTime": 0
}
}
"#;
let genesis: Genesis = serde_json::from_str(custom_genesis).unwrap();
Arc::new(genesis.into())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/eth.rs | crates/seismic/node/tests/e2e/eth.rs | use crate::utils::eth_payload_attributes;
use alloy_genesis::Genesis;
use reth_chainspec::{ChainSpecBuilder, MAINNET};
use reth_e2e_test_utils::{
node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet,
};
use reth_node_builder::{NodeBuilder, NodeHandle};
use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
use reth_node_ethereum::EthereumNode;
use reth_tasks::TaskManager;
use std::sync::Arc;
#[tokio::test(flavor = "multi_thread")]
async fn can_run_eth_node() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let (mut nodes, _tasks, wallet) = setup::<EthereumNode>(
1,
Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.cancun_activated()
.build(),
),
false,
eth_payload_attributes,
)
.await?;
let mut node = nodes.pop().unwrap();
let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await;
// make the node advance
let tx_hash = node.rpc.inject_tx(raw_tx).await?;
// make the node advance
let payload = node.advance_block().await?;
let block_hash = payload.block().hash();
let block_number = payload.block().number;
// assert the block has been committed to the blockchain
node.assert_new_block(tx_hash, block_hash, block_number).await?;
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[cfg(unix)]
async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let exec = TaskManager::current();
let exec = exec.executor();
// Chain spec with test allocs
let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(genesis)
.cancun_activated()
.build(),
);
// Node setup
let node_config = NodeConfig::test()
.with_chain(chain_spec)
.with_rpc(RpcServerArgs::default().with_unused_ports().with_http().with_auth_ipc());
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
.testing_node(exec)
.node(EthereumNode::default())
.launch()
.await?;
let mut node = NodeTestContext::new(node, eth_payload_attributes).await?;
// Configure wallet from test mnemonic and create dummy transfer tx
let wallet = Wallet::default();
let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await;
// make the node advance
let tx_hash = node.rpc.inject_tx(raw_tx).await?;
// make the node advance
let payload = node.advance_block().await?;
let block_hash = payload.block().hash();
let block_number = payload.block().number;
// assert the block has been committed to the blockchain
node.assert_new_block(tx_hash, block_hash, block_number).await?;
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
#[cfg(unix)]
async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let exec = TaskManager::current();
let exec = exec.executor();
// Chain spec with test allocs
let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(genesis)
.cancun_activated()
.build(),
);
// Node setup
let node_config = NodeConfig::test().with_chain(chain_spec);
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
.testing_node(exec)
.node(EthereumNode::default())
.launch()
.await?;
let node = NodeTestContext::new(node, eth_payload_attributes).await?;
// Ensure that the engine api client is not available
let client = node.inner.engine_ipc_client().await;
assert!(client.is_none(), "ipc auth should be disabled by default");
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/integration.rs | crates/seismic/node/tests/e2e/integration.rs | //! This file is used to test the seismic node.
use alloy_dyn_abi::EventExt;
use alloy_json_abi::{Event, EventParam};
use alloy_network::{ReceiptResponse, TransactionBuilder};
use alloy_primitives::{
aliases::{B96, U96},
hex,
hex::FromHex,
Bytes, IntoLogData, TxKind, B256, U256,
};
use alloy_provider::{PendingTransactionBuilder, Provider, SendableTx};
use alloy_rpc_types::{Block, Header, TransactionInput, TransactionRequest};
use alloy_sol_types::{sol, SolCall, SolValue};
use reth_e2e_test_utils::wallet::Wallet;
use reth_rpc_eth_api::EthApiClient;
use reth_seismic_node::utils::test_utils::{
client_decrypt, get_nonce, get_signed_seismic_tx_bytes, get_signed_seismic_tx_typed_data,
get_unsigned_seismic_tx_request, SeismicRethTestCommand,
};
use reth_seismic_primitives::{SeismicBlock, SeismicTransactionSigned};
use reth_seismic_rpc::ext::EthApiOverrideClient;
use seismic_alloy_network::{wallet::SeismicWallet, SeismicReth};
use seismic_alloy_provider::{
test_utils::ContractTestContext, SeismicProviderExt, SeismicSignedProvider,
};
use seismic_alloy_rpc_types::{
SeismicCallRequest, SeismicTransactionReceipt, SeismicTransactionRequest, SimBlock,
SimulatePayload,
};
use seismic_enclave::aes_decrypt;
use std::{thread, time::Duration};
use tokio::sync::mpsc;
const PRECOMPILES_TEST_SET_AES_KEY_SELECTOR: &str = "a0619040"; // setAESKey(suint256)
const PRECOMPILES_TEST_ENCRYPTED_LOG_SELECTOR: &str = "28696e36"; // submitMessage(bytes)
// #[tokio::test(flavor = "multi_thread")]
// async fn unit_test() {
// let reth_rpc_url = SeismicRethTestCommand::url();
// let chain_id = SeismicRethTestCommand::chain_id();
// let client =
// jsonrpsee::http_client::HttpClientBuilder::default().build(reth_rpc_url).unwrap(); let wallet
// = Wallet::default().with_chain_id(chain_id);
// let tx_bytes = get_signed_seismic_tx_bytes(
// &wallet.inner,
// get_nonce(&client, wallet.inner.address()).await,
// TxKind::Create,
// chain_id,
// ContractTestContext::get_deploy_input_plaintext(),
// )
// .await;
// println!("tx_bytes: {:?}", tx_bytes);
// }
#[tokio::test(flavor = "multi_thread")]
async fn integration_test() {
// set this to true when you want to spin up a node
// outside the test to see logs more easily
let manual_debug = false;
let mut shutdown_tx_top: Option<mpsc::Sender<()>> = None;
if !manual_debug {
// spin up a reth node
let (tx, mut rx) = mpsc::channel(1);
let (shutdown_tx, shutdown_rx) = mpsc::channel(1);
shutdown_tx_top = Some(shutdown_tx);
SeismicRethTestCommand::run(tx, shutdown_rx).await;
rx.recv().await.unwrap();
}
test_seismic_reth_rpc().await;
test_seismic_reth_rpc_with_typed_data().await;
test_seismic_reth_rpc_with_rust_client().await;
test_seismic_reth_rpc_simulate_block().await;
test_seismic_precompiles_end_to_end().await;
if !manual_debug {
let _ = shutdown_tx_top.unwrap().try_send(()).unwrap();
println!("shutdown signal sent");
thread::sleep(Duration::from_secs(1));
}
}
// this is the same test as basic.rs but with actual RPC calls and standalone reth instance
async fn test_seismic_reth_rpc() {
let reth_rpc_url = SeismicRethTestCommand::url();
let chain_id = SeismicRethTestCommand::chain_id();
let client = jsonrpsee::http_client::HttpClientBuilder::default().build(reth_rpc_url).unwrap();
let wallet = Wallet::default().with_chain_id(chain_id);
println!("wallet: {:?}", wallet);
let tx_hash = EthApiOverrideClient::<Block>::send_raw_transaction(
&client,
get_signed_seismic_tx_bytes(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await
.into(),
)
.await
.unwrap();
// assert_eq!(tx_hash, itx.tx_hashes[0]);
thread::sleep(Duration::from_secs(3));
println!("eth_sendRawTransaction deploying contract tx_hash: {:?}", tx_hash);
// Get the transaction receipt
let receipt = EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::transaction_receipt(&client, tx_hash)
.await
.unwrap()
.unwrap();
let contract_addr = receipt.contract_address.unwrap();
println!(
"eth_getTransactionReceipt getting contract deployment transaction receipt: {:?}",
receipt
);
assert_eq!(receipt.status(), true);
// Make sure the code of the contract is deployed
let code = EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::get_code(&client, contract_addr, None)
.await
.unwrap();
assert_eq!(ContractTestContext::get_code(), code);
println!("eth_getCode getting contract deployment code: {:?}", code);
// eth_call to check the parity. Should be 0
let output = EthApiOverrideClient::<Block>::call(
&client,
get_signed_seismic_tx_bytes(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Call(contract_addr),
chain_id,
ContractTestContext::get_is_odd_input_plaintext(),
)
.await
.into(),
None,
None,
None,
)
.await
.unwrap();
let decrypted_output = client_decrypt(&output).unwrap();
println!("eth_call decrypted output: {:?}", decrypted_output);
assert_eq!(U256::from_be_slice(&decrypted_output), U256::ZERO);
// Send transaction to set suint
let tx_hash = EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::send_raw_transaction(
&client,
get_signed_seismic_tx_bytes(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Call(contract_addr),
chain_id,
ContractTestContext::get_set_number_input_plaintext(),
)
.await
.into(),
)
.await
.unwrap();
println!("eth_sendRawTransaction setting number transaction tx_hash: {:?}", tx_hash);
thread::sleep(Duration::from_secs(1));
// Get the transaction receipt
let receipt = EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::transaction_receipt(&client, tx_hash)
.await
.unwrap()
.unwrap();
println!("eth_getTransactionReceipt getting set_number transaction receipt: {:?}", receipt);
assert_eq!(receipt.status(), true);
// Final eth_call to check the parity. Should be 1
let output = EthApiOverrideClient::<SeismicBlock>::call(
&client,
get_signed_seismic_tx_bytes(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Call(contract_addr),
chain_id,
ContractTestContext::get_is_odd_input_plaintext(),
)
.await
.into(),
None,
None,
None,
)
.await
.unwrap();
let decrypted_output = client_decrypt(&output).unwrap();
println!("eth_call decrypted output: {:?}", decrypted_output);
assert_eq!(U256::from_be_slice(&decrypted_output), U256::from(1));
let simulate_tx_request = get_unsigned_seismic_tx_request(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Call(contract_addr),
chain_id,
ContractTestContext::get_is_odd_input_plaintext(),
)
.await;
// test eth_estimateGas
let gas = EthApiOverrideClient::<Block>::estimate_gas(
&client,
simulate_tx_request.clone(),
None,
None,
)
.await
.unwrap();
println!("eth_estimateGas for is_odd() gas: {:?}", gas);
assert!(gas > U256::ZERO);
let access_list =
EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::create_access_list(&client, simulate_tx_request.inner.clone().into(), None, None)
.await
.unwrap();
println!("eth_createAccessList for is_odd() access_list: {:?}", access_list);
// test call
let output = EthApiOverrideClient::<Block>::call(
&client,
simulate_tx_request.clone().into(),
None,
None,
None,
)
.await
.unwrap();
println!("eth_call is_odd() decrypted output: {:?}", output);
// call with no transaction type
let output = EthApiOverrideClient::<Block>::call(
&client,
SeismicTransactionRequest {
inner: TransactionRequest {
from: Some(wallet.inner.address()),
input: TransactionInput {
data: Some(ContractTestContext::get_is_odd_input_plaintext()),
..Default::default()
},
to: Some(TxKind::Call(contract_addr)),
..Default::default()
},
seismic_elements: None,
}
.into(),
None,
None,
None,
)
.await
.unwrap();
println!("eth_call is_odd() with no transaction type decrypted output: {:?}", output);
}
async fn test_seismic_reth_rpc_with_typed_data() {
let reth_rpc_url = SeismicRethTestCommand::url();
let chain_id = SeismicRethTestCommand::chain_id();
let client = jsonrpsee::http_client::HttpClientBuilder::default().build(reth_rpc_url).unwrap();
let wallet = Wallet::default().with_chain_id(chain_id);
let tx_hash = EthApiOverrideClient::<Block>::send_raw_transaction(
&client,
get_signed_seismic_tx_typed_data(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await
.into(),
)
.await
.unwrap();
// assert_eq!(tx_hash, itx.tx_hashes[0]);
thread::sleep(Duration::from_secs(1));
println!("eth_sendRawTransaction deploying contract tx_hash: {:?}", tx_hash);
// Get the transaction receipt
let receipt = EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::transaction_receipt(&client, tx_hash)
.await
.unwrap()
.unwrap();
let contract_addr = receipt.contract_address.unwrap();
println!("contract_addr: {:?}", contract_addr);
println!(
"eth_getTransactionReceipt getting contract deployment transaction receipt: {:?}",
receipt
);
assert_eq!(receipt.status(), true);
// Make sure the code of the contract is deployed
let code = EthApiClient::<
SeismicTransactionRequest,
SeismicTransactionSigned,
SeismicBlock,
SeismicTransactionReceipt,
Header,
>::get_code(&client, contract_addr, None)
.await
.unwrap();
assert_eq!(ContractTestContext::get_code(), code);
println!("eth_getCode getting contract deployment code: {:?}", code);
// eth_call to check the parity. Should be 0
let output = EthApiOverrideClient::<Block>::call(
&client,
get_signed_seismic_tx_typed_data(
&wallet.inner,
get_nonce(&client, wallet.inner.address()).await,
TxKind::Call(contract_addr),
chain_id,
ContractTestContext::get_is_odd_input_plaintext(),
)
.await
.into(),
None,
None,
None,
)
.await
.unwrap();
let decrypted_output = client_decrypt(&output).unwrap();
println!("eth_call decrypted output: {:?}", decrypted_output);
assert_eq!(U256::from_be_slice(&decrypted_output), U256::ZERO);
}
// this is the same test as basic.rs but with actual RPC calls and standalone reth instance
// with rust client in alloy
async fn test_seismic_reth_rpc_with_rust_client() {
let reth_rpc_url = SeismicRethTestCommand::url();
let chain_id = SeismicRethTestCommand::chain_id();
let _wallet = Wallet::default().with_chain_id(chain_id);
let wallet: SeismicWallet<SeismicReth> = SeismicWallet::from(_wallet.inner);
let provider = SeismicSignedProvider::new(wallet, reqwest::Url::parse(&reth_rpc_url).unwrap());
let req = TransactionBuilder::<SeismicReth>::with_kind(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
ContractTestContext::get_deploy_input_plaintext(),
),
TxKind::Create,
);
let pending_transaction: PendingTransactionBuilder<SeismicReth> =
provider.send_transaction(req).await.unwrap();
let tx_hash = pending_transaction.tx_hash();
// assert_eq!(tx_hash, itx.tx_hashes[0]);
thread::sleep(Duration::from_secs(1));
println!("eth_sendRawTransaction deploying contract tx_hash: {:?}", tx_hash);
// Get the transaction receipt
let receipt = provider.get_transaction_receipt(tx_hash.clone()).await.unwrap().unwrap();
let contract_addr = receipt.contract_address.unwrap();
println!(
"eth_getTransactionReceipt getting contract deployment transaction receipt: {:?}",
receipt
);
assert_eq!(receipt.status(), true);
// Make sure the code of the contract is deployed
let code = provider.get_code_at(contract_addr).await.unwrap();
assert_eq!(ContractTestContext::get_code(), code);
println!("eth_getCode getting contract deployment code: {:?}", code);
// eth_call to check the parity. Should be 0
let output = provider
.seismic_call(SendableTx::Builder(TransactionBuilder::<SeismicReth>::with_to(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
ContractTestContext::get_is_odd_input_plaintext(),
),
contract_addr,
)))
.await
.unwrap();
println!("eth_call decrypted output: {:?}", output);
assert_eq!(U256::from_be_slice(&output), U256::ZERO);
// Send transaction to set suint
let pending_transaction = provider
.send_transaction(TransactionBuilder::<SeismicReth>::with_to(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
ContractTestContext::get_set_number_input_plaintext(),
),
contract_addr,
))
.await
.unwrap();
let tx_hash = pending_transaction.tx_hash();
println!("eth_sendRawTransaction setting number transaction tx_hash: {:?}", tx_hash);
thread::sleep(Duration::from_secs(1));
// Get the transaction receipt
let receipt = provider.get_transaction_receipt(tx_hash.clone()).await.unwrap().unwrap();
println!("eth_getTransactionReceipt getting set_number transaction receipt: {:?}", receipt);
assert_eq!(receipt.status(), true);
// Final eth_call to check the parity. Should be 1
let output = provider
.seismic_call(SendableTx::Builder(TransactionBuilder::<SeismicReth>::with_to(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
ContractTestContext::get_is_odd_input_plaintext(),
),
contract_addr,
)))
.await
.unwrap();
println!("eth_call decrypted output: {:?}", output);
assert_eq!(U256::from_be_slice(&output), U256::from(1));
// // eth_estimateGas cannot be called directly with rust client
// // eth_createAccessList cannot be called directly with rust client
// // rust client also does not support Eip712::typed data requests
}
async fn test_seismic_reth_rpc_simulate_block() {
let reth_rpc_url = SeismicRethTestCommand::url();
let chain_id = SeismicRethTestCommand::chain_id();
let client = jsonrpsee::http_client::HttpClientBuilder::default().build(reth_rpc_url).unwrap();
let wallet = Wallet::default().with_chain_id(chain_id);
let nonce = get_nonce(&client, wallet.inner.address()).await;
let tx_bytes = get_signed_seismic_tx_bytes(
&wallet.inner,
nonce,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await;
let tx_typed_data = get_signed_seismic_tx_typed_data(
&wallet.inner,
nonce + 1,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await;
let block_1 = SimBlock {
block_overrides: None,
state_overrides: None,
calls: vec![
SeismicCallRequest::Bytes(tx_bytes),
SeismicCallRequest::TypedData(tx_typed_data),
],
};
let tx_bytes = get_signed_seismic_tx_bytes(
&wallet.inner,
nonce + 2,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await;
let tx_typed_data = get_signed_seismic_tx_typed_data(
&wallet.inner,
nonce + 3,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await;
let block_2 = SimBlock {
block_overrides: None,
state_overrides: None,
calls: vec![
SeismicCallRequest::Bytes(tx_bytes),
SeismicCallRequest::TypedData(tx_typed_data),
],
};
let tx_bytes = get_signed_seismic_tx_bytes(
&wallet.inner,
nonce + 4,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await;
let tx_typed_data = get_signed_seismic_tx_typed_data(
&wallet.inner,
nonce + 5,
TxKind::Create,
chain_id,
ContractTestContext::get_deploy_input_plaintext(),
)
.await;
let block_3 = SimBlock {
block_overrides: None,
state_overrides: None,
calls: vec![
SeismicCallRequest::Bytes(tx_bytes),
SeismicCallRequest::TypedData(tx_typed_data),
],
};
let simulate_payload = SimulatePayload::<SeismicCallRequest> {
block_state_calls: vec![block_1, block_2, block_3],
trace_transfers: false,
validation: false,
return_full_transactions: false,
};
let result =
EthApiOverrideClient::<Block>::simulate_v1(&client, simulate_payload, None).await.unwrap();
for block_result in result {
for call in block_result.calls {
let decrypted_output = client_decrypt(&call.return_data).unwrap();
println!("decrypted_output: {:?}", decrypted_output);
assert_eq!(decrypted_output, ContractTestContext::get_code());
}
}
}
async fn test_seismic_precompiles_end_to_end() {
let reth_rpc_url = SeismicRethTestCommand::url();
let chain_id = SeismicRethTestCommand::chain_id();
let _wallet = Wallet::default().with_chain_id(chain_id);
let wallet: SeismicWallet<SeismicReth> = SeismicWallet::from(_wallet.inner);
let provider = SeismicSignedProvider::new(wallet, reqwest::Url::parse(&reth_rpc_url).unwrap());
let req = TransactionBuilder::<SeismicReth>::with_kind(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
get_encryption_precompiles_contracts(),
),
TxKind::Create,
);
let pending_transaction = provider.send_transaction(req).await.unwrap();
let tx_hash = pending_transaction.tx_hash();
thread::sleep(Duration::from_secs(1));
// Get the transaction receipt
let receipt = provider.get_transaction_receipt(tx_hash.clone()).await.unwrap().unwrap();
let contract_addr = receipt.contract_address.unwrap();
assert_eq!(receipt.status(), true);
let code = provider.get_code_at(contract_addr).await.unwrap();
assert_eq!(get_runtime_code(), code);
// Prepare addresses & keys
let private_key =
B256::from_hex("7e34abdcd62eade2e803e0a8123a0015ce542b380537eff288d6da420bcc2d3b").unwrap();
//
// 2. Tx #1: Set AES key in the contract
//
let unencrypted_aes_key = get_input_data(PRECOMPILES_TEST_SET_AES_KEY_SELECTOR, private_key);
let req = TransactionBuilder::<SeismicReth>::with_kind(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
unencrypted_aes_key,
),
TxKind::Call(contract_addr),
);
let pending_transaction = provider.send_transaction(req).await.unwrap();
let tx_hash = pending_transaction.tx_hash();
thread::sleep(Duration::from_secs(1));
// Get the transaction receipt
let receipt = provider.get_transaction_receipt(tx_hash.clone()).await.unwrap().unwrap();
assert_eq!(receipt.status(), true);
//
// 3. Tx #2: Encrypt & send "hello world"
//
let raw_message = "hello world";
let message = Bytes::from(raw_message);
type PlaintextType = Bytes; // used for AbiEncode / AbiDecode
let encoded_message = PlaintextType::abi_encode(&message);
let unencrypted_input =
concat_input_data(PRECOMPILES_TEST_ENCRYPTED_LOG_SELECTOR, encoded_message.into());
let req = TransactionBuilder::<SeismicReth>::with_kind(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
unencrypted_input,
),
TxKind::Call(contract_addr),
);
let pending_transaction = provider.send_transaction(req).await.unwrap();
let tx_hash = pending_transaction.tx_hash();
thread::sleep(Duration::from_secs(1));
// Get the transaction receipt
let receipt = provider.get_transaction_receipt(tx_hash.clone()).await.unwrap().unwrap();
assert_eq!(receipt.status(), true);
//
// 4. Tx #3: On-chain decrypt
//
let logs = receipt.inner.logs();
assert_eq!(logs.len(), 1);
assert_eq!(logs[0].inner.address, contract_addr);
// Decode the EncryptedMessage event
let log_data = logs[0].inner.data.clone();
let event = Event {
name: "EncryptedMessage".into(),
inputs: vec![
EventParam { ty: "uint96".into(), indexed: true, ..Default::default() },
EventParam { ty: "bytes".into(), indexed: false, ..Default::default() },
],
anonymous: false,
};
let decoded = event.decode_log(&log_data.into_log_data()).unwrap();
sol! {
#[derive(Debug, PartialEq)]
interface Encryption {
function decrypt(uint96 nonce, bytes calldata ciphertext)
external
view
onlyOwner
returns (bytes memory plaintext);
}
}
// Extract (nonce, ciphertext)
let nonce: U96 =
U96::from_be_bytes(B96::from_slice(&decoded.indexed[0].abi_encode_packed()).into());
let ciphertext = Bytes::from(decoded.body[0].abi_encode_packed());
let call = Encryption::decryptCall { nonce, ciphertext: ciphertext.clone() };
let unencrypted_decrypt_call: Bytes = call.abi_encode().into();
let req = TransactionBuilder::<SeismicReth>::with_kind(
TransactionBuilder::<SeismicReth>::with_input(
SeismicTransactionRequest::default(),
unencrypted_decrypt_call,
),
TxKind::Call(contract_addr),
);
let decrypted_output = provider.seismic_call(SendableTx::Builder(req)).await.unwrap();
let result_bytes = PlaintextType::abi_decode(&Bytes::from(decrypted_output))
.expect("failed to decode the bytes");
let final_string =
String::from_utf8(result_bytes.to_vec()).expect("invalid utf8 in decrypted bytes");
assert_eq!(final_string, raw_message);
// Local Decrypt
let secp_private = secp256k1::SecretKey::from_slice(private_key.as_ref()).unwrap();
let aes_key: &[u8; 32] = &secp_private.secret_bytes()[0..32].try_into().unwrap();
let nonce: [u8; 12] = decoded.indexed[0].abi_encode_packed().try_into().unwrap();
let decrypted_locally =
aes_decrypt(aes_key.into(), &ciphertext, nonce).expect("AES decryption failed");
assert_eq!(decrypted_locally, message);
}
/// Get the deploy input plaintext
/// https://github.com/SeismicSystems/early-builds/blob/main/encrypted_logs/src/end-to-end-mvp/EncryptedLogs.sol
fn get_encryption_precompiles_contracts() -> Bytes {
Bytes::from_static(&hex!("6080604052348015600e575f5ffd5b50335f5f6101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550610dce8061005b5f395ff3fe608060405234801561000f575f5ffd5b506004361061004a575f3560e01c806328696e361461004e5780638da5cb5b1461006a578063a061904014610088578063ce75255b146100a4575b5f5ffd5b61006860048036038101906100639190610687565b6100d4565b005b61007261019a565b60405161007f9190610711565b60405180910390f35b6100a2600480360381019061009d919061075d565b6101be565b005b6100be60048036038101906100b991906107c9565b610256565b6040516100cb9190610896565b60405180910390f35b5f6100dd610412565b90505f61012d8285858080601f0160208091040260200160405190810160405280939291908181526020018383808284375f81840152601f19601f820116905080830192505050505050506104f5565b9050816bffffffffffffffffffffffff167f093a34a48cc07b4bf1355d9c15ec71077c85342d872753188302f99341f961008260405160200161017091906108f0565b60405160208183030381529060405260405161018c9190610896565b60405180910390a250505050565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b5f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461024c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161024390610986565b60405180910390fd5b8060018190b15050565b60605f5f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146102e6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102dd90610986565b60405180910390fd5b5f838390501161032b576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610322906109ee565b60405180910390fd5b5f606790505f6001b086868660405160200161034a9493929190610a92565b60405160208183030381529060405290505f5f8373ffffffffffffffffffffffffffffffffffffffff168360405161038291906108
f0565b5f60405180830381855afa9150503d805f81146103ba576040519150601f19603f3d011682016040523d82523d5f602084013e6103bf565b606091505b509150915081610404576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103fb90610b3c565b60405180910390fd5b809450505050509392505050565b5f5f606490505f5f8273ffffffffffffffffffffffffffffffffffffffff1660206040516020016104439190610b9d565b60405160208183030381529060405260405161045f91906108f0565b5f60405180830381855afa9150503d805f8114610497576040519150601f19603f3d011682016040523d82523d5f602084013e61049c565b606091505b5091509150816104e1576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016104d890610c01565b60405180910390fd5b5f60208201519050805f1c94505050505090565b60605f606690505f6001b0858560405160200161051493929190610c1f565b60405160208183030381529060405290505f5f8373ffffffffffffffffffffffffffffffffffffffff168360405161054c91906108f0565b5f60405180830381855afa9150503d805f8114610584576040519150601f19603f3d011682016040523d82523d5f602084013e610589565b606091505b5091509150816105ce576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016105c590610cc7565b60405180910390fd5b5f815111610611576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161060890610d55565b60405180910390fd5b8094505050505092915050565b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5ffd5b5f5f83601f84011261064757610646610626565b5b8235905067ffffffffffffffff8111156106645761066361062a565b5b6020830191508360018202830111156106805761067f61062e565b5b9250929050565b5f5f6020838503121561069d5761069c61061e565b5b5f83013567ffffffffffffffff8111156106ba576106b9610622565b5b6106c685828601610632565b92509250509250929050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f6106fb826106d2565b9050919050565b61070b816106f1565b82525050565b5f6020820190506107245f830184610702565b92915050565b5f819050919050565b61073c8161072a565b8114610746575f5ffd5b50565b5f8135905061075781610733565b92915050565b5f602082840312156107
725761077161061e565b5b5f61077f84828501610749565b91505092915050565b5f6bffffffffffffffffffffffff82169050919050565b6107a881610788565b81146107b2575f5ffd5b50565b5f813590506107c38161079f565b92915050565b5f5f5f604084860312156107e0576107df61061e565b5b5f6107ed868287016107b5565b935050602084013567ffffffffffffffff81111561080e5761080d610622565b5b61081a86828701610632565b92509250509250925092565b5f81519050919050565b5f82825260208201905092915050565b8281835e5f83830152505050565b5f601f19601f8301169050919050565b5f61086882610826565b6108728185610830565b9350610882818560208601610840565b61088b8161084e565b840191505092915050565b5f6020820190508181035f8301526108ae818461085e565b905092915050565b5f81905092915050565b5f6108ca82610826565b6108d481856108b6565b93506108e4818560208601610840565b80840191505092915050565b5f6108fb82846108c0565b915081905092915050565b5f82825260208201905092915050565b7f4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f5f8201527f6e00000000000000000000000000000000000000000000000000000000000000602082015250565b5f610970602183610906565b915061097b82610916565b604082019050919050565b5f6020820190508181035f83015261099d81610964565b9050919050565b7f436970686572746578742063616e6e6f7420626520656d7074790000000000005f82015250565b5f6109d8601a83610906565b91506109e3826109a4565b602082019050919050565b5f6020820190508181035f830152610a05816109cc565b9050919050565b5f819050919050565b610a26610a218261072a565b610a0c565b82525050565b5f8160a01b9050919050565b5f610a4282610a2c565b9050919050565b610a5a610a5582610788565b610a38565b82525050565b828183375f83830152505050565b5f610a7983856108b6565b9350610a86838584610a60565b82840190509392505050565b5f610a9d8287610a15565b602082019150610aad8286610a49565b600c82019150610abe828486610a6e565b915081905095945050505050565b7f414553206465637279707420707265636f6d70696c652063616c6c206661696c5f8201527f6564000000000000000000000000000000000000000000000000000000000000602082015250565b5f610b26602283610906565b9150610b3182610acc565b604082019050919050565b5f6020820190508181035f830152610b538161
0b1a565b9050919050565b5f63ffffffff82169050919050565b5f8160e01b9050919050565b5f610b7f82610b69565b9050919050565b610b97610b9282610b5a565b610b75565b82525050565b5f610ba88284610b86565b60048201915081905092915050565b7f524e4720507265636f6d70696c652063616c6c206661696c65640000000000005f82015250565b5f610beb601a83610906565b9150610bf682610bb7565b602082019050919050565b5f6020820190508181035f830152610c1881610bdf565b9050919050565b5f610c2a8286610a15565b602082019150610c3a8285610a49565b600c82019150610c4a82846108c0565b9150819050949350505050565b7f41455320656e637279707420707265636f6d70696c652063616c6c206661696c5f8201527f6564000000000000000000000000000000000000000000000000000000000000602082015250565b5f610cb1602283610906565b9150610cbc82610c57565b604082019050919050565b5f6020820190508181035f830152610cde81610ca5565b9050919050565b7f456e6372797074696f6e2063616c6c2072657475726e6564206e6f206f7574705f8201527f7574000000000000000000000000000000000000000000000000000000000000602082015250565b5f610d3f602283610906565b9150610d4a82610ce5565b604082019050919050565b5f6020820190508181035f830152610d6c81610d33565b905091905056fea2646970667358221220cdc3edd7891930a1ad58becbe2b3f7679ecfc78a3b1f8a803d4c381c8318287864736f6c637827302e382e32382d63692e323032342e31312e342b636f6d6d69742e32306261666332392e6d6f640058"))
}
fn get_runtime_code() -> Bytes {
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/utils.rs | crates/seismic/node/tests/e2e/utils.rs | use alloy_eips::{eip2930::AccessListItem, eip7702::Authorization, BlockId, BlockNumberOrTag};
use alloy_primitives::{bytes, Address, B256, U256};
use alloy_provider::{
network::{
Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702,
},
Provider, ProviderBuilder, SendableTx,
};
use alloy_rpc_types_engine::PayloadAttributes;
use alloy_rpc_types_eth::TransactionRequest;
use alloy_signer::SignerSync;
use rand::{seq::SliceRandom, Rng};
use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB};
use reth_ethereum_engine_primitives::PayloadBuilderAttributes;
use reth_ethereum_primitives::TxType;
use reth_node_api::NodeTypesWithDBAdapter;
use reth_node_ethereum::EthereumNode;
use reth_provider::FullProvider;
/// Helper function to create a new eth payload attributes
pub(crate) fn eth_payload_attributes(timestamp: u64) -> PayloadBuilderAttributes {
let attributes = PayloadAttributes {
timestamp,
prev_randao: B256::ZERO,
suggested_fee_recipient: Address::ZERO,
withdrawals: Some(vec![]),
parent_beacon_block_root: Some(B256::ZERO),
};
PayloadBuilderAttributes::new(B256::ZERO, attributes)
}
/// Advances node by producing blocks with random transactions.
pub(crate) async fn advance_with_random_transactions<Provider>(
node: &mut NodeHelperType<EthereumNode, Provider>,
num_blocks: usize,
rng: &mut impl Rng,
finalize: bool,
) -> eyre::Result<()>
where
Provider: FullProvider<NodeTypesWithDBAdapter<EthereumNode, TmpDB>>,
{
let provider = ProviderBuilder::new().on_http(node.rpc_url());
let signers = Wallet::new(1).with_chain_id(provider.get_chain_id().await?).gen();
// simple contract which writes to storage on any call
let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a");
let mut call_destinations = signers.iter().map(|s| s.address()).collect::<Vec<_>>();
for _ in 0..num_blocks {
let tx_count = rng.gen_range(1..20);
let mut pending = vec![];
for _ in 0..tx_count {
let signer = signers.choose(rng).unwrap();
let tx_type = TxType::try_from(rng.gen_range(0..=4) as u64).unwrap();
let nonce = provider
.get_transaction_count(signer.address())
.block_id(BlockId::Number(BlockNumberOrTag::Pending))
.await?;
let mut tx =
TransactionRequest::default().with_from(signer.address()).with_nonce(nonce);
let should_create =
rng.gen::<bool>() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702;
if should_create {
tx = tx.into_create().with_input(dummy_bytecode.clone());
} else {
tx = tx.with_to(*call_destinations.choose(rng).unwrap()).with_input(
(0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::<Vec<u8>>(),
);
}
if matches!(tx_type, TxType::Legacy | TxType::Eip2930) {
tx = tx.with_gas_price(provider.get_gas_price().await?);
}
if rng.gen::<bool>() || tx_type == TxType::Eip2930 {
tx = tx.with_access_list(
vec![AccessListItem {
address: *call_destinations.choose(rng).unwrap(),
storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(),
}]
.into(),
);
}
if tx_type == TxType::Eip7702 {
let signer = signers.choose(rng).unwrap();
let auth = Authorization {
chain_id: U256::from(provider.get_chain_id().await?),
address: *call_destinations.choose(rng).unwrap(),
nonce: provider
.get_transaction_count(signer.address())
.block_id(BlockId::Number(BlockNumberOrTag::Pending))
.await?,
};
let sig = signer.sign_hash_sync(&auth.signature_hash())?;
tx = tx.with_authorization_list(vec![auth.into_signed(sig)])
}
let gas = provider
.estimate_gas(tx.clone())
.block(BlockId::Number(BlockNumberOrTag::Pending))
.await
.unwrap_or(1_000_000);
tx.set_gas_limit(gas);
let SendableTx::Builder(tx) = provider.fill(tx).await? else { unreachable!() };
let tx =
NetworkWallet::<Ethereum>::sign_request(&EthereumWallet::new(signer.clone()), tx)
.await?;
pending.push(provider.send_tx_envelope(tx).await?);
}
let payload = node.build_and_submit_payload().await?;
if finalize {
node.update_forkchoice(payload.block().hash(), payload.block().hash()).await?;
} else {
let last_safe =
provider.get_block_by_number(BlockNumberOrTag::Safe).await?.unwrap().header.hash;
node.update_forkchoice(last_safe, payload.block().hash()).await?;
}
for pending in pending {
let receipt = pending.get_receipt().await?;
if let Some(address) = receipt.contract_address {
call_destinations.push(address);
}
}
}
Ok(())
}
/// Test utils for the seismic rpc api
#[cfg(test)]
pub mod test_utils {
use super::*;
use crate::ext::test_address;
use alloy_consensus::{SignableTransaction, TxEnvelope, TypedTransaction};
use alloy_dyn_abi::TypedData;
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{
aliases::U96, hex_literal, Address, Bytes, PrimitiveSignature, TxKind, B256, U256,
};
use alloy_rpc_types::{
engine::PayloadAttributes, Block, Header, Transaction, TransactionInput, TransactionReceipt,
};
use alloy_rpc_types_eth::TransactionRequest;
use alloy_signer_local::PrivateKeySigner;
use core::str::FromStr;
use enr::EnrKey;
use jsonrpsee::http_client::HttpClient;
use jsonrpsee_core::server::Methods;
use k256::ecdsa::SigningKey;
use reth_chainspec::MAINNET;
use reth_e2e_test_utils::transaction::TransactionTestContext;
use reth_enclave::MockEnclaveServer;
use reth_network_api::noop::NoopNetwork;
use reth_payload_builder::EthPayloadBuilderAttributes;
use reth_primitives::TransactionSigned;
use reth_provider::StateProviderFactory;
use reth_rpc::EthApi;
use reth_rpc_builder::{
RpcModuleSelection, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig,
};
use reth_rpc_eth_api::EthApiClient;
use reth_seismic_chainspec::SEISMIC_DEV;
use reth_seismic_primitives::{SeismicPrimitives, SeismicTransactionSigned};
use reth_transaction_pool::test_utils::TestPool;
use secp256k1::{PublicKey, SecretKey};
use seismic_alloy_consensus::{
SeismicTxEnvelope::Seismic, SeismicTypedTransaction, TxSeismic, TxSeismicElements,
TypedDataRequest,
};
use seismic_alloy_rpc_types::SeismicTransactionRequest;
use serde_json::Value;
use std::{path::PathBuf, process::Stdio, sync::Arc};
use tokio::{
io::{AsyncBufReadExt, AsyncWriteExt, BufReader},
process::Command,
sync::mpsc,
};
// use reth_seismic_evm::engine::SeismicEngineValidator;
/// Get the nonce from the client
pub async fn get_nonce(client: &HttpClient, address: Address) -> u64 {
let nonce =
EthApiClient::<Transaction, Block, TransactionReceipt, Header>::transaction_count(
client, address, None,
)
.await
.unwrap();
nonce.wrapping_to::<u64>()
}
/// Get an unsigned seismic transaction request
pub async fn get_unsigned_seismic_tx_request(
sk_wallet: &PrivateKeySigner,
nonce: u64,
to: TxKind,
chain_id: u64,
plaintext: Bytes,
) -> SeismicTransactionRequest {
SeismicTransactionRequest {
inner: TransactionRequest {
from: Some(sk_wallet.address()),
nonce: Some(nonce),
value: Some(U256::from(0)),
to: Some(to),
gas: Some(6000000),
gas_price: Some(20e9 as u128),
chain_id: Some(chain_id),
input: TransactionInput { input: Some(client_encrypt(&plaintext)), data: None },
transaction_type: Some(TxSeismic::TX_TYPE),
..Default::default()
},
seismic_elements: Some(get_seismic_elements()),
}
}
// /// Create a seismic transaction
// pub async fn get_signed_seismic_tx_bytes(
// sk_wallet: &PrivateKeySigner,
// nonce: u64,
// to: TxKind,
// chain_id: u64,
// plaintext: Bytes,
// ) -> Bytes {
// let mut tx = get_unsigned_seismic_tx_request(sk_wallet, nonce, to, chain_id,
// plaintext).await; let signed_inner =
// TransactionTestContext::sign_tx(sk_wallet.clone(), tx.inner).await; tx.inner =
// signed_inner.into(); <TxEnvelope as Encodable2718>::encoded_2718(&tx).into()
// }
// /// Get an unsigned seismic transaction typed data
// pub async fn get_unsigned_seismic_tx_typed_data(
// sk_wallet: &PrivateKeySigner,
// nonce: u64,
// to: TxKind,
// chain_id: u64,
// decrypted_input: Bytes,
// ) -> TypedData {
// let tx_request =
// get_unsigned_seismic_tx_request(sk_wallet, nonce, to, chain_id,
// decrypted_input).await; let typed_tx =
// tx_request.inner.build_consensus_tx().unwrap(); match typed_tx {
// SeismicTypedTransaction::Seismic(seismic) => seismic.eip712_to_type_data(),
// _ => panic!("Typed transaction is not a seismic transaction"),
// }
// }
// /// Create a seismic transaction with typed data
// pub async fn get_signed_seismic_tx_typed_data(
// sk_wallet: &PrivateKeySigner,
// nonce: u64,
// to: TxKind,
// chain_id: u64,
// plaintext: Bytes,
// ) -> TypedDataRequest {
// let mut tx: SeismicTransactionRequest = get_unsigned_seismic_tx_request(sk_wallet, nonce,
// to, chain_id, plaintext).await; tx.seismic_elements.unwrap().message_version = 2;
// let signed_inner = TransactionTestContext::sign_tx(sk_wallet.clone(), tx.inner).await;
// tx.inner = signed_inner.into();
// tx
// // let tx = get_unsigned_seismic_tx_request(sk_wallet, nonce, to, chain_id,
// plaintext).await; // tx.seismic_elements.unwrap().message_version = 2;
// // let signed = TransactionTestContext::sign_tx(sk_wallet.clone(), tx).await;
// // match signed {
// // Seismic(tx) => tx.into(),
// // _ => panic!("Signed transaction is not a seismic transaction"),
// // }
// }
/// Get the network public key
pub fn get_network_public_key() -> PublicKey {
MockEnclaveServer::get_public_key()
}
/// Encrypt plaintext using network public key and client private key
pub fn get_ciphertext() -> Bytes {
let encrypted_data = client_encrypt(&get_plaintext());
encrypted_data
}
/// Encrypt plaintext using network public key and client private key
pub fn client_encrypt(plaintext: &Bytes) -> Bytes {
get_seismic_elements()
.client_encrypt(plaintext, &get_network_public_key(), &get_encryption_private_key())
.unwrap()
}
/// Decrypt ciphertext using network public key and client private key
pub fn client_decrypt(ciphertext: &Bytes) -> Bytes {
get_seismic_elements()
.client_decrypt(ciphertext, &get_network_public_key(), &get_encryption_private_key())
.unwrap()
}
/// Get the encryption private key
pub fn get_encryption_private_key() -> SecretKey {
let private_key_bytes =
hex_literal::hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f");
SecretKey::from_slice(&private_key_bytes).expect("Invalid private key")
}
/// Get the encryption nonce
pub fn get_encryption_nonce() -> U96 {
U96::MAX
}
/// Get the seismic elements
pub fn get_seismic_elements() -> TxSeismicElements {
TxSeismicElements {
encryption_pubkey: get_encryption_private_key().public(),
encryption_nonce: get_encryption_nonce(),
message_version: 0,
}
}
/// Get a wrong private key
pub fn get_wrong_private_key() -> SecretKey {
let private_key_bytes =
hex_literal::hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1e");
SecretKey::from_slice(&private_key_bytes).expect("Invalid private key")
}
/// Get the signing private key
pub fn get_signing_private_key() -> SigningKey {
let private_key_bytes =
hex_literal::hex!("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80");
let signing_key =
SigningKey::from_bytes(&private_key_bytes.into()).expect("Invalid private key");
signing_key
}
/// Get the plaintext for a seismic transaction
pub fn get_plaintext() -> Bytes {
Bytes::from_str("24a7f0b7000000000000000000000000000000000000000000000000000000000000000b")
.unwrap()
}
/// Get a seismic transaction
pub fn get_seismic_tx() -> TxSeismic {
let ciphertext = get_ciphertext();
TxSeismic {
chain_id: 1337,
nonce: 1,
gas_price: 20000000000,
gas_limit: 210000,
to: alloy_primitives::TxKind::Call(
Address::from_str("0x5fbdb2315678afecb367f032d93f642f64180aa3").unwrap(),
),
value: U256::ZERO,
input: Bytes::copy_from_slice(&ciphertext),
seismic_elements: get_seismic_elements(),
}
}
/// Get the encoding of a signed seismic transaction
pub fn get_signed_seismic_tx_encoding() -> Vec<u8> {
let signed_tx = get_signed_seismic_tx();
let mut encoding = Vec::new();
signed_tx.encode_2718(&mut encoding);
encoding
}
/// Sign a seismic transaction
pub fn sign_seismic_tx(tx: &TxSeismic) -> PrimitiveSignature {
let _signature = get_signing_private_key()
.clone()
.sign_prehash_recoverable(tx.signature_hash().as_slice())
.expect("Failed to sign");
let recoverid = _signature.1;
let _signature = _signature.0;
let signature = PrimitiveSignature::new(
U256::from_be_slice(_signature.r().to_bytes().as_slice()),
U256::from_be_slice(_signature.s().to_bytes().as_slice()),
recoverid.is_y_odd(),
);
signature
}
/// Get a signed seismic transaction
pub fn get_signed_seismic_tx() -> SeismicTransactionSigned {
let tx = get_seismic_tx();
let signature = sign_seismic_tx(&tx);
SignableTransaction::into_signed(tx, signature).into()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/blobs.rs | crates/seismic/node/tests/e2e/blobs.rs | use crate::utils::eth_payload_attributes;
use alloy_genesis::Genesis;
use reth_chainspec::{ChainSpecBuilder, MAINNET};
use reth_e2e_test_utils::{
node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet,
};
use reth_node_builder::{NodeBuilder, NodeHandle};
use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig};
use reth_node_ethereum::EthereumNode;
use reth_tasks::TaskManager;
use reth_transaction_pool::TransactionPool;
use std::sync::Arc;
#[tokio::test(flavor = "multi_thread")]
async fn can_handle_blobs() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let tasks = TaskManager::current();
let exec = tasks.executor();
let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(genesis)
.cancun_activated()
.build(),
);
let genesis_hash = chain_spec.genesis_hash();
let node_config = NodeConfig::test()
.with_chain(chain_spec)
.with_unused_ports()
.with_rpc(RpcServerArgs::default().with_unused_ports().with_http());
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone())
.testing_node(exec.clone())
.node(EthereumNode::default())
.launch()
.await?;
let mut node = NodeTestContext::new(node, eth_payload_attributes).await?;
let wallets = Wallet::new(2).gen();
let blob_wallet = wallets.first().unwrap();
let second_wallet = wallets.last().unwrap();
// inject normal tx
let raw_tx = TransactionTestContext::transfer_tx_bytes(1, second_wallet.clone()).await;
let tx_hash = node.rpc.inject_tx(raw_tx).await?;
// build payload with normal tx
let payload = node.new_payload().await?;
// clean the pool
node.inner.pool.remove_transactions(vec![tx_hash]);
// build blob tx
let blob_tx = TransactionTestContext::tx_with_blobs_bytes(1, blob_wallet.clone()).await?;
// inject blob tx to the pool
let blob_tx_hash = node.rpc.inject_tx(blob_tx).await?;
// fetch it from rpc
let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?;
// validate sidecar
TransactionTestContext::validate_sidecar(envelope);
// build a payload
let blob_payload = node.new_payload().await?;
// submit the blob payload
let blob_block_hash = node.submit_payload(blob_payload).await?;
node.update_forkchoice(genesis_hash, blob_block_hash).await?;
// submit normal payload (reorg)
let block_hash = node.submit_payload(payload).await?;
node.update_forkchoice(genesis_hash, block_hash).await?;
tokio::time::sleep(std::time::Duration::from_secs(3)).await;
// expects the blob tx to be back in the pool
let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?;
// make sure the sidecar is present
TransactionTestContext::validate_sidecar(envelope);
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/p2p.rs | crates/seismic/node/tests/e2e/p2p.rs | use futures::StreamExt;
use reth_seismic_node::utils::{advance_chain, setup};
use std::sync::Arc;
use tokio::sync::Mutex;
#[tokio::test]
async fn can_sync() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let (mut nodes, _tasks, wallet) = setup(3).await?;
let wallet = Arc::new(Mutex::new(wallet));
let third_node = nodes.pop().unwrap();
let mut second_node = nodes.pop().unwrap();
let mut first_node = nodes.pop().unwrap();
let tip: usize = 90;
let tip_index: usize = tip - 1;
let reorg_depth = 2;
// On first node, create a chain up to block number 90a
let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?;
let canonical_chain =
canonical_payload_chain.iter().map(|p| p.block().hash()).collect::<Vec<_>>();
// On second node, sync optimistically up to block number 88a
second_node.update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth - 1]).await?;
second_node
.wait_block(
(tip - reorg_depth - 1) as u64,
canonical_chain[tip_index - reorg_depth - 1],
true,
)
.await?;
// We send FCU twice to ensure that pool receives canonical chain update on the second FCU
// This is required because notifications are not sent during backfill sync
second_node.update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]).await?;
second_node
.wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], false)
.await?;
second_node.canonical_stream.next().await.unwrap();
// Trigger backfill sync until block 80
third_node
.update_forkchoice(canonical_chain[tip_index - 10], canonical_chain[tip_index - 10])
.await?;
third_node.wait_block((tip - 10) as u64, canonical_chain[tip_index - 10], true).await?;
// Trigger live sync to block 90
third_node.update_optimistic_forkchoice(canonical_chain[tip_index]).await?;
third_node.wait_block(tip as u64, canonical_chain[tip_index], false).await?;
// On second node, create a side chain: 88a -> 89b -> 90b
wallet.lock().await.inner_nonce -= reorg_depth as u64;
second_node.payload.timestamp = first_node.payload.timestamp - reorg_depth as u64; // TODO: probably want to make it node agnostic
let side_payload_chain = advance_chain(reorg_depth, &mut second_node, wallet.clone()).await?;
let side_chain = side_payload_chain.iter().map(|p| p.block().hash()).collect::<Vec<_>>();
// Creates fork chain by submitting 89b payload.
// By returning Valid here, op-node will finally return a finalized hash
let _ = third_node.submit_payload(side_payload_chain[0].clone()).await;
// It will issue a pipeline reorg to 88a, and then make 89b canonical AND finalized.
third_node.update_forkchoice(side_chain[0], side_chain[0]).await?;
// Make sure we have the updated block
third_node.wait_unwind((tip - reorg_depth) as u64).await?;
third_node
.wait_block(
side_payload_chain[0].block().number,
side_payload_chain[0].block().hash(),
false,
)
.await?;
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/testsuite.rs | crates/seismic/node/tests/e2e/testsuite.rs | use alloy_primitives::{Address, B256};
use eyre::Result;
use alloy_rpc_types_engine::PayloadAttributes;
use reth_e2e_test_utils::testsuite::{
actions::AssertMineBlock,
setup::{NetworkSetup, Setup},
TestBuilder,
};
use reth_chainspec::{ChainSpecBuilder, SEISMIC_MAINNET};
use reth_seismic_node::{SeismicEngineTypes, SeismicNode};
use std::sync::Arc;
#[tokio::test]
async fn test_testsuite_op_assert_mine_block() -> Result<()> {
reth_tracing::init_test_tracing();
let setup = Setup::default()
.with_chain_spec(Arc::new(
ChainSpecBuilder::default()
.chain(SEISMIC_MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.build()
.into(),
))
.with_network(NetworkSetup::single_node());
let test =
TestBuilder::new().with_setup(setup).with_action(AssertMineBlock::<SeismicEngineTypes>::new(
0,
vec![],
Some(B256::ZERO),
// TODO: refactor once we have actions to generate payload attributes.
PayloadAttributes {
payload_attributes: alloy_rpc_types_engine::PayloadAttributes {
timestamp: std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_millis() as u64,
prev_randao: B256::random(),
suggested_fee_recipient: Address::random(),
withdrawals: None,
parent_beacon_block_root: None,
},
transactions: None,
no_tx_pool: None,
eip_1559_params: None,
gas_limit: Some(30_000_000),
},
));
test.run::<SeismicNode>().await?;
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/rpc.rs | crates/seismic/node/tests/e2e/rpc.rs | use crate::utils::eth_payload_attributes;
use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718};
use alloy_primitives::{Address, B256, U256};
use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx};
use alloy_rpc_types_beacon::relay::{
BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4,
SignedBidSubmissionV3, SignedBidSubmissionV4,
};
use alloy_rpc_types_engine::{BlobsBundleV1, ExecutionPayloadV3};
use alloy_rpc_types_eth::TransactionRequest;
use rand::{rngs::StdRng, Rng, SeedableRng};
use reth_chainspec::{ChainSpecBuilder, MAINNET};
use reth_e2e_test_utils::setup_engine;
use reth_node_ethereum::EthereumNode;
use reth_payload_primitives::BuiltPayload;
use std::sync::Arc;
alloy_sol_types::sol! {
#[sol(rpc, bytecode = "6080604052348015600f57600080fd5b5060405160db38038060db833981016040819052602a91607a565b60005b818110156074576040805143602082015290810182905260009060600160408051601f19818403018152919052805160209091012080555080606d816092565b915050602d565b505060b8565b600060208284031215608b57600080fd5b5051919050565b60006001820160b157634e487b7160e01b600052601160045260246000fd5b5060010190565b60168060c56000396000f3fe6080604052600080fdfea164736f6c6343000810000a")]
contract GasWaster {
constructor(uint256 iterations) {
for (uint256 i = 0; i < iterations; i++) {
bytes32 slot = keccak256(abi.encode(block.number, i));
assembly {
sstore(slot, slot)
}
}
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fee_history() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let seed: [u8; 32] = rand::thread_rng().gen();
let mut rng = StdRng::from_seed(seed);
println!("Seed: {:?}", seed);
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.cancun_activated()
.build(),
);
let (mut nodes, _tasks, wallet) =
setup_engine::<EthereumNode>(1, chain_spec.clone(), false, eth_payload_attributes).await?;
let mut node = nodes.pop().unwrap();
let provider = ProviderBuilder::new()
.wallet(EthereumWallet::new(wallet.gen().swap_remove(0)))
.on_http(node.rpc_url());
let fee_history = provider.get_fee_history(10, 0_u64.into(), &[]).await?;
let genesis_base_fee = chain_spec.initial_base_fee().unwrap() as u128;
let expected_first_base_fee = genesis_base_fee -
genesis_base_fee / chain_spec.base_fee_params_at_block(0).max_change_denominator;
assert_eq!(fee_history.base_fee_per_gas[0], genesis_base_fee);
assert_eq!(fee_history.base_fee_per_gas[1], expected_first_base_fee,);
// Spend some gas
let builder = GasWaster::deploy_builder(&provider, U256::from(500)).send().await?;
node.advance_block().await?;
let receipt = builder.get_receipt().await?;
assert!(receipt.status());
let block = provider.get_block_by_number(1.into()).await?.unwrap();
assert_eq!(block.header.gas_used, receipt.gas_used,);
assert_eq!(block.header.base_fee_per_gas.unwrap(), expected_first_base_fee as u64);
for _ in 0..100 {
let _ =
GasWaster::deploy_builder(&provider, U256::from(rng.gen_range(0..1000))).send().await?;
node.advance_block().await?;
}
let latest_block = provider.get_block_number().await?;
for _ in 0..100 {
let latest_block = rng.gen_range(0..=latest_block);
let block_count = rng.gen_range(1..=(latest_block + 1));
let fee_history = provider.get_fee_history(block_count, latest_block.into(), &[]).await?;
let mut prev_header = provider
.get_block_by_number((latest_block + 1 - block_count).into())
.await?
.unwrap()
.header;
for block in (latest_block + 2 - block_count)..=latest_block {
let expected_base_fee = calc_next_block_base_fee(
prev_header.gas_used,
prev_header.gas_limit,
prev_header.base_fee_per_gas.unwrap(),
chain_spec.base_fee_params_at_block(block),
);
let header = provider.get_block_by_number(block.into()).await?.unwrap().header;
assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee);
assert_eq!(
header.base_fee_per_gas.unwrap(),
fee_history.base_fee_per_gas[(block + block_count - 1 - latest_block) as usize]
as u64
);
prev_header = header;
}
}
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
async fn test_flashbots_validate_v3() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.cancun_activated()
.build(),
);
let (mut nodes, _tasks, wallet) =
setup_engine::<EthereumNode>(1, chain_spec.clone(), false, eth_payload_attributes).await?;
let mut node = nodes.pop().unwrap();
let provider = ProviderBuilder::new()
.wallet(EthereumWallet::new(wallet.gen().swap_remove(0)))
.on_http(node.rpc_url());
node.advance(100, |_| {
let provider = provider.clone();
Box::pin(async move {
let SendableTx::Envelope(tx) =
provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap()
else {
unreachable!()
};
tx.encoded_2718().into()
})
})
.await?;
let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?;
let payload = node.new_payload().await?;
let mut request = BuilderBlockValidationRequestV3 {
request: SignedBidSubmissionV3 {
message: BidTrace {
parent_hash: payload.block().parent_hash,
block_hash: payload.block().hash(),
gas_used: payload.block().gas_used,
gas_limit: payload.block().gas_limit,
..Default::default()
},
execution_payload: ExecutionPayloadV3::from_block_unchecked(
payload.block().hash(),
&payload.block().clone().into_block(),
),
blobs_bundle: BlobsBundleV1::new([]),
signature: Default::default(),
},
parent_beacon_block_root: payload.block().parent_beacon_block_root.unwrap(),
registered_gas_limit: payload.block().gas_limit,
};
assert!(provider
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,))
.await
.is_ok());
request.registered_gas_limit -= 1;
assert!(provider
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,))
.await
.is_err());
request.registered_gas_limit += 1;
request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO;
assert!(provider
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,))
.await
.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread")]
async fn test_flashbots_validate_v4() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.prague_activated()
.build(),
);
let (mut nodes, _tasks, wallet) =
setup_engine::<EthereumNode>(1, chain_spec.clone(), false, eth_payload_attributes).await?;
let mut node = nodes.pop().unwrap();
let provider = ProviderBuilder::new()
.wallet(EthereumWallet::new(wallet.gen().swap_remove(0)))
.on_http(node.rpc_url());
node.advance(100, |_| {
let provider = provider.clone();
Box::pin(async move {
let SendableTx::Envelope(tx) =
provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap()
else {
unreachable!()
};
tx.encoded_2718().into()
})
})
.await?;
let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?;
let payload = node.new_payload().await?;
let mut request = BuilderBlockValidationRequestV4 {
request: SignedBidSubmissionV4 {
message: BidTrace {
parent_hash: payload.block().parent_hash,
block_hash: payload.block().hash(),
gas_used: payload.block().gas_used,
gas_limit: payload.block().gas_limit,
..Default::default()
},
execution_payload: ExecutionPayloadV3::from_block_unchecked(
payload.block().hash(),
&payload.block().clone().into_block(),
),
blobs_bundle: BlobsBundleV1::new([]),
execution_requests: payload.requests().unwrap().try_into().unwrap(),
signature: Default::default(),
},
parent_beacon_block_root: payload.block().parent_beacon_block_root.unwrap(),
registered_gas_limit: payload.block().gas_limit,
};
provider
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,))
.await
.expect("request should validate");
request.registered_gas_limit -= 1;
assert!(provider
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,))
.await
.is_err());
request.registered_gas_limit += 1;
request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO;
assert!(provider
.raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,))
.await
.is_err());
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/e2e/main.rs | crates/seismic/node/tests/e2e/main.rs | #![allow(missing_docs)]
mod integration;
// mod p2p;
// mod testsuite;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/it/builder.rs | crates/seismic/node/tests/it/builder.rs | //! Node builder setup tests.
use reth_db::test_utils::create_test_rw_db;
use reth_node_api::{FullNodeComponents, NodeTypesWithDBAdapter};
use reth_node_builder::{Node, NodeBuilder, NodeConfig};
use reth_provider::providers::BlockchainProvider;
use reth_seismic_chainspec::SEISMIC_MAINNET;
use reth_seismic_node::node::SeismicNode;
#[test]
fn test_basic_setup() {
// parse CLI -> config
let config = NodeConfig::new(SEISMIC_MAINNET.clone());
let db = create_test_rw_db();
let seismic_node = SeismicNode::default();
let _builder = NodeBuilder::new(config)
.with_database(db)
.with_types_and_provider::<SeismicNode, BlockchainProvider<NodeTypesWithDBAdapter<SeismicNode, _>>>()
.with_components(seismic_node.components())
.with_add_ons(seismic_node.add_ons())
.on_component_initialized(move |ctx| {
let _provider = ctx.provider();
Ok(())
})
.on_node_started(|_full_node| Ok(()))
.on_rpc_started(|_ctx, handles| {
let _client = handles.rpc.http_client();
Ok(())
})
.extend_rpc_modules(|ctx| {
let _ = ctx.config();
let _ = ctx.node().provider();
Ok(())
})
.check_launch();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/it/exex.rs | crates/seismic/node/tests/it/exex.rs | use futures::future;
use reth_db::test_utils::create_test_rw_db;
use reth_exex::ExExContext;
use reth_node_api::FullNodeComponents;
use reth_node_builder::{NodeBuilder, NodeConfig};
use reth_node_ethereum::{node::EthereumAddOns, EthereumNode};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
struct DummyExEx<Node: FullNodeComponents> {
_ctx: ExExContext<Node>,
}
impl<Node> Future for DummyExEx<Node>
where
Node: FullNodeComponents,
{
type Output = eyre::Result<()>;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Pending
}
}
#[test]
fn basic_exex() {
let config = NodeConfig::test();
let db = create_test_rw_db();
let _builder = NodeBuilder::new(config)
.with_database(db)
.with_types::<EthereumNode>()
.with_components(EthereumNode::components())
.with_add_ons(EthereumAddOns::default())
.install_exex("dummy", move |ctx| future::ok(DummyExEx { _ctx: ctx }))
.check_launch();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/node/tests/it/main.rs | crates/seismic/node/tests/it/main.rs | #![allow(missing_docs)]
mod builder;
mod exex;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/lib.rs | crates/seismic/rpc/src/lib.rs | //! Seismic-Reth RPC support.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod eth;
pub use eth::*;
mod error;
pub use error::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/error.rs | crates/seismic/rpc/src/error.rs | //! Error types for the seismic rpc api.
use std::convert::Infallible;
use alloy_rpc_types_eth::BlockError;
use reth_provider::ProviderError;
use reth_rpc_eth_api::{AsEthApiError, EthTxEnvError, TransactionConversionError};
use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError};
use reth_rpc_server_types::result::internal_rpc_err;
use revm::context_interface::result::EVMError;
use revm_context::result::HaltReason;
use seismic_revm::SeismicHaltReason;
#[derive(Debug, thiserror::Error)]
/// Seismic API error
pub enum SeismicEthApiError {
/// Eth error
#[error(transparent)]
Eth(#[from] EthApiError),
/// Enclave error
#[error("enclave error: {0}")]
EnclaveError(String),
/// Attempting to access public storage with cload
#[error("invalid public storage access")]
InvalidPublicStorageAccess,
/// Attempting to access private storage with sload
#[error("invalid private storage access")]
InvalidPrivateStorageAccess,
}
impl AsEthApiError for SeismicEthApiError {
fn as_err(&self) -> Option<&EthApiError> {
match self {
Self::Eth(err) => Some(err),
_ => None,
}
}
}
impl From<SeismicEthApiError> for jsonrpsee::types::error::ErrorObject<'static> {
fn from(error: SeismicEthApiError) -> Self {
match error {
SeismicEthApiError::Eth(e) => e.into(),
SeismicEthApiError::EnclaveError(e) => internal_rpc_err(format!("enclave error: {e}")),
SeismicEthApiError::InvalidPrivateStorageAccess => {
internal_rpc_err("invalid private storage access")
}
SeismicEthApiError::InvalidPublicStorageAccess => {
internal_rpc_err("invalid public storage access")
}
}
}
}
impl FromEvmHalt<SeismicHaltReason> for SeismicEthApiError {
fn from_evm_halt(halt: SeismicHaltReason, gas_limit: u64) -> Self {
match halt {
SeismicHaltReason::InvalidPrivateStorageAccess => {
SeismicEthApiError::InvalidPrivateStorageAccess
}
SeismicHaltReason::InvalidPublicStorageAccess => {
SeismicEthApiError::InvalidPublicStorageAccess
}
SeismicHaltReason::Base(halt) => EthApiError::from_evm_halt(halt, gas_limit).into(),
}
}
}
impl From<TransactionConversionError> for SeismicEthApiError {
fn from(value: TransactionConversionError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<EthTxEnvError> for SeismicEthApiError {
fn from(value: EthTxEnvError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<ProviderError> for SeismicEthApiError {
fn from(value: ProviderError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<BlockError> for SeismicEthApiError {
fn from(value: BlockError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<Infallible> for SeismicEthApiError {
fn from(value: Infallible) -> Self {
match value {}
}
}
impl From<EVMError<ProviderError>> for SeismicEthApiError {
fn from(error: EVMError<ProviderError>) -> Self {
Self::Eth(EthApiError::from(error))
}
}
// Implementation for revm halt reason (base case)
impl From<HaltReason> for SeismicEthApiError {
fn from(halt: HaltReason) -> Self {
Self::Eth(EthApiError::other(internal_rpc_err(format!("EVM halted: {halt:?}"))))
}
}
// FromEvmHalt implementation for base revm halt reason
impl FromEvmHalt<HaltReason> for SeismicEthApiError {
fn from_evm_halt(halt: HaltReason, gas_limit: u64) -> Self {
// Delegate to the existing From implementation for the halt reason
// and use the gas limit info if needed
Self::Eth(EthApiError::other(internal_rpc_err(format!(
"EVM halted: {halt:?} (gas limit: {gas_limit})"
))))
}
}
#[cfg(test)]
mod tests {
use crate::error::SeismicEthApiError;
#[test]
fn enclave_error_message() {
let err: jsonrpsee::types::error::ErrorObject<'static> =
SeismicEthApiError::EnclaveError("test".to_string()).into();
assert_eq!(err.message(), "enclave error: test");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/eth/pending_block.rs | crates/seismic/rpc/src/eth/pending_block.rs | //! Loads Seismic pending block for a RPC response.
use crate::{SeismicEthApi, SeismicEthApiError};
use reth_rpc_eth_api::{helpers::LoadPendingBlock, FromEvmError, RpcConvert, RpcNodeCore};
use reth_rpc_eth_types::PendingBlock;
impl<N, Rpc> LoadPendingBlock for SeismicEthApi<N, Rpc>
where
N: RpcNodeCore,
SeismicEthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<Self::Primitives>>> {
self.inner.pending_block()
}
#[inline]
fn pending_env_builder(
&self,
) -> &dyn reth_rpc_eth_api::helpers::pending_block::PendingEnvBuilder<Self::Evm> {
self.inner.pending_env_builder()
}
#[inline]
fn pending_block_kind(&self) -> reth_rpc_eth_types::builder::config::PendingBlockKind {
self.inner.pending_block_kind()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/eth/ext.rs | crates/seismic/rpc/src/eth/ext.rs | //! seismic implementation of eth api and its extensions
//!
//! Overrides the eth_ namespace to be compatible with seismic specific types
//! Most endpoints handle transaction decrytpion before passing to the inner eth api
//! For `eth_sendRawTransaction`, we directly call the inner eth api without decryption
//! See that function's docs for more details
use super::api::FullSeismicApi;
use crate::utils::convert_seismic_call_to_tx_request;
use alloy_dyn_abi::TypedData;
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_rpc_types::{
state::{EvmOverrides, StateOverride},
BlockId, BlockOverrides, TransactionRequest,
};
use alloy_rpc_types_eth::simulate::{
SimBlock as EthSimBlock, SimulatePayload as EthSimulatePayload, SimulatedBlock,
};
use futures::Future;
use jsonrpsee::{
core::{async_trait, RpcResult},
proc_macros::rpc,
};
use reth_rpc_eth_api::{
helpers::{EthCall, EthTransactions},
RpcBlock, RpcTypes,
};
use reth_rpc_eth_types::EthApiError;
use reth_tracing::tracing::*;
use seismic_alloy_consensus::{InputDecryptionElements, TypedDataRequest};
use seismic_alloy_rpc_types::{
SeismicCallRequest, SeismicRawTxRequest, SeismicTransactionRequest,
SimBlock as SeismicSimBlock, SimulatePayload as SeismicSimulatePayload,
};
use seismic_enclave::{secp256k1::PublicKey, GetPurposeKeysResponse};
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
/// trait interface for a custom rpc namespace: `seismic`
///
/// This defines an additional namespace where all methods are configured as trait functions.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "seismic"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "seismic"))]
pub trait SeismicApi {
/// Returns the network public key
#[method(name = "getTeePublicKey")]
async fn get_tee_public_key(&self) -> RpcResult<PublicKey>;
}
/// Implementation of the seismic rpc api
#[derive(Debug, Clone)]
pub struct SeismicApi {
purpose_keys: GetPurposeKeysResponse,
}
impl SeismicApi {
/// Creates a new seismic api instance
pub const fn new(purpose_keys: GetPurposeKeysResponse) -> Self {
Self { purpose_keys }
}
}
#[async_trait]
impl SeismicApiServer for SeismicApi {
async fn get_tee_public_key(&self) -> RpcResult<PublicKey> {
trace!(target: "rpc::seismic", "Serving seismic_getTeePublicKey");
Ok(self.purpose_keys.tx_io_pk)
}
}
/// Localhost with port 0 so a free port is used.
pub const fn test_address() -> SocketAddr {
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0))
}
/// Extension trait for `EthTransactions` to add custom transaction sending functionalities.
pub trait SeismicTransaction: EthTransactions {
/// Decodes, signs (if necessary via an internal signer or enclave),
/// and submits a typed data transaction to the pool.
/// Returns the hash of the transaction.
fn send_typed_data_transaction(
&self,
tx_request: TypedDataRequest,
) -> impl Future<Output = Result<B256, Self::Error>> + Send;
}
/// Seismic `eth_` RPC namespace overrides.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EthApiOverride<B: RpcObject> {
/// Returns the account and storage values of the specified account including the Merkle-proof.
/// This call can be used to verify that the data you are pulling from is not tampered with.
#[method(name = "signTypedData_v4")]
async fn sign_typed_data_v4(&self, address: Address, data: TypedData) -> RpcResult<String>;
/// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state.
/// The transactions are packed into individual blocks. Overrides can be provided.
#[method(name = "simulateV1")]
async fn simulate_v1(
&self,
opts: SeismicSimulatePayload<SeismicCallRequest>,
block_number: Option<BlockId>,
) -> RpcResult<Vec<SimulatedBlock<B>>>;
/// Executes a new message call immediately without creating a transaction on the block chain.
#[method(name = "call")]
async fn call(
&self,
request: SeismicCallRequest,
block_number: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> RpcResult<Bytes>;
/// Sends signed transaction, returning its hash.
#[method(name = "sendRawTransaction")]
async fn send_raw_transaction(&self, bytes: SeismicRawTxRequest) -> RpcResult<B256>;
/// Generates and returns an estimate of how much gas is necessary to allow the transaction to
/// complete.
#[method(name = "estimateGas")]
async fn estimate_gas(
&self,
request: SeismicTransactionRequest,
block_number: Option<BlockId>,
state_override: Option<StateOverride>,
) -> RpcResult<U256>;
}
/// Implementation of the `eth_` namespace override
#[derive(Debug, Clone)]
pub struct EthApiExt<Eth> {
eth_api: Eth,
purpose_keys: GetPurposeKeysResponse,
}
impl<Eth> EthApiExt<Eth> {
/// Create a new `EthApiExt` module.
pub const fn new(eth_api: Eth, purpose_keys: GetPurposeKeysResponse) -> Self {
Self { eth_api, purpose_keys }
}
}
#[async_trait]
impl<Eth> EthApiOverrideServer<RpcBlock<Eth::NetworkTypes>> for EthApiExt<Eth>
where
Eth: FullSeismicApi + Send + Sync + 'static,
Eth::Error: Send + Sync + 'static,
jsonrpsee_types::error::ErrorObject<'static>: From<Eth::Error>,
<Eth::NetworkTypes as RpcTypes>::TransactionRequest:
From<TransactionRequest> + AsRef<TransactionRequest> + Send + Sync + 'static,
{
/// Handler for: `eth_signTypedData_v4`
///
/// TODO: determine if this should be removed, seems the same as eth functionality
async fn sign_typed_data_v4(&self, from: Address, data: TypedData) -> RpcResult<String> {
debug!(target: "reth-seismic-rpc::eth", "Serving seismic eth_signTypedData_v4 extension");
let signature = EthTransactions::sign_typed_data(&self.eth_api, &data, from)
.map_err(|err| err.into())?;
let signature = alloy_primitives::hex::encode(signature);
Ok(format!("0x{signature}"))
}
/// Handler for: `eth_simulateV1`
async fn simulate_v1(
&self,
payload: SeismicSimulatePayload<SeismicCallRequest>,
block_number: Option<BlockId>,
) -> RpcResult<Vec<SimulatedBlock<RpcBlock<Eth::NetworkTypes>>>> {
debug!(target: "reth-seismic-rpc::eth", "Serving seismic eth_simulateV1 extension");
let seismic_sim_blocks: Vec<SeismicSimBlock<SeismicCallRequest>> =
payload.block_state_calls.clone();
// Recover EthSimBlocks from the SeismicSimulatePayload<SeismicCallRequest>
let mut eth_simulated_blocks: Vec<
EthSimBlock<<Eth::NetworkTypes as RpcTypes>::TransactionRequest>,
> = Vec::with_capacity(payload.block_state_calls.len());
for block in payload.block_state_calls {
let SeismicSimBlock { block_overrides, state_overrides, calls } = block;
let mut prepared_calls = Vec::with_capacity(calls.len());
for call in calls {
let seismic_tx_request = convert_seismic_call_to_tx_request(call)?;
let seismic_tx_request = seismic_tx_request
.plaintext_copy(&self.purpose_keys.tx_io_sk)
.map_err(|e| ext_decryption_error(e.to_string()))?;
let tx_request: TransactionRequest = seismic_tx_request.inner;
prepared_calls.push(tx_request.into());
}
let prepared_block =
EthSimBlock { block_overrides, state_overrides, calls: prepared_calls };
eth_simulated_blocks.push(prepared_block);
}
// Call Eth simulate_v1, which only takes EthSimPayload/EthSimBlock
let mut result = EthCall::simulate_v1(
&self.eth_api,
EthSimulatePayload {
block_state_calls: eth_simulated_blocks.clone(),
trace_transfers: payload.trace_transfers,
validation: payload.validation,
return_full_transactions: payload.return_full_transactions,
},
block_number,
)
.await?;
// Convert Eth Blocks back to Seismic blocks
for (block, result) in seismic_sim_blocks.iter().zip(result.iter_mut()) {
let SeismicSimBlock::<SeismicCallRequest> { calls, .. } = block;
let SimulatedBlock { calls: call_results, .. } = result;
for (call_result, call) in call_results.iter_mut().zip(calls.iter()) {
let seismic_tx_request = convert_seismic_call_to_tx_request(call.clone())?;
if let Some(seismic_elements) = seismic_tx_request.seismic_elements {
// if there are seismic elements, encrypt the output
let encrypted_output = seismic_elements
.encrypt(&self.purpose_keys.tx_io_sk, &call_result.return_data)
.map_err(|e| ext_encryption_error(e.to_string()))?;
call_result.return_data = encrypted_output;
}
}
}
Ok(result)
}
/// Handler for: `eth_call`
async fn call(
&self,
request: SeismicCallRequest,
block_number: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> RpcResult<Bytes> {
debug!(target: "reth-seismic-rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving seismic eth_call extension");
// process different CallRequest types
let seismic_tx_request = convert_seismic_call_to_tx_request(request)?;
// decrypt seismic elements
let tx_request = seismic_tx_request
.plaintext_copy(&self.purpose_keys.tx_io_sk)
.map_err(|e| ext_decryption_error(e.to_string()))?
.inner;
// call inner
let result = EthCall::call(
&self.eth_api,
tx_request.into(),
block_number,
EvmOverrides::new(state_overrides, block_overrides),
)
.await?;
// encrypt result
if let Some(seismic_elements) = seismic_tx_request.seismic_elements {
return Ok(seismic_elements
.encrypt(&self.purpose_keys.tx_io_sk, &result)
.map_err(|e| ext_encryption_error(e.to_string()))?);
} else {
Ok(result)
}
}
/// Handler for: `eth_sendRawTransaction`
///
/// Directly calls the inner eth api without decryption
/// We do this so that it is encrypted in the tx pool, so it is encrypted in blocks
/// decryption during execution is handled by the [`SeismicBlockExecutor`]
async fn send_raw_transaction(&self, tx: SeismicRawTxRequest) -> RpcResult<B256> {
debug!(target: "reth-seismic-rpc::eth", ?tx, "Serving overridden eth_sendRawTransaction extension");
match tx {
SeismicRawTxRequest::Bytes(bytes) => {
Ok(EthTransactions::send_raw_transaction(&self.eth_api, bytes).await?)
}
SeismicRawTxRequest::TypedData(typed_data) => {
Ok(SeismicTransaction::send_typed_data_transaction(&self.eth_api, typed_data)
.await?)
}
}
}
async fn estimate_gas(
&self,
request: SeismicTransactionRequest,
block_number: Option<BlockId>,
state_override: Option<StateOverride>,
) -> RpcResult<U256> {
debug!(target: "reth-seismic-rpc::eth", ?request, ?block_number, ?state_override, "serving seismic eth_estimateGas extension");
// decrypt
let decrypted_req = request
.plaintext_copy(&self.purpose_keys.tx_io_sk)
.map_err(|e| ext_decryption_error(e.to_string()))?;
// call inner
Ok(EthCall::estimate_gas_at(
&self.eth_api,
decrypted_req.inner.into(),
block_number.unwrap_or_default(),
state_override,
)
.await?)
}
}
/// Creates a EthApiError that says that seismic decryption failed
pub fn ext_decryption_error(e_str: String) -> EthApiError {
EthApiError::Other(Box::new(jsonrpsee_types::ErrorObject::owned(
-32000, // TODO: pick a better error code?
"Error Decrypting in Seismic EthApiExt",
Some(e_str),
)))
}
/// Creates a EthApiError that says that seismic encryption failed
pub fn ext_encryption_error(e_str: String) -> EthApiError {
EthApiError::Other(Box::new(jsonrpsee_types::ErrorObject::owned(
-32000, // TODO: pick a better error code?
"Error Encrypting in Seismic EthApiExt",
Some(e_str),
)))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/eth/call.rs | crates/seismic/rpc/src/eth/call.rs | use crate::{SeismicEthApi, SeismicEthApiError};
use alloy_consensus::transaction::Either;
use alloy_eips::eip7702::{RecoveredAuthorization, SignedAuthorization};
use alloy_primitives::{TxKind, U256};
use alloy_rpc_types_eth::transaction::TransactionRequest;
use reth_evm::{EvmEnv, SpecFor, TxEnvFor};
use reth_rpc_eth_api::{
helpers::{estimate::EstimateCall, Call, EthCall},
CallFees, EthTxEnvError, FromEthApiError, FromEvmError, IntoEthApiError, RpcConvert,
RpcNodeCore, RpcTxReq,
};
use reth_rpc_eth_types::{EthApiError, RpcInvalidTransactionError};
use revm::{context::TxEnv, context_interface::Block, Database};
use seismic_alloy_consensus::SeismicTxType;
use seismic_revm::{self, transaction::abstraction::RngMode, SeismicTransaction};
impl<N, Rpc> EthCall for SeismicEthApi<N, Rpc>
where
N: RpcNodeCore,
SeismicEthApiError: FromEvmError<N::Evm>,
TxEnvFor<N::Evm>: From<SeismicTransaction<TxEnv>>,
SeismicTransaction<TxEnv>: Into<TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = SeismicEthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N, Rpc> EstimateCall for SeismicEthApi<N, Rpc>
where
Self: Call,
Self::Error: From<EthApiError>,
N: RpcNodeCore,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = SeismicEthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
}
impl<N, Rpc> Call for SeismicEthApi<N, Rpc>
where
N: RpcNodeCore,
SeismicEthApiError: FromEvmError<N::Evm>,
TxEnvFor<N::Evm>: From<SeismicTransaction<TxEnv>>,
SeismicTransaction<TxEnv>: Into<TxEnvFor<N::Evm>>,
Rpc: RpcConvert<
Primitives = N::Primitives,
Error = SeismicEthApiError,
TxEnv = TxEnvFor<N::Evm>,
Spec = SpecFor<N::Evm>,
>,
{
#[inline]
fn call_gas_limit(&self) -> u64 {
self.inner.gas_cap()
}
#[inline]
fn max_simulate_blocks(&self) -> u64 {
self.inner.max_simulate_blocks()
}
fn create_txn_env(
&self,
evm_env: &EvmEnv<SpecFor<Self::Evm>>,
request: RpcTxReq<Rpc::Network>,
mut db: impl Database<Error: Into<EthApiError>>,
) -> Result<TxEnvFor<N::Evm>, Self::Error> {
// Convert network request to concrete TransactionRequest
let request: &TransactionRequest = request.as_ref();
// Ensure that if versioned hashes are set, they're not empty
if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) {
return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err());
}
let tx_type = if request.authorization_list.is_some() {
SeismicTxType::Eip7702
} else if request.max_fee_per_gas.is_some() || request.max_priority_fee_per_gas.is_some() {
SeismicTxType::Eip1559
} else if request.access_list.is_some() {
SeismicTxType::Eip2930
} else {
SeismicTxType::Seismic
} as u8;
let TransactionRequest {
from,
to,
gas_price,
max_fee_per_gas,
max_priority_fee_per_gas,
gas,
value,
input,
nonce,
access_list,
chain_id,
blob_versioned_hashes,
max_fee_per_blob_gas,
authorization_list,
transaction_type: _,
sidecar: _,
} = request;
let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } =
CallFees::ensure_fees(
gas_price.map(U256::from),
max_fee_per_gas.map(U256::from),
max_priority_fee_per_gas.map(U256::from),
U256::from(evm_env.block_env.basefee),
blob_versioned_hashes.as_deref(),
max_fee_per_blob_gas.map(U256::from),
evm_env.block_env.blob_gasprice().map(U256::from),
)
.map_err(|e| EthTxEnvError::CallFees(e))?;
let gas_limit = gas.unwrap_or(
// Use maximum allowed gas limit. The reason for this
// is that both Erigon and Geth use pre-configured gas cap even if
// it's possible to derive the gas limit from the block:
// <https://github.com/ledgerwatch/erigon/blob/eae2d9a79cb70dbe30b3a6b79c436872e4605458/cmd/rpcdaemon/commands/trace_adhoc.go#L956
// https://github.com/ledgerwatch/erigon/blob/eae2d9a79cb70dbe30b3a6b79c436872e4605458/eth/ethconfig/config.go#L94>
evm_env.block_env.gas_limit,
);
let chain_id = chain_id.unwrap_or(evm_env.cfg_env.chain_id);
let caller = from.unwrap_or_default();
let nonce = if let Some(nonce) = nonce {
*nonce
} else {
db.basic(caller).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default()
};
let authorization_list: Vec<Either<SignedAuthorization, RecoveredAuthorization>> =
authorization_list
.clone()
.unwrap_or_default()
.iter()
.map(|auth| Either::Left(auth.clone()))
.collect();
let env = TxEnv {
tx_type,
gas_limit,
nonce,
caller,
gas_price: gas_price.saturating_to(),
gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()),
kind: to.unwrap_or(TxKind::Create),
value: value.unwrap_or_default(),
data: input
.clone()
.try_into_unique_input()
.map_err(Self::Error::from_eth_err)?
.unwrap_or_default(),
chain_id: Some(chain_id),
access_list: access_list.clone().unwrap_or_default(),
// EIP-4844 fields
blob_hashes: blob_versioned_hashes.clone().unwrap_or_default(),
max_fee_per_blob_gas: max_fee_per_blob_gas
.map(|v| v.saturating_to())
.unwrap_or_default(),
// EIP-7702 fields
authorization_list,
};
tracing::debug!("reth-seismic-rpc::eth create_txn_env {:?}", env);
Ok(SeismicTransaction {
base: env,
tx_hash: Default::default(),
rng_mode: RngMode::Simulation,
}
.into())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/eth/block.rs | crates/seismic/rpc/src/eth/block.rs | //! Loads and formats Seismic block RPC response.
use crate::{SeismicEthApi, SeismicEthApiError};
use reth_rpc_eth_api::{
helpers::{EthBlocks, LoadBlock},
FromEvmError, RpcConvert, RpcNodeCore,
};
impl<N, Rpc> EthBlocks for SeismicEthApi<N, Rpc>
where
N: RpcNodeCore,
SeismicEthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = SeismicEthApiError>,
{
}
impl<N, Rpc> LoadBlock for SeismicEthApi<N, Rpc>
where
N: RpcNodeCore,
SeismicEthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = SeismicEthApiError>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/eth/receipt.rs | crates/seismic/rpc/src/eth/receipt.rs | //! Loads and formats Seismic receipt RPC response.
use reth_rpc_convert::transaction::{ConvertReceiptInput, ReceiptConverter};
use reth_rpc_eth_api::{helpers::LoadReceipt, RpcConvert, RpcNodeCore};
use reth_rpc_eth_types::{receipt::build_receipt, EthApiError};
use reth_seismic_primitives::{SeismicPrimitives, SeismicReceipt};
use seismic_alloy_consensus::SeismicReceiptEnvelope;
use seismic_alloy_rpc_types::SeismicTransactionReceipt;
use std::fmt::Debug;
use crate::{SeismicEthApi, SeismicEthApiError};
impl<N, Rpc> LoadReceipt for SeismicEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = SeismicEthApiError>,
{
}
/// Builds an [`SeismicTransactionReceipt`].
///
/// Like [`EthReceiptBuilder`], but with Seismic types
#[derive(Debug)]
pub struct SeismicReceiptBuilder {
/// The base response body, contains L1 fields.
pub base: SeismicTransactionReceipt,
}
impl SeismicReceiptBuilder {
/// Returns a new builder.
pub fn new(input: ConvertReceiptInput<'_, SeismicPrimitives>) -> Result<Self, EthApiError> {
let base = build_receipt(&input, None, |receipt_with_bloom| match input.receipt.as_ref() {
SeismicReceipt::Legacy(_) => SeismicReceiptEnvelope::Legacy(receipt_with_bloom),
SeismicReceipt::Eip2930(_) => SeismicReceiptEnvelope::Eip2930(receipt_with_bloom),
SeismicReceipt::Eip1559(_) => SeismicReceiptEnvelope::Eip1559(receipt_with_bloom),
SeismicReceipt::Eip7702(_) => SeismicReceiptEnvelope::Eip7702(receipt_with_bloom),
SeismicReceipt::Seismic(_) => SeismicReceiptEnvelope::Seismic(receipt_with_bloom),
#[allow(unreachable_patterns)]
_ => unreachable!(),
});
Ok(Self { base })
}
/// Builds [`SeismicTransactionReceipt`] by combing core (l1) receipt fields and additional
/// Seismic receipt fields.
pub fn build(self) -> SeismicTransactionReceipt {
self.base
}
}
/// Seismic receipt converter.
#[derive(Debug, Clone)]
pub struct SeismicReceiptConverter;
impl SeismicReceiptConverter {
/// Creates a new seismic receipt converter.
pub const fn new() -> Self {
Self
}
}
impl ReceiptConverter<SeismicPrimitives> for SeismicReceiptConverter {
type Error = SeismicEthApiError;
type RpcReceipt = SeismicTransactionReceipt;
fn convert_receipts(
&self,
inputs: Vec<ConvertReceiptInput<'_, SeismicPrimitives>>,
) -> Result<Vec<Self::RpcReceipt>, Self::Error> {
inputs
.into_iter()
.map(|input| {
SeismicReceiptBuilder::new(input)
.map_err(SeismicEthApiError::Eth)
.map(|builder| builder.build())
})
.collect()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/seismic/rpc/src/eth/api.rs | crates/seismic/rpc/src/eth/api.rs | //! Seismic extension of API traits
use reth_rpc_eth_api::{
helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, LoadReceipt, Trace},
FullEthApiTypes,
};
use super::ext::SeismicTransaction;
/// Helper trait to unify all `eth` rpc server building block traits, for simplicity.
///
/// This trait is automatically implemented for any type that implements all the `Eth` traits.
pub trait FullSeismicApi:
FullEthApiTypes
+ EthApiSpec
+ SeismicTransaction
+ EthBlocks
+ EthState
+ EthCall
+ EthFees
+ Trace
+ LoadReceipt
{
}
impl<T> FullSeismicApi for T where
T: FullEthApiTypes
+ EthApiSpec
+ SeismicTransaction
+ EthBlocks
+ EthState
+ EthCall
+ EthFees
+ Trace
+ LoadReceipt
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.