repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/evm/src/build.rs | crates/optimism/evm/src/build.rs | use alloc::sync::Arc;
use alloy_consensus::{
constants::EMPTY_WITHDRAWALS, proofs, Block, BlockBody, Header, TxReceipt,
EMPTY_OMMER_ROOT_HASH,
};
use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE};
use alloy_evm::block::BlockExecutorFactory;
use alloy_op_evm::OpBlockExecutionCtx;
use alloy_primitives::logs_bloom;
use reth_evm::execute::{BlockAssembler, BlockAssemblerInput};
use reth_execution_errors::BlockExecutionError;
use reth_execution_types::BlockExecutionResult;
use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus};
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::DepositReceipt;
use reth_primitives_traits::{Receipt, SignedTransaction};
/// Block builder for Optimism.
#[derive(Debug)]
pub struct OpBlockAssembler<ChainSpec> {
chain_spec: Arc<ChainSpec>,
}
impl<ChainSpec> OpBlockAssembler<ChainSpec> {
/// Creates a new [`OpBlockAssembler`].
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec }
}
}
impl<ChainSpec: OpHardforks> OpBlockAssembler<ChainSpec> {
/// Builds a block for `input` without any bounds on header `H`.
pub fn assemble_block<
F: for<'a> BlockExecutorFactory<
ExecutionCtx<'a>: Into<OpBlockExecutionCtx>,
Transaction: SignedTransaction,
Receipt: Receipt + DepositReceipt,
>,
H,
>(
&self,
input: BlockAssemblerInput<'_, '_, F, H>,
) -> Result<Block<F::Transaction>, BlockExecutionError> {
let BlockAssemblerInput {
evm_env,
execution_ctx: ctx,
transactions,
output: BlockExecutionResult { receipts, gas_used, .. },
bundle_state,
state_root,
state_provider,
..
} = input;
let ctx = ctx.into();
let timestamp = evm_env.block_env.timestamp.saturating_to();
let transactions_root = proofs::calculate_transaction_root(&transactions);
let receipts_root =
calculate_receipt_root_no_memo_optimism(receipts, &self.chain_spec, timestamp);
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| r.logs()));
let mut requests_hash = None;
let withdrawals_root = if self.chain_spec.is_isthmus_active_at_timestamp(timestamp) {
// always empty requests hash post isthmus
requests_hash = Some(EMPTY_REQUESTS_HASH);
// withdrawals root field in block header is used for storage root of L2 predeploy
// `l2tol1-message-passer`
Some(
isthmus::withdrawals_root(bundle_state, state_provider)
.map_err(BlockExecutionError::other)?,
)
} else if self.chain_spec.is_canyon_active_at_timestamp(timestamp) {
Some(EMPTY_WITHDRAWALS)
} else {
None
};
let (excess_blob_gas, blob_gas_used) =
if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) {
(Some(0), Some(0))
} else {
(None, None)
};
let header = Header {
parent_hash: ctx.parent_hash,
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: evm_env.block_env.beneficiary,
state_root,
transactions_root,
receipts_root,
withdrawals_root,
logs_bloom,
timestamp,
mix_hash: evm_env.block_env.prevrandao.unwrap_or_default(),
nonce: BEACON_NONCE.into(),
base_fee_per_gas: Some(evm_env.block_env.basefee),
number: evm_env.block_env.number.saturating_to(),
gas_limit: evm_env.block_env.gas_limit,
difficulty: evm_env.block_env.difficulty,
gas_used: *gas_used,
extra_data: ctx.extra_data,
parent_beacon_block_root: ctx.parent_beacon_block_root,
blob_gas_used,
excess_blob_gas,
requests_hash,
};
Ok(Block::new(
header,
BlockBody {
transactions,
ommers: Default::default(),
withdrawals: self
.chain_spec
.is_canyon_active_at_timestamp(timestamp)
.then(Default::default),
},
))
}
}
impl<ChainSpec> Clone for OpBlockAssembler<ChainSpec> {
fn clone(&self) -> Self {
Self { chain_spec: self.chain_spec.clone() }
}
}
impl<F, ChainSpec> BlockAssembler<F> for OpBlockAssembler<ChainSpec>
where
ChainSpec: OpHardforks,
F: for<'a> BlockExecutorFactory<
ExecutionCtx<'a> = OpBlockExecutionCtx,
Transaction: SignedTransaction,
Receipt: Receipt + DepositReceipt,
>,
{
type Block = Block<F::Transaction>;
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, F>,
) -> Result<Self::Block, BlockExecutionError> {
self.assemble_block(input)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/evm/src/receipts.rs | crates/optimism/evm/src/receipts.rs | use alloy_consensus::{Eip658Value, Receipt};
use alloy_evm::eth::receipt_builder::ReceiptBuilderCtx;
use alloy_op_evm::block::receipt_builder::OpReceiptBuilder;
use op_alloy_consensus::{OpDepositReceipt, OpTxType};
use reth_evm::Evm;
use reth_optimism_primitives::{OpReceipt, OpTransactionSigned};
/// A builder that operates on op-reth primitive types, specifically [`OpTransactionSigned`] and
/// [`OpReceipt`].
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct OpRethReceiptBuilder;
impl OpReceiptBuilder for OpRethReceiptBuilder {
type Transaction = OpTransactionSigned;
type Receipt = OpReceipt;
fn build_receipt<'a, E: Evm>(
&self,
ctx: ReceiptBuilderCtx<'a, OpTransactionSigned, E>,
) -> Result<Self::Receipt, ReceiptBuilderCtx<'a, OpTransactionSigned, E>> {
match ctx.tx.tx_type() {
OpTxType::Deposit => Err(ctx),
ty => {
let receipt = Receipt {
// Success flag was added in `EIP-658: Embedding transaction status code in
// receipts`.
status: Eip658Value::Eip658(ctx.result.is_success()),
cumulative_gas_used: ctx.cumulative_gas_used,
logs: ctx.result.into_logs(),
};
Ok(match ty {
OpTxType::Legacy => OpReceipt::Legacy(receipt),
OpTxType::Eip1559 => OpReceipt::Eip1559(receipt),
OpTxType::Eip2930 => OpReceipt::Eip2930(receipt),
OpTxType::Eip7702 => OpReceipt::Eip7702(receipt),
OpTxType::Deposit => unreachable!(),
})
}
}
}
fn build_deposit_receipt(&self, inner: OpDepositReceipt) -> Self::Receipt {
OpReceipt::Deposit(inner)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/storage/src/lib.rs | crates/optimism/storage/src/lib.rs | //! Standalone crate for Optimism-Storage Reth.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
extern crate alloc;
mod chain;
pub use chain::OpStorage;
#[cfg(test)]
mod tests {
use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat};
use reth_db_api::models::{
CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices,
StoredBlockWithdrawals,
};
use reth_primitives_traits::Account;
use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment};
use reth_stages_types::{
AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint,
HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint,
StorageHashingCheckpoint,
};
#[test]
fn test_ensure_backwards_compatibility() {
assert_eq!(Account::bitflag_encoded_bytes(), 2);
assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1);
assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1);
assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0);
assert_eq!(CompactU256::bitflag_encoded_bytes(), 1);
assert_eq!(CompactU64::bitflag_encoded_bytes(), 1);
assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1);
assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0);
assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0);
assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0);
assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1);
assert_eq!(PruneMode::bitflag_encoded_bytes(), 1);
assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1);
assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1);
assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1);
assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1);
assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0);
assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1);
// In case of failure, refer to the documentation of the
// [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling
// it.
validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero);
validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero);
validate_bitflag_backwards_compat!(CheckpointBlockRange, UnusedBits::Zero);
validate_bitflag_backwards_compat!(CompactClientVersion, UnusedBits::Zero);
validate_bitflag_backwards_compat!(CompactU256, UnusedBits::NotZero);
validate_bitflag_backwards_compat!(CompactU64, UnusedBits::NotZero);
validate_bitflag_backwards_compat!(EntitiesCheckpoint, UnusedBits::Zero);
validate_bitflag_backwards_compat!(ExecutionCheckpoint, UnusedBits::Zero);
validate_bitflag_backwards_compat!(HeadersCheckpoint, UnusedBits::Zero);
validate_bitflag_backwards_compat!(IndexHistoryCheckpoint, UnusedBits::Zero);
validate_bitflag_backwards_compat!(PruneCheckpoint, UnusedBits::NotZero);
validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero);
validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero);
validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero);
validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero);
validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero);
validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero);
validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/storage/src/chain.rs | crates/optimism/storage/src/chain.rs | use alloc::{vec, vec::Vec};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::BlockNumber;
use core::marker::PhantomData;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_db_api::transaction::{DbTx, DbTxMut};
use reth_node_api::{FullNodePrimitives, FullSignedTx};
use reth_optimism_primitives::OpTransactionSigned;
use reth_primitives_traits::{Block, FullBlockHeader, SignedTransaction};
use reth_provider::{
providers::{ChainStorage, NodeTypesForProvider},
DatabaseProvider,
};
use reth_storage_api::{
errors::ProviderResult, BlockBodyReader, BlockBodyWriter, ChainStorageReader,
ChainStorageWriter, DBProvider, ReadBodyInput, StorageLocation,
};
/// Optimism storage implementation.
#[derive(Debug, Clone, Copy)]
pub struct OpStorage<T = OpTransactionSigned, H = Header>(PhantomData<(T, H)>);
impl<T, H> Default for OpStorage<T, H> {
fn default() -> Self {
Self(Default::default())
}
}
impl<N, T, H> ChainStorage<N> for OpStorage<T, H>
where
T: FullSignedTx,
H: FullBlockHeader,
N: FullNodePrimitives<
Block = alloy_consensus::Block<T, H>,
BlockHeader = H,
BlockBody = alloy_consensus::BlockBody<T, H>,
SignedTx = T,
>,
{
fn reader<TX, Types>(&self) -> impl ChainStorageReader<DatabaseProvider<TX, Types>, N>
where
TX: DbTx + 'static,
Types: NodeTypesForProvider<Primitives = N>,
{
self
}
fn writer<TX, Types>(&self) -> impl ChainStorageWriter<DatabaseProvider<TX, Types>, N>
where
TX: DbTxMut + DbTx + 'static,
Types: NodeTypesForProvider<Primitives = N>,
{
self
}
}
impl<Provider, T, H> BlockBodyWriter<Provider, alloy_consensus::BlockBody<T, H>> for OpStorage<T, H>
where
Provider: DBProvider<Tx: DbTxMut>,
T: SignedTransaction,
H: FullBlockHeader,
{
fn write_block_bodies(
&self,
_provider: &Provider,
_bodies: Vec<(u64, Option<alloy_consensus::BlockBody<T, H>>)>,
_write_to: StorageLocation,
) -> ProviderResult<()> {
// noop
Ok(())
}
fn remove_block_bodies_above(
&self,
_provider: &Provider,
_block: BlockNumber,
_remove_from: StorageLocation,
) -> ProviderResult<()> {
// noop
Ok(())
}
}
impl<Provider, T, H> BlockBodyReader<Provider> for OpStorage<T, H>
where
Provider: ChainSpecProvider<ChainSpec: EthChainSpec + EthereumHardforks> + DBProvider,
T: SignedTransaction,
H: FullBlockHeader,
{
type Block = alloy_consensus::Block<T, H>;
fn read_block_bodies(
&self,
provider: &Provider,
inputs: Vec<ReadBodyInput<'_, Self::Block>>,
) -> ProviderResult<Vec<<Self::Block as Block>::Body>> {
let chain_spec = provider.chain_spec();
Ok(inputs
.into_iter()
.map(|(header, transactions)| BlockBody {
transactions,
ommers: vec![],
// after shanghai the body should have an empty withdrawals list
withdrawals: chain_spec
.is_shanghai_active_at_timestamp(header.timestamp())
.then(Default::default),
})
.collect())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/reth/src/lib.rs | crates/optimism/reth/src/lib.rs | //! Optimism meta crate that provides access to commonly used reth dependencies.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(unused_crate_dependencies)]
/// Re-exported optimism types
#[doc(inline)]
pub use reth_optimism_primitives::*;
/// Re-exported reth primitives
pub mod primitives {
#[doc(inline)]
pub use reth_primitives_traits::*;
}
/// Re-exported cli types
#[cfg(feature = "cli")]
pub mod cli {
#[doc(inline)]
pub use reth_cli_util::*;
#[doc(inline)]
pub use reth_optimism_cli::*;
}
/// Re-exported pool types
#[cfg(feature = "pool")]
pub use reth_transaction_pool as pool;
/// Re-exported consensus types
#[cfg(feature = "consensus")]
pub mod consensus {
#[doc(inline)]
pub use reth_consensus::*;
/// Consensus rule checks.
pub mod validation {
#[doc(inline)]
pub use reth_consensus_common::validation::*;
#[doc(inline)]
pub use reth_optimism_consensus::validation::*;
}
}
/// Re-exported from `reth_chainspec`
#[allow(ambiguous_glob_reexports)]
pub mod chainspec {
#[doc(inline)]
pub use reth_chainspec::*;
#[doc(inline)]
pub use reth_optimism_chainspec::*;
}
/// Re-exported evm types
#[cfg(feature = "evm")]
pub mod evm {
#[doc(inline)]
pub use reth_optimism_evm::*;
#[doc(inline)]
pub use reth_evm as primitives;
#[doc(inline)]
pub use reth_revm as revm;
}
/// Re-exported exex types
#[cfg(feature = "exex")]
pub use reth_exex as exex;
/// Re-exported from `tasks`.
#[cfg(feature = "tasks")]
pub mod tasks {
pub use reth_tasks::*;
}
/// Re-exported reth network types
#[cfg(feature = "network")]
pub mod network {
#[doc(inline)]
pub use reth_eth_wire as eth_wire;
#[doc(inline)]
pub use reth_network::*;
#[doc(inline)]
pub use reth_network_api as api;
}
/// Re-exported reth provider types
#[cfg(feature = "provider")]
pub mod provider {
#[doc(inline)]
pub use reth_provider::*;
#[doc(inline)]
pub use reth_db as db;
}
/// Re-exported codec crate
#[cfg(feature = "provider")]
pub use reth_codecs as codec;
/// Re-exported reth storage api types
#[cfg(feature = "storage-api")]
pub mod storage {
#[doc(inline)]
pub use reth_storage_api::*;
}
/// Re-exported optimism node
#[cfg(feature = "node-api")]
pub mod node {
#[doc(inline)]
pub use reth_node_api as api;
#[cfg(feature = "node")]
pub use reth_node_builder as builder;
#[doc(inline)]
pub use reth_node_core as core;
#[cfg(feature = "node")]
pub use reth_optimism_node::*;
}
/// Re-exported engine types
#[cfg(feature = "node")]
pub mod engine {
#[doc(inline)]
pub use reth_engine_local as local;
#[doc(inline)]
pub use reth_optimism_node::engine::*;
}
/// Re-exported reth trie types
#[cfg(feature = "trie")]
pub mod trie {
#[doc(inline)]
pub use reth_trie::*;
#[cfg(feature = "trie-db")]
#[doc(inline)]
pub use reth_trie_db::*;
}
/// Re-exported rpc types
#[cfg(feature = "rpc")]
pub mod rpc {
#[doc(inline)]
pub use reth_optimism_rpc::*;
#[doc(inline)]
pub use reth_rpc::*;
#[doc(inline)]
pub use reth_rpc_api as api;
#[doc(inline)]
pub use reth_rpc_builder as builder;
#[doc(inline)]
pub use reth_rpc_eth_types as eth;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/lib.rs | crates/optimism/txpool/src/lib.rs | //! OP-Reth Transaction pool.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod validator;
pub use validator::{OpL1BlockInfo, OpTransactionValidator};
pub mod conditional;
pub mod supervisor;
mod transaction;
pub use transaction::{OpPooledTransaction, OpPooledTx};
mod error;
pub mod interop;
pub mod maintain;
pub use error::InvalidCrossTx;
pub mod estimated_da_size;
use reth_transaction_pool::{CoinbaseTipOrdering, Pool, TransactionValidationTaskExecutor};
/// Type alias for default optimism transaction pool
pub type OpTransactionPool<Client, S, T = OpPooledTransaction> = Pool<
TransactionValidationTaskExecutor<OpTransactionValidator<Client, T>>,
CoinbaseTipOrdering<T>,
S,
>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/error.rs | crates/optimism/txpool/src/error.rs | use crate::supervisor::InteropTxValidatorError;
use reth_transaction_pool::error::PoolTransactionError;
use std::any::Any;
/// Wrapper for [`InteropTxValidatorError`] to implement [`PoolTransactionError`] for it.
#[derive(thiserror::Error, Debug)]
pub enum InvalidCrossTx {
/// Errors produced by supervisor validation
#[error(transparent)]
ValidationError(#[from] InteropTxValidatorError),
/// Error cause by cross chain tx during not active interop hardfork
#[error("cross chain tx is invalid before interop")]
CrossChainTxPreInterop,
}
impl PoolTransactionError for InvalidCrossTx {
fn is_bad_transaction(&self) -> bool {
match self {
Self::ValidationError(_) => false,
Self::CrossChainTxPreInterop => true,
}
}
fn as_any(&self) -> &dyn Any {
self
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/maintain.rs | crates/optimism/txpool/src/maintain.rs | //! Support for maintaining the state of the transaction pool
/// The interval for which we check transaction against supervisor, 10 min.
const TRANSACTION_VALIDITY_WINDOW: u64 = 600;
/// Interval in seconds at which the transaction should be revalidated.
const OFFSET_TIME: u64 = 60;
/// Maximum number of supervisor requests at the same time
const MAX_SUPERVISOR_QUERIES: usize = 10;
use crate::{
conditional::MaybeConditionalTransaction,
interop::{is_stale_interop, is_valid_interop, MaybeInteropTransaction},
supervisor::SupervisorClient,
};
use alloy_consensus::{conditional::BlockConditionalAttributes, BlockHeader};
use futures_util::{future::BoxFuture, FutureExt, Stream, StreamExt};
use metrics::{Gauge, Histogram};
use reth_chain_state::CanonStateNotification;
use reth_metrics::{metrics::Counter, Metrics};
use reth_primitives_traits::NodePrimitives;
use reth_transaction_pool::{error::PoolTransactionError, PoolTransaction, TransactionPool};
use std::time::Instant;
use tracing::warn;
/// Transaction pool maintenance metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
struct MaintainPoolConditionalMetrics {
/// Counter indicating the number of conditional transactions removed from
/// the pool because of exceeded block attributes.
removed_tx_conditional: Counter,
}
impl MaintainPoolConditionalMetrics {
#[inline]
fn inc_removed_tx_conditional(&self, count: usize) {
self.removed_tx_conditional.increment(count as u64);
}
}
/// Transaction pool maintenance metrics
#[derive(Metrics)]
#[metrics(scope = "transaction_pool")]
struct MaintainPoolInteropMetrics {
/// Counter indicating the number of conditional transactions removed from
/// the pool because of exceeded block attributes.
removed_tx_interop: Counter,
/// Number of interop transactions currently in the pool
pooled_interop_transactions: Gauge,
/// Counter for interop transactions that became stale and need revalidation
stale_interop_transactions: Counter,
// TODO: we also should add metric for (hash, counter) to check number of validation per tx
/// Histogram for measuring supervisor revalidation duration (congestion metric)
supervisor_revalidation_duration_seconds: Histogram,
}
impl MaintainPoolInteropMetrics {
#[inline]
fn inc_removed_tx_interop(&self, count: usize) {
self.removed_tx_interop.increment(count as u64);
}
#[inline]
fn set_interop_txs_in_pool(&self, count: usize) {
self.pooled_interop_transactions.set(count as f64);
}
#[inline]
fn inc_stale_tx_interop(&self, count: usize) {
self.stale_interop_transactions.increment(count as u64);
}
/// Record supervisor revalidation duration
#[inline]
fn record_supervisor_duration(&self, duration: std::time::Duration) {
self.supervisor_revalidation_duration_seconds.record(duration.as_secs_f64());
}
}
/// Returns a spawnable future for maintaining the state of the conditional txs in the transaction
/// pool.
pub fn maintain_transaction_pool_conditional_future<N, Pool, St>(
pool: Pool,
events: St,
) -> BoxFuture<'static, ()>
where
N: NodePrimitives,
Pool: TransactionPool + 'static,
Pool::Transaction: MaybeConditionalTransaction,
St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
{
async move {
maintain_transaction_pool_conditional(pool, events).await;
}
.boxed()
}
/// Maintains the state of the conditional tx in the transaction pool by handling new blocks and
/// reorgs.
///
/// This listens for any new blocks and reorgs and updates the conditional txs in the
/// transaction pool's state accordingly
pub async fn maintain_transaction_pool_conditional<N, Pool, St>(pool: Pool, mut events: St)
where
N: NodePrimitives,
Pool: TransactionPool,
Pool::Transaction: MaybeConditionalTransaction,
St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
{
let metrics = MaintainPoolConditionalMetrics::default();
loop {
let Some(event) = events.next().await else { break };
if let CanonStateNotification::Commit { new } = event {
let block_attr = BlockConditionalAttributes {
number: new.tip().number(),
timestamp: new.tip().timestamp(),
};
let mut to_remove = Vec::new();
for tx in &pool.pooled_transactions() {
if tx.transaction.has_exceeded_block_attributes(&block_attr) {
to_remove.push(*tx.hash());
}
}
if !to_remove.is_empty() {
let removed = pool.remove_transactions(to_remove);
metrics.inc_removed_tx_conditional(removed.len());
}
}
}
}
/// Returns a spawnable future for maintaining the state of the interop tx in the transaction pool.
pub fn maintain_transaction_pool_interop_future<N, Pool, St>(
pool: Pool,
events: St,
supervisor_client: SupervisorClient,
) -> BoxFuture<'static, ()>
where
N: NodePrimitives,
Pool: TransactionPool + 'static,
Pool::Transaction: MaybeInteropTransaction,
St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
{
async move {
maintain_transaction_pool_interop(pool, events, supervisor_client).await;
}
.boxed()
}
/// Maintains the state of the interop tx in the transaction pool by handling new blocks and reorgs.
///
/// This listens for any new blocks and reorgs and updates the interop tx in the transaction pool's
/// state accordingly
pub async fn maintain_transaction_pool_interop<N, Pool, St>(
pool: Pool,
mut events: St,
supervisor_client: SupervisorClient,
) where
N: NodePrimitives,
Pool: TransactionPool,
Pool::Transaction: MaybeInteropTransaction,
St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
{
let metrics = MaintainPoolInteropMetrics::default();
loop {
let Some(event) = events.next().await else { break };
if let CanonStateNotification::Commit { new } = event {
let timestamp = new.tip().timestamp();
let mut to_remove = Vec::new();
let mut to_revalidate = Vec::new();
let mut interop_count = 0;
// scan all pooled interop transactions
for pooled_tx in pool.pooled_transactions() {
if let Some(interop_deadline_val) = pooled_tx.transaction.interop_deadline() {
interop_count += 1;
if !is_valid_interop(interop_deadline_val, timestamp) {
to_remove.push(*pooled_tx.transaction.hash());
} else if is_stale_interop(interop_deadline_val, timestamp, OFFSET_TIME) {
to_revalidate.push(pooled_tx.transaction.clone());
}
}
}
metrics.set_interop_txs_in_pool(interop_count);
if !to_revalidate.is_empty() {
metrics.inc_stale_tx_interop(to_revalidate.len());
let revalidation_start = Instant::now();
let revalidation_stream = supervisor_client.revalidate_interop_txs_stream(
to_revalidate,
timestamp,
TRANSACTION_VALIDITY_WINDOW,
MAX_SUPERVISOR_QUERIES,
);
futures_util::pin_mut!(revalidation_stream);
while let Some((tx_item_from_stream, validation_result)) =
revalidation_stream.next().await
{
match validation_result {
Some(Ok(())) => {
tx_item_from_stream
.set_interop_deadline(timestamp + TRANSACTION_VALIDITY_WINDOW);
}
Some(Err(err)) => {
if err.is_bad_transaction() {
to_remove.push(*tx_item_from_stream.hash());
}
}
None => {
warn!(
target: "txpool",
hash = %tx_item_from_stream.hash(),
"Interop transaction no longer considered cross-chain during revalidation; removing."
);
to_remove.push(*tx_item_from_stream.hash());
}
}
}
metrics.record_supervisor_duration(revalidation_start.elapsed());
}
if !to_remove.is_empty() {
let removed = pool.remove_transactions(to_remove);
metrics.inc_removed_tx_interop(removed.len());
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/validator.rs | crates/optimism/txpool/src/validator.rs | use crate::{supervisor::SupervisorClient, InvalidCrossTx, OpPooledTx};
use alloy_consensus::{BlockHeader, Transaction};
use op_revm::L1BlockInfo;
use parking_lot::RwLock;
use reth_chainspec::ChainSpecProvider;
use reth_optimism_evm::RethL1BlockInfo;
use reth_optimism_forks::OpHardforks;
use reth_primitives_traits::{
transaction::error::InvalidTransactionError, Block, BlockBody, GotExpected, SealedBlock,
};
use reth_storage_api::{AccountInfoReader, BlockReaderIdExt, StateProviderFactory};
use reth_transaction_pool::{
error::InvalidPoolTransactionError, EthPoolTransaction, EthTransactionValidator,
TransactionOrigin, TransactionValidationOutcome, TransactionValidator,
};
use std::sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
};
/// The interval for which we check transaction against supervisor, 1 hour.
const TRANSACTION_VALIDITY_WINDOW_SECS: u64 = 3600;
/// Tracks additional infos for the current block.
#[derive(Debug, Default)]
pub struct OpL1BlockInfo {
/// The current L1 block info.
l1_block_info: RwLock<L1BlockInfo>,
/// Current block timestamp.
timestamp: AtomicU64,
/// Current block number.
number: AtomicU64,
}
impl OpL1BlockInfo {
/// Returns the most recent timestamp
pub fn timestamp(&self) -> u64 {
self.timestamp.load(Ordering::Relaxed)
}
}
/// Validator for Optimism transactions.
#[derive(Debug, Clone)]
pub struct OpTransactionValidator<Client, Tx> {
/// The type that performs the actual validation.
inner: Arc<EthTransactionValidator<Client, Tx>>,
/// Additional block info required for validation.
block_info: Arc<OpL1BlockInfo>,
/// If true, ensure that the transaction's sender has enough balance to cover the L1 gas fee
/// derived from the tracked L1 block info that is extracted from the first transaction in the
/// L2 block.
require_l1_data_gas_fee: bool,
/// Client used to check transaction validity with op-supervisor
supervisor_client: Option<SupervisorClient>,
/// tracks activated forks relevant for transaction validation
fork_tracker: Arc<OpForkTracker>,
}
impl<Client, Tx> OpTransactionValidator<Client, Tx> {
/// Returns the configured chain spec
pub fn chain_spec(&self) -> Arc<Client::ChainSpec>
where
Client: ChainSpecProvider,
{
self.inner.chain_spec()
}
/// Returns the configured client
pub fn client(&self) -> &Client {
self.inner.client()
}
/// Returns the current block timestamp.
fn block_timestamp(&self) -> u64 {
self.block_info.timestamp.load(Ordering::Relaxed)
}
/// Whether to ensure that the transaction's sender has enough balance to also cover the L1 gas
/// fee.
pub fn require_l1_data_gas_fee(self, require_l1_data_gas_fee: bool) -> Self {
Self { require_l1_data_gas_fee, ..self }
}
/// Returns whether this validator also requires the transaction's sender to have enough balance
/// to cover the L1 gas fee.
pub const fn requires_l1_data_gas_fee(&self) -> bool {
self.require_l1_data_gas_fee
}
}
impl<Client, Tx> OpTransactionValidator<Client, Tx>
where
Client: ChainSpecProvider<ChainSpec: OpHardforks> + StateProviderFactory + BlockReaderIdExt,
Tx: EthPoolTransaction + OpPooledTx,
{
/// Create a new [`OpTransactionValidator`].
pub fn new(inner: EthTransactionValidator<Client, Tx>) -> Self {
let this = Self::with_block_info(inner, OpL1BlockInfo::default());
if let Ok(Some(block)) =
this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest)
{
// genesis block has no txs, so we can't extract L1 info, we set the block info to empty
// so that we will accept txs into the pool before the first block
if block.header().number() == 0 {
this.block_info.timestamp.store(block.header().timestamp(), Ordering::Relaxed);
this.block_info.number.store(block.header().number(), Ordering::Relaxed);
} else {
this.update_l1_block_info(block.header(), block.body().transactions().first());
}
}
this
}
/// Create a new [`OpTransactionValidator`] with the given [`OpL1BlockInfo`].
pub fn with_block_info(
inner: EthTransactionValidator<Client, Tx>,
block_info: OpL1BlockInfo,
) -> Self {
Self {
inner: Arc::new(inner),
block_info: Arc::new(block_info),
require_l1_data_gas_fee: true,
supervisor_client: None,
fork_tracker: Arc::new(OpForkTracker { interop: AtomicBool::from(false) }),
}
}
/// Set the supervisor client and safety level
pub fn with_supervisor(mut self, supervisor_client: SupervisorClient) -> Self {
self.supervisor_client = Some(supervisor_client);
self
}
/// Update the L1 block info for the given header and system transaction, if any.
///
/// Note: this supports optional system transaction, in case this is used in a dev setup
pub fn update_l1_block_info<H, T>(&self, header: &H, tx: Option<&T>)
where
H: BlockHeader,
T: Transaction,
{
self.block_info.timestamp.store(header.timestamp(), Ordering::Relaxed);
self.block_info.number.store(header.number(), Ordering::Relaxed);
if let Some(Ok(cost_addition)) = tx.map(reth_optimism_evm::extract_l1_info_from_tx) {
*self.block_info.l1_block_info.write() = cost_addition;
}
if self.chain_spec().is_interop_active_at_timestamp(header.timestamp()) {
self.fork_tracker.interop.store(true, Ordering::Relaxed);
}
}
/// Validates a single transaction.
///
/// See also [`TransactionValidator::validate_transaction`]
///
/// This behaves the same as [`OpTransactionValidator::validate_one_with_state`], but creates
/// a new state provider internally.
pub async fn validate_one(
&self,
origin: TransactionOrigin,
transaction: Tx,
) -> TransactionValidationOutcome<Tx> {
self.validate_one_with_state(origin, transaction, &mut None).await
}
/// Validates a single transaction with a provided state provider.
///
/// This allows reusing the same state provider across multiple transaction validations.
///
/// See also [`TransactionValidator::validate_transaction`]
///
/// This behaves the same as [`EthTransactionValidator::validate_one_with_state`], but in
/// addition applies OP validity checks:
/// - ensures tx is not eip4844
/// - ensures cross chain transactions are valid wrt locally configured safety level
/// - ensures that the account has enough balance to cover the L1 gas cost
pub async fn validate_one_with_state(
&self,
origin: TransactionOrigin,
transaction: Tx,
state: &mut Option<Box<dyn AccountInfoReader>>,
) -> TransactionValidationOutcome<Tx> {
if transaction.is_eip4844() {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::TxTypeNotSupported.into(),
)
}
// Interop cross tx validation
match self.is_valid_cross_tx(&transaction).await {
Some(Err(err)) => {
let err = match err {
InvalidCrossTx::CrossChainTxPreInterop => {
InvalidTransactionError::TxTypeNotSupported.into()
}
err => InvalidPoolTransactionError::Other(Box::new(err)),
};
return TransactionValidationOutcome::Invalid(transaction, err)
}
Some(Ok(_)) => {
// valid interop tx
transaction.set_interop_deadline(
self.block_timestamp() + TRANSACTION_VALIDITY_WINDOW_SECS,
);
}
_ => {}
}
let outcome = self.inner.validate_one_with_state(origin, transaction, state);
self.apply_op_checks(outcome)
}
/// Performs the necessary opstack specific checks based on top of the regular eth outcome.
fn apply_op_checks(
&self,
outcome: TransactionValidationOutcome<Tx>,
) -> TransactionValidationOutcome<Tx> {
if !self.requires_l1_data_gas_fee() {
// no need to check L1 gas fee
return outcome
}
// ensure that the account has enough balance to cover the L1 gas cost
if let TransactionValidationOutcome::Valid {
balance,
state_nonce,
transaction: valid_tx,
propagate,
bytecode_hash,
authorities,
} = outcome
{
let mut l1_block_info = self.block_info.l1_block_info.read().clone();
let encoded = valid_tx.transaction().encoded_2718();
let cost_addition = match l1_block_info.l1_tx_data_fee(
self.chain_spec(),
self.block_timestamp(),
&encoded,
false,
) {
Ok(cost) => cost,
Err(err) => {
return TransactionValidationOutcome::Error(*valid_tx.hash(), Box::new(err))
}
};
let cost = valid_tx.transaction().cost().saturating_add(cost_addition);
// Checks for max cost
if cost > balance {
return TransactionValidationOutcome::Invalid(
valid_tx.into_transaction(),
InvalidTransactionError::InsufficientFunds(
GotExpected { got: balance, expected: cost }.into(),
)
.into(),
)
}
return TransactionValidationOutcome::Valid {
balance,
state_nonce,
transaction: valid_tx,
propagate,
bytecode_hash,
authorities,
}
}
outcome
}
/// Wrapper for is valid cross tx
pub async fn is_valid_cross_tx(&self, tx: &Tx) -> Option<Result<(), InvalidCrossTx>> {
// We don't need to check for deposit transaction in here, because they won't come from
// txpool
self.supervisor_client
.as_ref()?
.is_valid_cross_tx(
tx.access_list(),
tx.hash(),
self.block_info.timestamp.load(Ordering::Relaxed),
Some(TRANSACTION_VALIDITY_WINDOW_SECS),
self.fork_tracker.is_interop_activated(),
)
.await
}
}
impl<Client, Tx> TransactionValidator for OpTransactionValidator<Client, Tx>
where
Client: ChainSpecProvider<ChainSpec: OpHardforks> + StateProviderFactory + BlockReaderIdExt,
Tx: EthPoolTransaction + OpPooledTx,
{
type Transaction = Tx;
async fn validate_transaction(
&self,
origin: TransactionOrigin,
transaction: Self::Transaction,
) -> TransactionValidationOutcome<Self::Transaction> {
self.validate_one(origin, transaction).await
}
async fn validate_transactions(
&self,
transactions: Vec<(TransactionOrigin, Self::Transaction)>,
) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
futures_util::future::join_all(
transactions.into_iter().map(|(origin, tx)| self.validate_one(origin, tx)),
)
.await
}
async fn validate_transactions_with_origin(
&self,
origin: TransactionOrigin,
transactions: impl IntoIterator<Item = Self::Transaction> + Send,
) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
futures_util::future::join_all(
transactions.into_iter().map(|tx| self.validate_one(origin, tx)),
)
.await
}
fn on_new_head_block<B>(&self, new_tip_block: &SealedBlock<B>)
where
B: Block,
{
self.inner.on_new_head_block(new_tip_block);
self.update_l1_block_info(
new_tip_block.header(),
new_tip_block.body().transactions().first(),
);
}
}
/// Keeps track of whether certain forks are activated
#[derive(Debug)]
pub(crate) struct OpForkTracker {
/// Tracks if interop is activated at the block's timestamp.
interop: AtomicBool,
}
impl OpForkTracker {
/// Returns `true` if Interop fork is activated.
pub(crate) fn is_interop_activated(&self) -> bool {
self.interop.load(Ordering::Relaxed)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/conditional.rs | crates/optimism/txpool/src/conditional.rs | //! Additional support for pooled transactions with [`TransactionConditional`]
use alloy_consensus::conditional::BlockConditionalAttributes;
use alloy_rpc_types_eth::erc4337::TransactionConditional;
/// Helper trait that allows attaching a [`TransactionConditional`].
pub trait MaybeConditionalTransaction {
/// Attach a [`TransactionConditional`].
fn set_conditional(&mut self, conditional: TransactionConditional);
/// Get attached [`TransactionConditional`] if any.
fn conditional(&self) -> Option<&TransactionConditional>;
/// Check if the conditional has exceeded the block attributes.
fn has_exceeded_block_attributes(&self, block_attr: &BlockConditionalAttributes) -> bool {
self.conditional().map(|tc| tc.has_exceeded_block_attributes(block_attr)).unwrap_or(false)
}
/// Helper that sets the conditional and returns the instance again
fn with_conditional(mut self, conditional: TransactionConditional) -> Self
where
Self: Sized,
{
self.set_conditional(conditional);
self
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/interop.rs | crates/optimism/txpool/src/interop.rs | //! Additional support for pooled interop transactions.
/// Helper trait that allows attaching an interop deadline.
pub trait MaybeInteropTransaction {
/// Attach an interop deadline
fn set_interop_deadline(&self, deadline: u64);
/// Get attached deadline if any.
fn interop_deadline(&self) -> Option<u64>;
/// Helper that sets the interop and returns the instance again
fn with_interop_deadline(self, interop: u64) -> Self
where
Self: Sized,
{
self.set_interop_deadline(interop);
self
}
}
/// Helper to keep track of cross transaction interop validity
/// Checks if provided timestamp fits into tx validation window
#[inline]
pub const fn is_valid_interop(timeout: u64, timestamp: u64) -> bool {
timestamp < timeout
}
/// Checks if transaction needs revalidation based on offset
#[inline]
pub const fn is_stale_interop(timeout: u64, timestamp: u64, offset: u64) -> bool {
timestamp + offset > timeout
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/estimated_da_size.rs | crates/optimism/txpool/src/estimated_da_size.rs | //! Additional support for estimating the data availability size of transactions.
/// Helper trait that allows attaching an estimated data availability size.
pub trait DataAvailabilitySized {
/// Get the estimated data availability size of the transaction.
///
/// Note: it is expected that this value will be cached internally.
fn estimated_da_size(&self) -> u64;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/transaction.rs | crates/optimism/txpool/src/transaction.rs | use crate::{
conditional::MaybeConditionalTransaction, estimated_da_size::DataAvailabilitySized,
interop::MaybeInteropTransaction,
};
use alloy_consensus::{transaction::Recovered, BlobTransactionValidationError, Typed2718};
use alloy_eips::{
eip2718::{Encodable2718, WithEncoded},
eip2930::AccessList,
eip7594::BlobTransactionSidecarVariant,
eip7702::SignedAuthorization,
};
use alloy_primitives::{Address, Bytes, TxHash, TxKind, B256, U256};
use alloy_rpc_types_eth::erc4337::TransactionConditional;
use c_kzg::KzgSettings;
use core::fmt::Debug;
use reth_optimism_primitives::OpTransactionSigned;
use reth_primitives_traits::{InMemorySize, SignedTransaction};
use reth_transaction_pool::{
EthBlobTransactionSidecar, EthPoolTransaction, EthPooledTransaction, PoolTransaction,
};
use std::{
borrow::Cow,
sync::{
atomic::{AtomicU64, Ordering},
Arc, OnceLock,
},
};
/// Marker for no-interop transactions
pub(crate) const NO_INTEROP_TX: u64 = 0;
/// Pool transaction for OP.
///
/// This type wraps the actual transaction and caches values that are frequently used by the pool.
/// For payload building this lazily tracks values that are required during payload building:
/// - Estimated compressed size of this transaction
#[derive(Debug, Clone, derive_more::Deref)]
pub struct OpPooledTransaction<
Cons = OpTransactionSigned,
Pooled = op_alloy_consensus::OpPooledTransaction,
> {
#[deref]
inner: EthPooledTransaction<Cons>,
/// The estimated size of this transaction, lazily computed.
estimated_tx_compressed_size: OnceLock<u64>,
/// The pooled transaction type.
_pd: core::marker::PhantomData<Pooled>,
/// Optional conditional attached to this transaction.
conditional: Option<Box<TransactionConditional>>,
/// Optional interop deadline attached to this transaction.
interop: Arc<AtomicU64>,
/// Cached EIP-2718 encoded bytes of the transaction, lazily computed.
encoded_2718: OnceLock<Bytes>,
}
impl<Cons: SignedTransaction, Pooled> OpPooledTransaction<Cons, Pooled> {
/// Create new instance of [Self].
pub fn new(transaction: Recovered<Cons>, encoded_length: usize) -> Self {
Self {
inner: EthPooledTransaction::new(transaction, encoded_length),
estimated_tx_compressed_size: Default::default(),
conditional: None,
interop: Arc::new(AtomicU64::new(NO_INTEROP_TX)),
_pd: core::marker::PhantomData,
encoded_2718: Default::default(),
}
}
/// Returns the estimated compressed size of a transaction in bytes.
/// This value is computed based on the following formula:
/// `max(minTransactionSize, intercept + fastlzCoef*fastlzSize) / 1e6`
/// Uses cached EIP-2718 encoded bytes to avoid recomputing the encoding for each estimation.
pub fn estimated_compressed_size(&self) -> u64 {
*self
.estimated_tx_compressed_size
.get_or_init(|| op_alloy_flz::tx_estimated_size_fjord_bytes(self.encoded_2718()))
}
/// Returns lazily computed EIP-2718 encoded bytes of the transaction.
pub fn encoded_2718(&self) -> &Bytes {
self.encoded_2718.get_or_init(|| self.inner.transaction().encoded_2718().into())
}
/// Conditional setter.
pub fn with_conditional(mut self, conditional: TransactionConditional) -> Self {
self.conditional = Some(Box::new(conditional));
self
}
}
impl<Cons, Pooled> MaybeConditionalTransaction for OpPooledTransaction<Cons, Pooled> {
fn set_conditional(&mut self, conditional: TransactionConditional) {
self.conditional = Some(Box::new(conditional))
}
fn conditional(&self) -> Option<&TransactionConditional> {
self.conditional.as_deref()
}
}
impl<Cons, Pooled> MaybeInteropTransaction for OpPooledTransaction<Cons, Pooled> {
fn set_interop_deadline(&self, deadline: u64) {
self.interop.store(deadline, Ordering::Relaxed);
}
fn interop_deadline(&self) -> Option<u64> {
let interop = self.interop.load(Ordering::Relaxed);
if interop > NO_INTEROP_TX {
return Some(interop)
}
None
}
}
impl<Cons: SignedTransaction, Pooled> DataAvailabilitySized for OpPooledTransaction<Cons, Pooled> {
fn estimated_da_size(&self) -> u64 {
self.estimated_compressed_size()
}
}
impl<Cons, Pooled> PoolTransaction for OpPooledTransaction<Cons, Pooled>
where
Cons: SignedTransaction + From<Pooled>,
Pooled: SignedTransaction + TryFrom<Cons, Error: core::error::Error>,
{
type TryFromConsensusError = <Pooled as TryFrom<Cons>>::Error;
type Consensus = Cons;
type Pooled = Pooled;
fn clone_into_consensus(&self) -> Recovered<Self::Consensus> {
self.inner.transaction().clone()
}
fn into_consensus(self) -> Recovered<Self::Consensus> {
self.inner.transaction
}
fn into_consensus_with2718(self) -> WithEncoded<Recovered<Self::Consensus>> {
let encoding = self.encoded_2718().clone();
self.inner.transaction.into_encoded_with(encoding)
}
fn from_pooled(tx: Recovered<Self::Pooled>) -> Self {
let encoded_len = tx.encode_2718_len();
Self::new(tx.convert(), encoded_len)
}
fn hash(&self) -> &TxHash {
self.inner.transaction.tx_hash()
}
fn sender(&self) -> Address {
self.inner.transaction.signer()
}
fn sender_ref(&self) -> &Address {
self.inner.transaction.signer_ref()
}
fn cost(&self) -> &U256 {
&self.inner.cost
}
fn encoded_length(&self) -> usize {
self.inner.encoded_length
}
}
impl<Cons: Typed2718, Pooled> Typed2718 for OpPooledTransaction<Cons, Pooled> {
fn ty(&self) -> u8 {
self.inner.ty()
}
}
impl<Cons: InMemorySize, Pooled> InMemorySize for OpPooledTransaction<Cons, Pooled> {
fn size(&self) -> usize {
self.inner.size()
}
}
impl<Cons, Pooled> alloy_consensus::Transaction for OpPooledTransaction<Cons, Pooled>
where
Cons: alloy_consensus::Transaction,
Pooled: Debug + Send + Sync + 'static,
{
fn chain_id(&self) -> Option<u64> {
self.inner.chain_id()
}
fn nonce(&self) -> u64 {
self.inner.nonce()
}
fn gas_limit(&self) -> u64 {
self.inner.gas_limit()
}
fn gas_price(&self) -> Option<u128> {
self.inner.gas_price()
}
fn max_fee_per_gas(&self) -> u128 {
self.inner.max_fee_per_gas()
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
self.inner.max_priority_fee_per_gas()
}
fn max_fee_per_blob_gas(&self) -> Option<u128> {
self.inner.max_fee_per_blob_gas()
}
fn priority_fee_or_price(&self) -> u128 {
self.inner.priority_fee_or_price()
}
fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
self.inner.effective_gas_price(base_fee)
}
fn is_dynamic_fee(&self) -> bool {
self.inner.is_dynamic_fee()
}
fn kind(&self) -> TxKind {
self.inner.kind()
}
fn is_create(&self) -> bool {
self.inner.is_create()
}
fn value(&self) -> U256 {
self.inner.value()
}
fn input(&self) -> &Bytes {
self.inner.input()
}
fn access_list(&self) -> Option<&AccessList> {
self.inner.access_list()
}
fn blob_versioned_hashes(&self) -> Option<&[B256]> {
self.inner.blob_versioned_hashes()
}
fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
self.inner.authorization_list()
}
}
impl<Cons, Pooled> EthPoolTransaction for OpPooledTransaction<Cons, Pooled>
where
Cons: SignedTransaction + From<Pooled>,
Pooled: SignedTransaction + TryFrom<Cons>,
<Pooled as TryFrom<Cons>>::Error: core::error::Error,
{
fn take_blob(&mut self) -> EthBlobTransactionSidecar {
EthBlobTransactionSidecar::None
}
fn try_into_pooled_eip4844(
self,
_sidecar: Arc<BlobTransactionSidecarVariant>,
) -> Option<Recovered<Self::Pooled>> {
None
}
fn try_from_eip4844(
_tx: Recovered<Self::Consensus>,
_sidecar: BlobTransactionSidecarVariant,
) -> Option<Self> {
None
}
fn validate_blob(
&self,
_sidecar: &BlobTransactionSidecarVariant,
_settings: &KzgSettings,
) -> Result<(), BlobTransactionValidationError> {
Err(BlobTransactionValidationError::NotBlobTransaction(self.ty()))
}
}
/// Helper trait to provide payload builder with access to conditionals and encoded bytes of
/// transaction.
pub trait OpPooledTx:
MaybeConditionalTransaction + MaybeInteropTransaction + PoolTransaction + DataAvailabilitySized
{
/// Returns the EIP-2718 encoded bytes of the transaction.
fn encoded_2718(&self) -> Cow<'_, Bytes>;
}
impl<Cons, Pooled> OpPooledTx for OpPooledTransaction<Cons, Pooled>
where
Cons: SignedTransaction + From<Pooled>,
Pooled: SignedTransaction + TryFrom<Cons>,
<Pooled as TryFrom<Cons>>::Error: core::error::Error,
{
fn encoded_2718(&self) -> Cow<'_, Bytes> {
Cow::Borrowed(self.encoded_2718())
}
}
#[cfg(test)]
mod tests {
use crate::{OpPooledTransaction, OpTransactionValidator};
use alloy_consensus::transaction::Recovered;
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{TxKind, U256};
use op_alloy_consensus::TxDeposit;
use reth_optimism_chainspec::OP_MAINNET;
use reth_optimism_primitives::OpTransactionSigned;
use reth_provider::test_utils::MockEthProvider;
use reth_transaction_pool::{
blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, TransactionOrigin,
TransactionValidationOutcome,
};
#[tokio::test]
async fn validate_optimism_transaction() {
let client = MockEthProvider::default().with_chain_spec(OP_MAINNET.clone());
let validator = EthTransactionValidatorBuilder::new(client)
.no_shanghai()
.no_cancun()
.build(InMemoryBlobStore::default());
let validator = OpTransactionValidator::new(validator);
let origin = TransactionOrigin::External;
let signer = Default::default();
let deposit_tx = TxDeposit {
source_hash: Default::default(),
from: signer,
to: TxKind::Create,
mint: 0,
value: U256::ZERO,
gas_limit: 0,
is_system_transaction: false,
input: Default::default(),
};
let signed_tx: OpTransactionSigned = deposit_tx.into();
let signed_recovered = Recovered::new_unchecked(signed_tx, signer);
let len = signed_recovered.encode_2718_len();
let pooled_tx: OpPooledTransaction = OpPooledTransaction::new(signed_recovered, len);
let outcome = validator.validate_one(origin, pooled_tx).await;
let err = match outcome {
TransactionValidationOutcome::Invalid(_, err) => err,
_ => panic!("Expected invalid transaction"),
};
assert_eq!(err.to_string(), "transaction type not supported");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/supervisor/errors.rs | crates/optimism/txpool/src/supervisor/errors.rs | use alloy_json_rpc::RpcError;
use core::error;
use op_alloy_rpc_types::SuperchainDAError;
/// Failures occurring during validation of inbox entries.
#[derive(thiserror::Error, Debug)]
pub enum InteropTxValidatorError {
/// Inbox entry validation against the Supervisor took longer than allowed.
#[error("inbox entry validation timed out, timeout: {0} secs")]
Timeout(u64),
/// Message does not satisfy validation requirements
#[error(transparent)]
InvalidEntry(#[from] SuperchainDAError),
/// Catch-all variant.
#[error("supervisor server error: {0}")]
Other(Box<dyn error::Error + Send + Sync>),
}
impl InteropTxValidatorError {
/// Returns a new instance of [`Other`](Self::Other) error variant.
pub fn other<E>(err: E) -> Self
where
E: error::Error + Send + Sync + 'static,
{
Self::Other(Box::new(err))
}
/// This function will parse the error code to determine if it matches
/// one of the known Supervisor errors, and return the corresponding
/// error variant. Otherwise, it returns a generic [`Other`](Self::Other) error.
pub fn from_json_rpc<E>(err: RpcError<E>) -> Self
where
E: error::Error + Send + Sync + 'static,
{
// Try to extract error details from the RPC error
if let Some(error_payload) = err.as_error_resp() {
let code = error_payload.code as i32;
// Try to convert the error code to an SuperchainDAError variant
if let Ok(invalid_entry) = SuperchainDAError::try_from(code) {
return Self::InvalidEntry(invalid_entry);
}
}
// Default to generic error
Self::Other(Box::new(err))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/supervisor/client.rs | crates/optimism/txpool/src/supervisor/client.rs | //! This is our custom implementation of validator struct
use crate::{
interop::MaybeInteropTransaction,
supervisor::{
metrics::SupervisorMetrics, parse_access_list_items_to_inbox_entries, ExecutingDescriptor,
InteropTxValidatorError,
},
InvalidCrossTx,
};
use alloy_consensus::Transaction;
use alloy_eips::eip2930::AccessList;
use alloy_primitives::{TxHash, B256};
use alloy_rpc_client::ReqwestClient;
use futures_util::{
future::BoxFuture,
stream::{self, StreamExt},
Stream,
};
use op_alloy_consensus::interop::SafetyLevel;
use reth_transaction_pool::PoolTransaction;
use std::{
borrow::Cow,
future::IntoFuture,
sync::Arc,
time::{Duration, Instant},
};
use tracing::trace;
/// Supervisor hosted by op-labs
// TODO: This should be changed to actual supervisor url
pub const DEFAULT_SUPERVISOR_URL: &str = "http://localhost:1337/";
/// The default request timeout to use
pub const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_millis(100);
/// Implementation of the supervisor trait for the interop.
#[derive(Debug, Clone)]
pub struct SupervisorClient {
/// Stores type's data.
inner: Arc<SupervisorClientInner>,
}
impl SupervisorClient {
/// Returns a new [`SupervisorClientBuilder`].
pub fn builder(supervisor_endpoint: impl Into<String>) -> SupervisorClientBuilder {
SupervisorClientBuilder::new(supervisor_endpoint)
}
/// Returns configured timeout. See [`SupervisorClientInner`].
pub fn timeout(&self) -> Duration {
self.inner.timeout
}
/// Returns configured minimum safety level. See [`SupervisorClient`].
pub fn safety(&self) -> SafetyLevel {
self.inner.safety
}
/// Executes a `supervisor_checkAccessList` with the configured safety level.
pub fn check_access_list<'a>(
&self,
inbox_entries: &'a [B256],
executing_descriptor: ExecutingDescriptor,
) -> CheckAccessListRequest<'a> {
CheckAccessListRequest {
client: self.inner.client.clone(),
inbox_entries: Cow::Borrowed(inbox_entries),
executing_descriptor,
timeout: self.inner.timeout,
safety: self.inner.safety,
metrics: self.inner.metrics.clone(),
}
}
/// Extracts commitment from access list entries, pointing to 0x420..022 and validates them
/// against supervisor.
///
/// If commitment present pre-interop tx rejected.
///
/// Returns:
/// None - if tx is not cross chain,
/// Some(Ok(()) - if tx is valid cross chain,
/// Some(Err(e)) - if tx is not valid or interop is not active
pub async fn is_valid_cross_tx(
&self,
access_list: Option<&AccessList>,
hash: &TxHash,
timestamp: u64,
timeout: Option<u64>,
is_interop_active: bool,
) -> Option<Result<(), InvalidCrossTx>> {
// We don't need to check for deposit transaction in here, because they won't come from
// txpool
let access_list = access_list?;
let inbox_entries = parse_access_list_items_to_inbox_entries(access_list.iter())
.copied()
.collect::<Vec<_>>();
if inbox_entries.is_empty() {
return None;
}
// Interop check
if !is_interop_active {
// No cross chain tx allowed before interop
return Some(Err(InvalidCrossTx::CrossChainTxPreInterop))
}
if let Err(err) = self
.check_access_list(
inbox_entries.as_slice(),
ExecutingDescriptor::new(timestamp, timeout),
)
.await
{
self.inner.metrics.increment_metrics_for_error(&err);
trace!(target: "txpool", hash=%hash, err=%err, "Cross chain transaction invalid");
return Some(Err(InvalidCrossTx::ValidationError(err)));
}
Some(Ok(()))
}
/// Creates a stream that revalidates interop transactions against the supervisor.
/// Returns
/// An implementation of `Stream` that is `Send`-able and tied to the lifetime `'a` of `self`.
/// Each item yielded by the stream is a tuple `(TItem, Option<Result<(), InvalidCrossTx>>)`.
/// - The first element is the original `TItem` that was revalidated.
/// - The second element is the `Option<Result<(), InvalidCrossTx>>` describes the outcome
/// - `None`: Transaction was not identified as a cross-chain candidate by initial checks.
/// - `Some(Ok(()))`: Supervisor confirmed the transaction is valid.
/// - `Some(Err(InvalidCrossTx))`: Supervisor indicated the transaction is invalid.
pub fn revalidate_interop_txs_stream<'a, TItem, InputIter>(
&'a self,
txs_to_revalidate: InputIter,
current_timestamp: u64,
revalidation_window: u64,
max_concurrent_queries: usize,
) -> impl Stream<Item = (TItem, Option<Result<(), InvalidCrossTx>>)> + Send + 'a
where
InputIter: IntoIterator<Item = TItem> + Send + 'a,
InputIter::IntoIter: Send + 'a,
TItem:
MaybeInteropTransaction + PoolTransaction + Transaction + Clone + Send + Sync + 'static,
{
stream::iter(txs_to_revalidate.into_iter().map(move |tx_item| {
let client_for_async_task = self.clone();
async move {
let validation_result = client_for_async_task
.is_valid_cross_tx(
tx_item.access_list(),
tx_item.hash(),
current_timestamp,
Some(revalidation_window),
true,
)
.await;
// return the original transaction paired with its validation result.
(tx_item, validation_result)
}
}))
.buffered(max_concurrent_queries)
}
}
/// Holds supervisor data. Inner type of [`SupervisorClient`].
#[derive(Debug, Clone)]
pub struct SupervisorClientInner {
client: ReqwestClient,
/// The default
safety: SafetyLevel,
/// The default request timeout
timeout: Duration,
/// Metrics for tracking supervisor operations
metrics: SupervisorMetrics,
}
/// Builds [`SupervisorClient`].
#[derive(Debug)]
pub struct SupervisorClientBuilder {
/// Supervisor server's socket.
endpoint: String,
/// Timeout for requests.
///
/// NOTE: this timeout is only effective if it's shorter than the timeout configured for the
/// underlying [`ReqwestClient`].
timeout: Duration,
/// Minimum [`SafetyLevel`] of cross-chain transactions accepted by this client.
safety: SafetyLevel,
}
impl SupervisorClientBuilder {
/// Creates a new builder.
pub fn new(supervisor_endpoint: impl Into<String>) -> Self {
Self {
endpoint: supervisor_endpoint.into(),
timeout: DEFAULT_REQUEST_TIMEOUT,
safety: SafetyLevel::CrossUnsafe,
}
}
/// Configures a custom timeout
pub const fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
/// Sets minimum safety level to accept for cross chain transactions.
pub const fn minimum_safety(mut self, min_safety: SafetyLevel) -> Self {
self.safety = min_safety;
self
}
/// Creates a new supervisor validator.
pub async fn build(self) -> SupervisorClient {
let Self { endpoint, timeout, safety } = self;
let client = ReqwestClient::builder()
.connect(endpoint.as_str())
.await
.expect("building supervisor client");
SupervisorClient {
inner: Arc::new(SupervisorClientInner {
client,
safety,
timeout,
metrics: SupervisorMetrics::default(),
}),
}
}
}
/// A Request future that issues a `supervisor_checkAccessList` request.
#[derive(Debug, Clone)]
pub struct CheckAccessListRequest<'a> {
client: ReqwestClient,
inbox_entries: Cow<'a, [B256]>,
executing_descriptor: ExecutingDescriptor,
timeout: Duration,
safety: SafetyLevel,
metrics: SupervisorMetrics,
}
impl<'a> CheckAccessListRequest<'a> {
/// Configures the timeout to use for the request if any.
pub const fn with_timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
/// Configures the [`SafetyLevel`] for this request
pub const fn with_safety(mut self, safety: SafetyLevel) -> Self {
self.safety = safety;
self
}
}
impl<'a> IntoFuture for CheckAccessListRequest<'a> {
type Output = Result<(), InteropTxValidatorError>;
type IntoFuture = BoxFuture<'a, Self::Output>;
fn into_future(self) -> Self::IntoFuture {
let Self { client, inbox_entries, executing_descriptor, timeout, safety, metrics } = self;
Box::pin(async move {
let start = Instant::now();
let result = tokio::time::timeout(
timeout,
client.request(
"supervisor_checkAccessList",
(inbox_entries, safety, executing_descriptor),
),
)
.await;
metrics.record_supervisor_query(start.elapsed());
result
.map_err(|_| InteropTxValidatorError::Timeout(timeout.as_secs()))?
.map_err(InteropTxValidatorError::from_json_rpc)
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/supervisor/access_list.rs | crates/optimism/txpool/src/supervisor/access_list.rs | // Source: https://github.com/op-rs/kona
// Copyright © 2023 kona contributors Copyright © 2024 Optimism
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the “Software”), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::supervisor::CROSS_L2_INBOX_ADDRESS;
use alloy_eips::eip2930::AccessListItem;
use alloy_primitives::B256;
/// Parses [`AccessListItem`]s to inbox entries.
///
/// Return flattened iterator with all inbox entries.
pub fn parse_access_list_items_to_inbox_entries<'a>(
access_list_items: impl Iterator<Item = &'a AccessListItem>,
) -> impl Iterator<Item = &'a B256> {
access_list_items.filter_map(parse_access_list_item_to_inbox_entries).flatten()
}
/// Parse [`AccessListItem`] to inbox entries, if any.
/// Max 3 inbox entries can exist per [`AccessListItem`] that points to [`CROSS_L2_INBOX_ADDRESS`].
///
/// Returns `Vec::new()` if [`AccessListItem`] address doesn't point to [`CROSS_L2_INBOX_ADDRESS`].
// Access-list spec: <https://github.com/ethereum-optimism/specs/blob/main/specs/interop/supervisor.md#access-list-contents>
fn parse_access_list_item_to_inbox_entries(
access_list_item: &AccessListItem,
) -> Option<impl Iterator<Item = &B256>> {
(access_list_item.address == CROSS_L2_INBOX_ADDRESS)
.then(|| access_list_item.storage_keys.iter())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/supervisor/mod.rs | crates/optimism/txpool/src/supervisor/mod.rs | //! Supervisor support for interop
mod access_list;
pub use access_list::parse_access_list_items_to_inbox_entries;
pub use op_alloy_consensus::interop::*;
pub mod client;
pub use client::{SupervisorClient, SupervisorClientBuilder, DEFAULT_SUPERVISOR_URL};
mod errors;
pub use errors::InteropTxValidatorError;
mod message;
pub use message::ExecutingDescriptor;
pub mod metrics;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/supervisor/metrics.rs | crates/optimism/txpool/src/supervisor/metrics.rs | //! Optimism supervisor and sequencer metrics
use crate::supervisor::InteropTxValidatorError;
use op_alloy_rpc_types::SuperchainDAError;
use reth_metrics::{
metrics::{Counter, Histogram},
Metrics,
};
use std::time::Duration;
/// Optimism supervisor metrics
///
/// A latency histogram for supervisor queries plus one counter per
/// `SuperchainDAError` variant; counters are bumped by
/// [`SupervisorMetrics::increment_metrics_for_error`].
#[derive(Metrics, Clone)]
#[metrics(scope = "optimism_transaction_pool.supervisor")]
pub struct SupervisorMetrics {
    /// How long it takes to query the supervisor in the Optimism transaction pool
    pub(crate) supervisor_query_latency: Histogram,
    /// Counter for the number of times data was skipped
    pub(crate) skipped_data_count: Counter,
    /// Counter for the number of times an unknown chain was encountered
    pub(crate) unknown_chain_count: Counter,
    /// Counter for the number of times conflicting data was encountered
    pub(crate) conflicting_data_count: Counter,
    /// Counter for the number of times ineffective data was encountered
    pub(crate) ineffective_data_count: Counter,
    /// Counter for the number of times data was out of order
    pub(crate) out_of_order_count: Counter,
    /// Counter for the number of times data was awaiting replacement
    pub(crate) awaiting_replacement_count: Counter,
    /// Counter for the number of times data was out of scope
    pub(crate) out_of_scope_count: Counter,
    /// Counter for the number of times there was no parent for the first block
    pub(crate) no_parent_for_first_block_count: Counter,
    /// Counter for the number of times future data was encountered
    pub(crate) future_data_count: Counter,
    /// Counter for the number of times data was missed
    pub(crate) missed_data_count: Counter,
    /// Counter for the number of times data corruption was encountered
    pub(crate) data_corruption_count: Counter,
}
impl SupervisorMetrics {
    /// Records the duration of supervisor queries
    #[inline]
    pub fn record_supervisor_query(&self, duration: Duration) {
        // Histograms record in seconds (fractional), per the metrics convention.
        self.supervisor_query_latency.record(duration.as_secs_f64());
    }
    /// Increments the metrics for the given error
    ///
    /// Only [`InteropTxValidatorError::InvalidEntry`] errors are counted; each
    /// `SuperchainDAError` variant maps 1:1 to its dedicated counter above.
    pub fn increment_metrics_for_error(&self, error: &InteropTxValidatorError) {
        if let InteropTxValidatorError::InvalidEntry(inner) = error {
            match inner {
                SuperchainDAError::SkippedData => self.skipped_data_count.increment(1),
                SuperchainDAError::UnknownChain => self.unknown_chain_count.increment(1),
                SuperchainDAError::ConflictingData => self.conflicting_data_count.increment(1),
                SuperchainDAError::IneffectiveData => self.ineffective_data_count.increment(1),
                SuperchainDAError::OutOfOrder => self.out_of_order_count.increment(1),
                SuperchainDAError::AwaitingReplacement => {
                    self.awaiting_replacement_count.increment(1)
                }
                SuperchainDAError::OutOfScope => self.out_of_scope_count.increment(1),
                SuperchainDAError::NoParentForFirstBlock => {
                    self.no_parent_for_first_block_count.increment(1)
                }
                SuperchainDAError::FutureData => self.future_data_count.increment(1),
                SuperchainDAError::MissedData => self.missed_data_count.increment(1),
                SuperchainDAError::DataCorruption => self.data_corruption_count.increment(1),
                // Variants without a dedicated counter are intentionally not tracked.
                _ => {}
            }
        }
    }
}
/// Optimism sequencer metrics
///
/// Tracks latency of forwarding pool transactions to the sequencer; recorded via
/// [`SequencerMetrics::record_forward_latency`].
#[derive(Metrics, Clone)]
#[metrics(scope = "optimism_transaction_pool.sequencer")]
pub struct SequencerMetrics {
    /// How long it takes to forward a transaction to the sequencer
    pub(crate) sequencer_forward_latency: Histogram,
}
impl SequencerMetrics {
    /// Records how long forwarding a transaction to the sequencer took.
    #[inline]
    pub fn record_forward_latency(&self, duration: Duration) {
        // Histograms are recorded in (fractional) seconds.
        let seconds = duration.as_secs_f64();
        self.sequencer_forward_latency.record(seconds);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/txpool/src/supervisor/message.rs | crates/optimism/txpool/src/supervisor/message.rs | //! Interop message primitives.
// Source: https://github.com/op-rs/kona
// Copyright © 2023 kona contributors Copyright © 2024 Optimism
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
// associated documentation files (the “Software”), to deal in the Software without restriction,
// including without limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
// NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/// An [`ExecutingDescriptor`] is a part of the payload to `supervisor_checkAccessList`
/// Spec: <https://github.com/ethereum-optimism/specs/blob/main/specs/interop/supervisor.md#executingdescriptor>
#[derive(Default, Debug, PartialEq, Eq, Clone, serde::Serialize, serde::Deserialize)]
pub struct ExecutingDescriptor {
    /// The timestamp used to enforce timestamp [invariant](https://github.com/ethereum-optimism/specs/blob/main/specs/interop/derivation.md#invariants)
    // Serialized via `alloy_serde::quantity` (JSON-RPC quantity encoding).
    #[serde(with = "alloy_serde::quantity")]
    timestamp: u64,
    /// The timeout that requests verification to still hold at `timestamp+timeout`
    /// (message expiry may drop previously valid messages).
    // Omitted from the JSON payload entirely when `None`.
    #[serde(skip_serializing_if = "Option::is_none", with = "alloy_serde::quantity::opt")]
    timeout: Option<u64>,
}
impl ExecutingDescriptor {
    /// Create a new [`ExecutingDescriptor`] from the timestamp and timeout
    ///
    /// A `timeout` of `None` requests no extended validity window beyond `timestamp`.
    pub const fn new(timestamp: u64, timeout: Option<u64>) -> Self {
        Self { timestamp, timeout }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/hardforks/src/lib.rs | crates/optimism/hardforks/src/lib.rs | //! OP-Reth hard forks.
//!
//! This defines the [`ChainHardforks`] for certain op chains.
//! It keeps L2 hardforks that correspond to L1 hardforks in sync by defining both at the same
//! activation timestamp, this includes:
//! - Canyon : Shanghai
//! - Ecotone : Cancun
//! - Isthmus : Prague
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
extern crate alloc;
// Re-export alloy-op-hardforks types.
pub use alloy_op_hardforks::{OpHardfork, OpHardforks};
use alloc::vec;
use alloy_primitives::U256;
use once_cell::sync::Lazy as LazyLock;
use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork};
/// Dev hardforks
///
/// Every fork is active from genesis (block 0 / timestamp 0). L1-equivalent forks are
/// listed at the same activation as their OP counterparts: Shanghai/Canyon,
/// Cancun/Ecotone, Prague/Isthmus.
pub static DEV_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
    ChainHardforks::new(vec![
        (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
        (
            EthereumHardfork::Paris.boxed(),
            ForkCondition::TTD {
                activation_block_number: 0,
                fork_block: None,
                total_difficulty: U256::ZERO,
            },
        ),
        (OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)),
        (OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)),
        (OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)),
        (OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)),
        (OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)),
        (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)),
        (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(0)),
        // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(0)),
    ])
});
/// Optimism mainnet list of hardforks.
///
/// Pre-Bedrock L1 forks activate by block number; post-Bedrock forks by timestamp,
/// with L1-equivalent forks (Shanghai/Cancun/Prague) paired to their OP counterparts
/// (Canyon/Ecotone/Isthmus) at identical timestamps.
pub static OP_MAINNET_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
    ChainHardforks::new(vec![
        (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(3950000)),
        (EthereumHardfork::London.boxed(), ForkCondition::Block(105235063)),
        (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(105235063)),
        (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(105235063)),
        (
            EthereumHardfork::Paris.boxed(),
            ForkCondition::TTD {
                activation_block_number: 105235063,
                fork_block: Some(105235063),
                total_difficulty: U256::ZERO,
            },
        ),
        // 105235063 is the Bedrock transition block on OP mainnet.
        (OpHardfork::Bedrock.boxed(), ForkCondition::Block(105235063)),
        (OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1704992401)),
        (OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(1704992401)),
        (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710374401)),
        (OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(1710374401)),
        (OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(1720627201)),
        (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(1726070401)),
        (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)),
        (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)),
        (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)),
        // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update
        // timestamp when Jovian is planned */
    ])
});
/// Optimism Sepolia list of hardforks.
///
/// All L1 forks are active from genesis; post-Bedrock OP forks activate by timestamp,
/// paired with their L1 equivalents (Shanghai/Canyon, Cancun/Ecotone, Prague/Isthmus).
pub static OP_SEPOLIA_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
    ChainHardforks::new(vec![
        (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
        (
            EthereumHardfork::Paris.boxed(),
            ForkCondition::TTD {
                activation_block_number: 0,
                fork_block: Some(0),
                total_difficulty: U256::ZERO,
            },
        ),
        (OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)),
        (OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1699981200)),
        (OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(1699981200)),
        (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1708534800)),
        (OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)),
        (OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(1716998400)),
        (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(1723478400)),
        (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)),
        (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)),
        (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)),
        // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update
        // timestamp when Jovian is planned */
    ])
});
/// Base Sepolia list of hardforks.
///
/// Identical activation schedule to [`OP_SEPOLIA_HARDFORKS`]: all L1 forks at genesis,
/// post-Bedrock OP forks by timestamp in lockstep with their L1 equivalents.
pub static BASE_SEPOLIA_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
    ChainHardforks::new(vec![
        (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
        (
            EthereumHardfork::Paris.boxed(),
            ForkCondition::TTD {
                activation_block_number: 0,
                fork_block: Some(0),
                total_difficulty: U256::ZERO,
            },
        ),
        (OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)),
        (OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1699981200)),
        (OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(1699981200)),
        (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1708534800)),
        (OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)),
        (OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(1716998400)),
        (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(1723478400)),
        (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)),
        (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)),
        (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)),
        // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update
        // timestamp when Jovian is planned */
    ])
});
/// Base mainnet list of hardforks.
///
/// All L1 forks at genesis (Base launched post-Bedrock); timestamped OP forks share the
/// activation times used by [`OP_MAINNET_HARDFORKS`].
pub static BASE_MAINNET_HARDFORKS: LazyLock<ChainHardforks> = LazyLock::new(|| {
    ChainHardforks::new(vec![
        (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
        (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
        (
            EthereumHardfork::Paris.boxed(),
            ForkCondition::TTD {
                activation_block_number: 0,
                fork_block: Some(0),
                total_difficulty: U256::ZERO,
            },
        ),
        (OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)),
        (OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
        (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1704992401)),
        (OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(1704992401)),
        (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710374401)),
        (OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(1710374401)),
        (OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(1720627201)),
        (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(1726070401)),
        (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)),
        (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)),
        (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)),
        // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update
        // timestamp when Jovian is planned */
    ])
});
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/node.rs | crates/optimism/node/src/node.rs | //! Optimism Node types config.
use crate::{
args::RollupArgs,
engine::OpEngineValidator,
txpool::{OpTransactionPool, OpTransactionValidator},
OpEngineApiBuilder, OpEngineTypes,
};
use op_alloy_consensus::{interop::SafetyLevel, OpPooledTransaction};
use op_alloy_rpc_types_engine::OpExecutionData;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks};
use reth_engine_local::LocalPayloadAttributesBuilder;
use reth_evm::ConfigureEvm;
use reth_network::{
types::BasicNetworkPrimitives, NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives,
PeersInfo,
};
use reth_node_api::{
AddOnsContext, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, NodeAddOns,
NodePrimitives, PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy,
};
use reth_node_builder::{
components::{
BasicPayloadServiceBuilder, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder,
NetworkBuilder, PayloadBuilderBuilder, PoolBuilder, PoolBuilderConfigOverrides,
TxPoolBuilder,
},
node::{FullNodeTypes, NodeTypes},
rpc::{
BasicEngineValidatorBuilder, EngineApiBuilder, EngineValidatorAddOn,
EngineValidatorBuilder, EthApiBuilder, Identity, PayloadValidatorBuilder, RethRpcAddOns,
RethRpcMiddleware, RethRpcServerHandles, RpcAddOns, RpcContext, RpcHandle,
},
BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder,
};
use reth_optimism_chainspec::{OpChainSpec, OpHardfork};
use reth_optimism_consensus::OpBeaconConsensus;
use reth_optimism_evm::{OpEvmConfig, OpRethReceiptBuilder};
use reth_optimism_forks::OpHardforks;
use reth_optimism_payload_builder::{
builder::OpPayloadTransactions,
config::{OpBuilderConfig, OpDAConfig},
OpAttributes, OpBuiltPayload, OpPayloadPrimitives,
};
use reth_optimism_primitives::{DepositReceipt, OpPrimitives};
use reth_optimism_rpc::{
eth::{ext::OpEthExtApi, OpEthApiBuilder},
historical::{HistoricalRpc, HistoricalRpcClient},
miner::{MinerApiExtServer, OpMinerExtApi},
witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi},
SequencerClient,
};
use reth_optimism_storage::OpStorage;
use reth_optimism_txpool::{
supervisor::{SupervisorClient, DEFAULT_SUPERVISOR_URL},
OpPooledTx,
};
use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions};
use reth_rpc_api::{eth::RpcTypes, DebugApiServer, L2EthApiExtServer};
use reth_rpc_server_types::RethRpcModule;
use reth_tracing::tracing::{debug, info};
use reth_transaction_pool::{
blobstore::DiskFileBlobStore, EthPoolTransaction, PoolPooledTx, PoolTransaction,
TransactionPool, TransactionValidationTaskExecutor,
};
use reth_trie_common::KeccakKeyHasher;
use serde::de::DeserializeOwned;
use std::{marker::PhantomData, sync::Arc};
use url::Url;
/// Marker trait for Optimism node types with standard engine, chain spec, and primitives.
///
/// Never implemented manually — the blanket impl below covers every conforming
/// [`NodeTypes`] automatically.
pub trait OpNodeTypes:
    NodeTypes<Payload = OpEngineTypes, ChainSpec: OpHardforks + Hardforks, Primitives = OpPrimitives>
{
}
/// Blanket impl for all node types that conform to the Optimism spec.
impl<N> OpNodeTypes for N where
    N: NodeTypes<
        Payload = OpEngineTypes,
        ChainSpec: OpHardforks + Hardforks,
        Primitives = OpPrimitives,
    >
{
}
/// Helper trait for Optimism node types with full configuration including storage and execution
/// data.
///
/// Auto-implemented via the blanket impl below; fixes the storage type to [`OpStorage`]
/// and requires an engine whose execution data is [`OpExecutionData`].
pub trait OpFullNodeTypes:
    NodeTypes<
    ChainSpec: OpHardforks,
    Primitives: OpPayloadPrimitives,
    Storage = OpStorage,
    Payload: EngineTypes<ExecutionData = OpExecutionData>,
>
{
}
impl<N> OpFullNodeTypes for N where
    N: NodeTypes<
        ChainSpec: OpHardforks,
        Primitives: OpPayloadPrimitives,
        Storage = OpStorage,
        Payload: EngineTypes<ExecutionData = OpExecutionData>,
    >
{
}
/// Type configuration for a regular Optimism node.
///
/// Construct via [`OpNode::new`] or `Default`; `#[non_exhaustive]` reserves room for
/// future fields without breaking downstream construction.
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct OpNode {
    /// Additional Optimism args
    pub args: RollupArgs,
    /// Data availability configuration for the OP builder.
    ///
    /// Used to throttle the size of the data availability payloads (configured by the batcher via
    /// the `miner_` api).
    ///
    /// By default no throttling is applied.
    pub da_config: OpDAConfig,
}
/// A [`ComponentsBuilder`] with its generic arguments set to a stack of Optimism specific builders.
///
/// The `Payload` parameter defaults to [`OpPayloadBuilder`] but can be swapped for a
/// custom payload-builder type.
pub type OpNodeComponentBuilder<Node, Payload = OpPayloadBuilder> = ComponentsBuilder<
    Node,
    OpPoolBuilder,
    BasicPayloadServiceBuilder<Payload>,
    OpNetworkBuilder,
    OpExecutorBuilder,
    OpConsensusBuilder,
>;
impl OpNode {
    /// Creates a new instance of the Optimism node type.
    ///
    /// Uses a default (non-throttling) [`OpDAConfig`]; override with [`Self::with_da_config`].
    pub fn new(args: RollupArgs) -> Self {
        Self { args, da_config: OpDAConfig::default() }
    }
    /// Configure the data availability configuration for the OP builder.
    pub fn with_da_config(mut self, da_config: OpDAConfig) -> Self {
        self.da_config = da_config;
        self
    }
    /// Returns the components for the given [`RollupArgs`].
    pub fn components<Node>(&self) -> OpNodeComponentBuilder<Node>
    where
        Node: FullNodeTypes<Types: OpNodeTypes>,
    {
        // Only the flags needed below are destructured; the remaining args are read from
        // `self.args` directly.
        let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } =
            self.args;
        // Builder calls are ordered pool -> executor -> payload -> network -> consensus;
        // the ComponentsBuilder typestate expects each component to be set exactly once.
        ComponentsBuilder::default()
            .node_types::<Node>()
            .pool(
                OpPoolBuilder::default()
                    .with_enable_tx_conditional(self.args.enable_tx_conditional)
                    .with_supervisor(
                        self.args.supervisor_http.clone(),
                        self.args.supervisor_safety_level,
                    ),
            )
            .executor(OpExecutorBuilder::default())
            .payload(BasicPayloadServiceBuilder::new(
                OpPayloadBuilder::new(compute_pending_block).with_da_config(self.da_config.clone()),
            ))
            // NOTE(review): the second argument looks like "disable discovery v4" given the
            // negation — confirm against `OpNetworkBuilder::new`.
            .network(OpNetworkBuilder::new(disable_txpool_gossip, !discovery_v4))
            .consensus(OpConsensusBuilder::default())
    }
    /// Returns [`OpAddOnsBuilder`] with configured arguments.
    pub fn add_ons_builder<NetworkT: RpcTypes>(&self) -> OpAddOnsBuilder<NetworkT> {
        OpAddOnsBuilder::default()
            .with_sequencer(self.args.sequencer.clone())
            .with_sequencer_headers(self.args.sequencer_headers.clone())
            .with_da_config(self.da_config.clone())
            .with_enable_tx_conditional(self.args.enable_tx_conditional)
            .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee)
            .with_historical_rpc(self.args.historical_rpc.clone())
            .with_flashblocks(self.args.flashblocks_url.clone())
    }
    /// Instantiates the [`ProviderFactoryBuilder`] for an opstack node.
    ///
    /// # Open a Providerfactory in read-only mode from a datadir
    ///
    /// See also: [`ProviderFactoryBuilder`] and
    /// [`ReadOnlyConfig`](reth_provider::providers::ReadOnlyConfig).
    ///
    /// ```no_run
    /// use reth_optimism_chainspec::BASE_MAINNET;
    /// use reth_optimism_node::OpNode;
    ///
    /// let factory =
    ///     OpNode::provider_factory_builder().open_read_only(BASE_MAINNET.clone(), "datadir").unwrap();
    /// ```
    ///
    /// # Open a Providerfactory manually with all required components
    ///
    /// ```no_run
    /// use reth_db::open_db_read_only;
    /// use reth_optimism_chainspec::OpChainSpecBuilder;
    /// use reth_optimism_node::OpNode;
    /// use reth_provider::providers::StaticFileProvider;
    /// use std::sync::Arc;
    ///
    /// let factory = OpNode::provider_factory_builder()
    ///     .db(Arc::new(open_db_read_only("db", Default::default()).unwrap()))
    ///     .chainspec(OpChainSpecBuilder::base_mainnet().build().into())
    ///     .static_file(StaticFileProvider::read_only("db/static_files", false).unwrap())
    ///     .build_provider_factory();
    /// ```
    pub fn provider_factory_builder() -> ProviderFactoryBuilder<Self> {
        ProviderFactoryBuilder::default()
    }
}
impl<N> Node<N> for OpNode
where
    N: FullNodeTypes<Types: OpFullNodeTypes + OpNodeTypes>,
{
    // OP-specific component stack: pool, payload service, network, executor, consensus.
    type ComponentsBuilder = ComponentsBuilder<
        N,
        OpPoolBuilder,
        BasicPayloadServiceBuilder<OpPayloadBuilder>,
        OpNetworkBuilder,
        OpExecutorBuilder,
        OpConsensusBuilder,
    >;
    // Add-ons parameterized over the adapter produced by the components builder above.
    type AddOns = OpAddOns<
        NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
        OpEthApiBuilder,
        OpEngineValidatorBuilder,
        OpEngineApiBuilder<OpEngineValidatorBuilder>,
        BasicEngineValidatorBuilder<OpEngineValidatorBuilder>,
    >;
    fn components_builder(&self) -> Self::ComponentsBuilder {
        // Delegates to the inherent `components` method.
        Self::components(self)
    }
    fn add_ons(&self) -> Self::AddOns {
        self.add_ons_builder().build()
    }
}
impl<N> DebugNode<N> for OpNode
where
    N: FullNodeComponents<Types = Self>,
{
    // RPC block representation uses the OP transaction envelope.
    type RpcBlock = alloy_rpc_types_eth::Block<op_alloy_consensus::OpTxEnvelope>;
    fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy<Self> {
        // Converts the RPC block representation into the consensus (primitive) block.
        rpc_block.into_consensus()
    }
    fn local_payload_attributes_builder(
        chain_spec: &Self::ChainSpec,
    ) -> impl PayloadAttributesBuilder<<Self::Payload as PayloadTypes>::PayloadAttributes> {
        // The local builder owns its chain spec, hence the clone into an Arc.
        LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone()))
    }
}
// Canonical type bindings for the OP node: OP primitives, chain spec, storage and engine.
impl NodeTypes for OpNode {
    type Primitives = OpPrimitives;
    type ChainSpec = OpChainSpec;
    type Storage = OpStorage;
    type Payload = OpEngineTypes;
}
/// Add-ons w.r.t. optimism.
///
/// This type provides optimism-specific addons to the node and exposes the RPC server and engine
/// API.
#[derive(Debug)]
pub struct OpAddOns<
    N: FullNodeComponents,
    EthB: EthApiBuilder<N>,
    PVB,
    EB = OpEngineApiBuilder<PVB>,
    EVB = BasicEngineValidatorBuilder<PVB>,
    RpcMiddleware = Identity,
> {
    /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers
    /// and eth-api.
    pub rpc_add_ons: RpcAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>,
    /// Data availability configuration for the OP builder.
    pub da_config: OpDAConfig,
    /// Sequencer client, configured to forward submitted transactions to sequencer of given OP
    /// network.
    pub sequencer_url: Option<String>,
    /// Headers to use for the sequencer client requests.
    pub sequencer_headers: Vec<String>,
    /// RPC endpoint for historical data.
    ///
    /// This can be used to forward pre-bedrock rpc requests (op-mainnet).
    pub historical_rpc: Option<String>,
    /// Enable transaction conditionals.
    enable_tx_conditional: bool,
    /// Minimum priority fee to suggest.
    // NOTE(review): set via `OpAddOnsBuilder::with_min_suggested_priority_fee`; presumably
    // consumed by the eth-API fee suggestion in wei — confirm.
    min_suggested_priority_fee: u64,
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> OpAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
    N: FullNodeComponents,
    EthB: EthApiBuilder<N>,
{
    /// Creates a new instance from components.
    ///
    /// Plain field-by-field constructor; see the struct docs for what each field controls.
    pub const fn new(
        rpc_add_ons: RpcAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>,
        da_config: OpDAConfig,
        sequencer_url: Option<String>,
        sequencer_headers: Vec<String>,
        historical_rpc: Option<String>,
        enable_tx_conditional: bool,
        min_suggested_priority_fee: u64,
    ) -> Self {
        Self {
            rpc_add_ons,
            da_config,
            sequencer_url,
            sequencer_headers,
            historical_rpc,
            enable_tx_conditional,
            min_suggested_priority_fee,
        }
    }
}
// Default add-ons use the OP eth-api and engine-validator builders with builder defaults.
impl<N> Default for OpAddOns<N, OpEthApiBuilder, OpEngineValidatorBuilder>
where
    N: FullNodeComponents<Types: OpNodeTypes>,
    OpEthApiBuilder: EthApiBuilder<N>,
{
    fn default() -> Self {
        Self::builder().build()
    }
}
impl<N, NetworkT, RpcMiddleware>
    OpAddOns<
        N,
        OpEthApiBuilder<NetworkT>,
        OpEngineValidatorBuilder,
        OpEngineApiBuilder<OpEngineValidatorBuilder>,
        RpcMiddleware,
    >
where
    N: FullNodeComponents<Types: OpNodeTypes>,
    OpEthApiBuilder<NetworkT>: EthApiBuilder<N>,
{
    /// Build a [`OpAddOns`] using [`OpAddOnsBuilder`].
    pub fn builder() -> OpAddOnsBuilder<NetworkT> {
        OpAddOnsBuilder::default()
    }
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> OpAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
    N: FullNodeComponents,
    EthB: EthApiBuilder<N>,
{
    /// Maps the [`reth_node_builder::rpc::EngineApiBuilder`] builder type.
    ///
    /// Changing the builder changes the `EB` type parameter, so the struct is
    /// destructured and rebuilt rather than mutated in place.
    pub fn with_engine_api<T>(
        self,
        engine_api_builder: T,
    ) -> OpAddOns<N, EthB, PVB, T, EVB, RpcMiddleware> {
        let Self {
            rpc_add_ons,
            da_config,
            sequencer_url,
            sequencer_headers,
            historical_rpc,
            enable_tx_conditional,
            min_suggested_priority_fee,
            ..
        } = self;
        OpAddOns::new(
            rpc_add_ons.with_engine_api(engine_api_builder),
            da_config,
            sequencer_url,
            sequencer_headers,
            historical_rpc,
            enable_tx_conditional,
            min_suggested_priority_fee,
        )
    }
    /// Maps the [`PayloadValidatorBuilder`] builder type.
    ///
    /// Same destructure/rebuild pattern as `with_engine_api`: the `PVB` type
    /// parameter changes, so a new `OpAddOns` value must be constructed.
    pub fn with_payload_validator<T>(
        self,
        payload_validator_builder: T,
    ) -> OpAddOns<N, EthB, T, EB, EVB, RpcMiddleware> {
        let Self {
            rpc_add_ons,
            da_config,
            sequencer_url,
            sequencer_headers,
            enable_tx_conditional,
            min_suggested_priority_fee,
            historical_rpc,
            ..
        } = self;
        OpAddOns::new(
            rpc_add_ons.with_payload_validator(payload_validator_builder),
            da_config,
            sequencer_url,
            sequencer_headers,
            historical_rpc,
            enable_tx_conditional,
            min_suggested_priority_fee,
        )
    }
    /// Sets the RPC middleware stack for processing RPC requests.
    ///
    /// This method configures a custom middleware stack that will be applied to all RPC requests
    /// across HTTP, `WebSocket`, and IPC transports. The middleware is applied to the RPC service
    /// layer, allowing you to intercept, modify, or enhance RPC request processing.
    ///
    /// See also [`RpcAddOns::with_rpc_middleware`].
    pub fn with_rpc_middleware<T>(self, rpc_middleware: T) -> OpAddOns<N, EthB, PVB, EB, EVB, T> {
        let Self {
            rpc_add_ons,
            da_config,
            sequencer_url,
            sequencer_headers,
            enable_tx_conditional,
            min_suggested_priority_fee,
            historical_rpc,
            ..
        } = self;
        OpAddOns::new(
            rpc_add_ons.with_rpc_middleware(rpc_middleware),
            da_config,
            sequencer_url,
            sequencer_headers,
            historical_rpc,
            enable_tx_conditional,
            min_suggested_priority_fee,
        )
    }
    /// Sets the hook that is run once the rpc server is started.
    pub fn on_rpc_started<F>(mut self, hook: F) -> Self
    where
        F: FnOnce(RpcContext<'_, N, EthB::EthApi>, RethRpcServerHandles) -> eyre::Result<()>
            + Send
            + 'static,
    {
        // Type parameters are unchanged here, so the hook can be set by mutation.
        self.rpc_add_ons = self.rpc_add_ons.on_rpc_started(hook);
        self
    }
    /// Sets the hook that is run to configure the rpc modules.
    pub fn extend_rpc_modules<F>(mut self, hook: F) -> Self
    where
        F: FnOnce(RpcContext<'_, N, EthB::EthApi>) -> eyre::Result<()> + Send + 'static,
    {
        self.rpc_add_ons = self.rpc_add_ons.extend_rpc_modules(hook);
        self
    }
}
impl<N, EthB, PVB, EB, EVB, Attrs, RpcMiddleware> NodeAddOns<N>
for OpAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec: OpHardforks,
Primitives: OpPayloadPrimitives,
Payload: PayloadTypes<PayloadBuilderAttributes = Attrs>,
>,
Evm: ConfigureEvm<
NextBlockEnvCtx: BuildNextEnv<
Attrs,
HeaderTy<N::Types>,
<N::Types as NodeTypes>::ChainSpec,
>,
>,
Pool: TransactionPool<Transaction: OpPooledTx>,
>,
EthB: EthApiBuilder<N>,
PVB: Send,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N>,
RpcMiddleware: RethRpcMiddleware,
Attrs: OpAttributes<Transaction = TxTy<N::Types>, RpcPayloadAttributes: DeserializeOwned>,
{
type Handle = RpcHandle<N, EthB::EthApi>;
async fn launch_add_ons(
self,
ctx: reth_node_api::AddOnsContext<'_, N>,
) -> eyre::Result<Self::Handle> {
let Self {
rpc_add_ons,
da_config,
sequencer_url,
sequencer_headers,
enable_tx_conditional,
historical_rpc,
..
} = self;
let maybe_pre_bedrock_historical_rpc = historical_rpc
.and_then(|historical_rpc| {
ctx.node
.provider()
.chain_spec()
.op_fork_activation(OpHardfork::Bedrock)
.block_number()
.filter(|activation| *activation > 0)
.map(|bedrock_block| (historical_rpc, bedrock_block))
})
.map(|(historical_rpc, bedrock_block)| -> eyre::Result<_> {
info!(target: "reth::cli", %bedrock_block, ?historical_rpc, "Using historical RPC endpoint pre bedrock");
let provider = ctx.node.provider().clone();
let client = HistoricalRpcClient::new(&historical_rpc)?;
let layer = HistoricalRpc::new(provider, client, bedrock_block);
Ok(layer)
})
.transpose()?
;
let rpc_add_ons = rpc_add_ons.option_layer_rpc_middleware(maybe_pre_bedrock_historical_rpc);
let builder = reth_optimism_payload_builder::OpPayloadBuilder::new(
ctx.node.pool().clone(),
ctx.node.provider().clone(),
ctx.node.evm_config().clone(),
);
// install additional OP specific rpc methods
let debug_ext = OpDebugWitnessApi::<_, _, _, Attrs>::new(
ctx.node.provider().clone(),
Box::new(ctx.node.task_executor().clone()),
builder,
);
let miner_ext = OpMinerExtApi::new(da_config);
let sequencer_client = if let Some(url) = sequencer_url {
Some(SequencerClient::new_with_headers(url, sequencer_headers).await?)
} else {
None
};
let tx_conditional_ext: OpEthExtApi<N::Pool, N::Provider> = OpEthExtApi::new(
sequencer_client,
ctx.node.pool().clone(),
ctx.node.provider().clone(),
);
rpc_add_ons
.launch_add_ons_with(ctx, move |container| {
let reth_node_builder::rpc::RpcModuleContainer { modules, auth_module, registry } =
container;
debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint");
modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?;
// extend the miner namespace if configured in the regular http server
modules.merge_if_module_configured(
RethRpcModule::Miner,
miner_ext.clone().into_rpc(),
)?;
// install the miner extension in the authenticated if configured
if modules.module_config().contains_any(&RethRpcModule::Miner) {
debug!(target: "reth::cli", "Installing miner DA rpc endpoint");
auth_module.merge_auth_methods(miner_ext.into_rpc())?;
}
// install the debug namespace in the authenticated if configured
if modules.module_config().contains_any(&RethRpcModule::Debug) {
debug!(target: "reth::cli", "Installing debug rpc endpoint");
auth_module.merge_auth_methods(registry.debug_api().into_rpc())?;
}
if enable_tx_conditional {
// extend the eth namespace if configured in the regular http server
modules.merge_if_module_configured(
RethRpcModule::Eth,
tx_conditional_ext.into_rpc(),
)?;
}
Ok(())
})
.await
}
}
impl<N, EthB, PVB, EB, EVB, Attrs, RpcMiddleware> RethRpcAddOns<N>
for OpAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec: OpHardforks,
Primitives: OpPayloadPrimitives,
Payload: PayloadTypes<PayloadBuilderAttributes = Attrs>,
>,
Evm: ConfigureEvm<
NextBlockEnvCtx: BuildNextEnv<
Attrs,
HeaderTy<N::Types>,
<N::Types as NodeTypes>::ChainSpec,
>,
>,
>,
<<N as FullNodeComponents>::Pool as TransactionPool>::Transaction: OpPooledTx,
EthB: EthApiBuilder<N>,
PVB: PayloadValidatorBuilder<N>,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N>,
RpcMiddleware: RethRpcMiddleware,
Attrs: OpAttributes<Transaction = TxTy<N::Types>, RpcPayloadAttributes: DeserializeOwned>,
{
type EthApi = EthB::EthApi;
fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks<N, Self::EthApi> {
self.rpc_add_ons.hooks_mut()
}
}
impl<N, EthB, PVB, EB, EVB, RpcMiddleware> EngineValidatorAddOn<N>
for OpAddOns<N, EthB, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents,
EthB: EthApiBuilder<N>,
PVB: Send,
EB: EngineApiBuilder<N>,
EVB: EngineValidatorBuilder<N>,
RpcMiddleware: Send,
{
type ValidatorBuilder = EVB;
fn engine_validator_builder(&self) -> Self::ValidatorBuilder {
EngineValidatorAddOn::engine_validator_builder(&self.rpc_add_ons)
}
}
/// A regular optimism evm and executor builder.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct OpAddOnsBuilder<NetworkT, RpcMiddleware = Identity> {
/// Sequencer client, configured to forward submitted transactions to sequencer of given OP
/// network.
sequencer_url: Option<String>,
/// Headers to use for the sequencer client requests.
sequencer_headers: Vec<String>,
/// RPC endpoint for historical data.
historical_rpc: Option<String>,
/// Data availability configuration for the OP builder.
da_config: Option<OpDAConfig>,
/// Enable transaction conditionals.
enable_tx_conditional: bool,
/// Marker for network types.
_nt: PhantomData<NetworkT>,
/// Minimum suggested priority fee (tip)
min_suggested_priority_fee: u64,
/// RPC middleware to use
rpc_middleware: RpcMiddleware,
/// Optional tokio runtime to use for the RPC server.
tokio_runtime: Option<tokio::runtime::Handle>,
/// A URL pointing to a secure websocket service that streams out flashblocks.
flashblocks_url: Option<Url>,
}
impl<NetworkT> Default for OpAddOnsBuilder<NetworkT> {
fn default() -> Self {
Self {
sequencer_url: None,
sequencer_headers: Vec::new(),
historical_rpc: None,
da_config: None,
enable_tx_conditional: false,
min_suggested_priority_fee: 1_000_000,
_nt: PhantomData,
rpc_middleware: Identity::new(),
tokio_runtime: None,
flashblocks_url: None,
}
}
}
impl<NetworkT, RpcMiddleware> OpAddOnsBuilder<NetworkT, RpcMiddleware> {
/// With a [`SequencerClient`].
pub fn with_sequencer(mut self, sequencer_client: Option<String>) -> Self {
self.sequencer_url = sequencer_client;
self
}
/// With headers to use for the sequencer client requests.
pub fn with_sequencer_headers(mut self, sequencer_headers: Vec<String>) -> Self {
self.sequencer_headers = sequencer_headers;
self
}
/// Configure the data availability configuration for the OP builder.
pub fn with_da_config(mut self, da_config: OpDAConfig) -> Self {
self.da_config = Some(da_config);
self
}
/// Configure if transaction conditional should be enabled.
pub const fn with_enable_tx_conditional(mut self, enable_tx_conditional: bool) -> Self {
self.enable_tx_conditional = enable_tx_conditional;
self
}
/// Configure the minimum priority fee (tip)
pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self {
self.min_suggested_priority_fee = min;
self
}
/// Configures the endpoint for historical RPC forwarding.
pub fn with_historical_rpc(mut self, historical_rpc: Option<String>) -> Self {
self.historical_rpc = historical_rpc;
self
}
/// Configures a custom tokio runtime for the RPC server.
///
/// Caution: This runtime must not be created from within asynchronous context.
pub fn with_tokio_runtime(mut self, tokio_runtime: Option<tokio::runtime::Handle>) -> Self {
self.tokio_runtime = tokio_runtime;
self
}
/// Configure the RPC middleware to use
pub fn with_rpc_middleware<T>(self, rpc_middleware: T) -> OpAddOnsBuilder<NetworkT, T> {
let Self {
sequencer_url,
sequencer_headers,
historical_rpc,
da_config,
enable_tx_conditional,
min_suggested_priority_fee,
tokio_runtime,
_nt,
flashblocks_url,
..
} = self;
OpAddOnsBuilder {
sequencer_url,
sequencer_headers,
historical_rpc,
da_config,
enable_tx_conditional,
min_suggested_priority_fee,
_nt,
rpc_middleware,
tokio_runtime,
flashblocks_url,
}
}
/// With a URL pointing to a flashblocks secure websocket subscription.
pub fn with_flashblocks(mut self, flashblocks_url: Option<Url>) -> Self {
self.flashblocks_url = flashblocks_url;
self
}
}
impl<NetworkT, RpcMiddleware> OpAddOnsBuilder<NetworkT, RpcMiddleware> {
/// Builds an instance of [`OpAddOns`].
pub fn build<N, PVB, EB, EVB>(
self,
) -> OpAddOns<N, OpEthApiBuilder<NetworkT>, PVB, EB, EVB, RpcMiddleware>
where
N: FullNodeComponents<Types: NodeTypes>,
OpEthApiBuilder<NetworkT>: EthApiBuilder<N>,
PVB: PayloadValidatorBuilder<N> + Default,
EB: Default,
EVB: Default,
{
let Self {
sequencer_url,
sequencer_headers,
da_config,
enable_tx_conditional,
min_suggested_priority_fee,
historical_rpc,
rpc_middleware,
tokio_runtime,
flashblocks_url,
..
} = self;
OpAddOns::new(
RpcAddOns::new(
OpEthApiBuilder::default()
.with_sequencer(sequencer_url.clone())
.with_sequencer_headers(sequencer_headers.clone())
.with_min_suggested_priority_fee(min_suggested_priority_fee)
.with_flashblocks(flashblocks_url),
PVB::default(),
EB::default(),
EVB::default(),
rpc_middleware,
)
.with_tokio_runtime(tokio_runtime),
da_config.unwrap_or_default(),
sequencer_url,
sequencer_headers,
historical_rpc,
enable_tx_conditional,
min_suggested_priority_fee,
)
}
}
/// A regular optimism evm and executor builder.
#[derive(Debug, Copy, Clone, Default)]
#[non_exhaustive]
pub struct OpExecutorBuilder;
impl<Node> ExecutorBuilder<Node> for OpExecutorBuilder
where
Node: FullNodeTypes<Types: NodeTypes<ChainSpec: OpHardforks, Primitives = OpPrimitives>>,
{
type EVM =
OpEvmConfig<<Node::Types as NodeTypes>::ChainSpec, <Node::Types as NodeTypes>::Primitives>;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let evm_config = OpEvmConfig::new(ctx.chain_spec(), OpRethReceiptBuilder::default());
Ok(evm_config)
}
}
/// A basic optimism transaction pool.
///
/// This contains various settings that can be configured and take precedence over the node's
/// config.
#[derive(Debug)]
pub struct OpPoolBuilder<T = crate::txpool::OpPooledTransaction> {
/// Enforced overrides that are applied to the pool config.
pub pool_config_overrides: PoolBuilderConfigOverrides,
/// Enable transaction conditionals.
pub enable_tx_conditional: bool,
/// Supervisor client url
pub supervisor_http: String,
/// Supervisor safety level
pub supervisor_safety_level: SafetyLevel,
/// Marker for the pooled transaction type.
_pd: core::marker::PhantomData<T>,
}
impl<T> Default for OpPoolBuilder<T> {
fn default() -> Self {
Self {
pool_config_overrides: Default::default(),
enable_tx_conditional: false,
supervisor_http: DEFAULT_SUPERVISOR_URL.to_string(),
supervisor_safety_level: SafetyLevel::CrossUnsafe,
_pd: Default::default(),
}
}
}
impl<T> Clone for OpPoolBuilder<T> {
fn clone(&self) -> Self {
Self {
pool_config_overrides: self.pool_config_overrides.clone(),
enable_tx_conditional: self.enable_tx_conditional,
supervisor_http: self.supervisor_http.clone(),
supervisor_safety_level: self.supervisor_safety_level,
_pd: core::marker::PhantomData,
}
}
}
impl<T> OpPoolBuilder<T> {
/// Sets the `enable_tx_conditional` flag on the pool builder.
pub const fn with_enable_tx_conditional(mut self, enable_tx_conditional: bool) -> Self {
self.enable_tx_conditional = enable_tx_conditional;
self
}
/// Sets the [`PoolBuilderConfigOverrides`] on the pool builder.
pub fn with_pool_config_overrides(
mut self,
pool_config_overrides: PoolBuilderConfigOverrides,
) -> Self {
self.pool_config_overrides = pool_config_overrides;
self
}
/// Sets the supervisor client
pub fn with_supervisor(
mut self,
supervisor_client: String,
supervisor_safety_level: SafetyLevel,
) -> Self {
self.supervisor_http = supervisor_client;
self.supervisor_safety_level = supervisor_safety_level;
self
}
}
impl<Node, T> PoolBuilder<Node> for OpPoolBuilder<T>
where
Node: FullNodeTypes<Types: NodeTypes<ChainSpec: OpHardforks>>,
T: EthPoolTransaction<Consensus = TxTy<Node::Types>> + OpPooledTx,
{
type Pool = OpTransactionPool<Node::Provider, DiskFileBlobStore, T>;
async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
let Self { pool_config_overrides, .. } = self;
// supervisor used for interop
if ctx.chain_spec().is_interop_active_at_timestamp(ctx.head().timestamp) &&
self.supervisor_http == DEFAULT_SUPERVISOR_URL
{
info!(target: "reth::cli",
url=%DEFAULT_SUPERVISOR_URL,
"Default supervisor url is used, consider changing --rollup.supervisor-http."
);
}
let supervisor_client = SupervisorClient::builder(self.supervisor_http.clone())
.minimum_safety(self.supervisor_safety_level)
.build()
.await;
let blob_store = reth_node_builder::components::create_blob_store(ctx)?;
let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone())
.no_eip4844()
.with_head_timestamp(ctx.head().timestamp)
.with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes)
.kzg_settings(ctx.kzg_settings()?)
.set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap)
.with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit)
.with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee)
.with_additional_tasks(
pool_config_overrides
.additional_validation_tasks
.unwrap_or_else(|| ctx.config().txpool.additional_validation_tasks),
)
.build_with_tasks(ctx.task_executor().clone(), blob_store.clone())
.map(|validator| {
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/engine.rs | crates/optimism/node/src/engine.rs | use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1};
use op_alloy_rpc_types_engine::{
OpExecutionData, OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4,
OpPayloadAttributes,
};
use reth_consensus::ConsensusError;
use reth_node_api::{
payload::{
validate_parent_beacon_block_root_presence, EngineApiMessageVersion,
EngineObjectValidationError, MessageValidationKind, NewPayloadError, PayloadOrAttributes,
PayloadTypes, VersionSpecificValidationError,
},
validate_version_specific_fields, BuiltPayload, EngineApiValidator, EngineTypes,
NodePrimitives, PayloadValidator,
};
use reth_optimism_consensus::isthmus;
use reth_optimism_forks::OpHardforks;
use reth_optimism_payload_builder::{OpExecutionPayloadValidator, OpPayloadTypes};
use reth_optimism_primitives::{OpBlock, ADDRESS_L2_TO_L1_MESSAGE_PASSER};
use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock, SignedTransaction};
use reth_provider::StateProviderFactory;
use reth_trie_common::{HashedPostState, KeyHasher};
use std::{marker::PhantomData, sync::Arc};
/// The types used in the optimism beacon consensus engine.
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
#[non_exhaustive]
pub struct OpEngineTypes<T: PayloadTypes = OpPayloadTypes> {
_marker: PhantomData<T>,
}
impl<T: PayloadTypes<ExecutionData = OpExecutionData>> PayloadTypes for OpEngineTypes<T> {
type ExecutionData = T::ExecutionData;
type BuiltPayload = T::BuiltPayload;
type PayloadAttributes = T::PayloadAttributes;
type PayloadBuilderAttributes = T::PayloadBuilderAttributes;
fn block_to_payload(
block: SealedBlock<
<<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block,
>,
) -> <T as PayloadTypes>::ExecutionData {
OpExecutionData::from_block_unchecked(
block.hash(),
&block.into_block().into_ethereum_block(),
)
}
}
impl<T: PayloadTypes<ExecutionData = OpExecutionData>> EngineTypes for OpEngineTypes<T>
where
T::BuiltPayload: BuiltPayload<Primitives: NodePrimitives<Block = OpBlock>>
+ TryInto<ExecutionPayloadV1>
+ TryInto<ExecutionPayloadEnvelopeV2>
+ TryInto<OpExecutionPayloadEnvelopeV3>
+ TryInto<OpExecutionPayloadEnvelopeV4>,
{
type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1;
type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2;
type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3;
type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4;
type ExecutionPayloadEnvelopeV5 = OpExecutionPayloadEnvelopeV4;
}
/// Validator for Optimism engine API.
#[derive(Debug)]
pub struct OpEngineValidator<P, Tx, ChainSpec> {
inner: OpExecutionPayloadValidator<ChainSpec>,
provider: P,
hashed_addr_l2tol1_msg_passer: B256,
phantom: PhantomData<Tx>,
}
impl<P, Tx, ChainSpec> OpEngineValidator<P, Tx, ChainSpec> {
/// Instantiates a new validator.
pub fn new<KH: KeyHasher>(chain_spec: Arc<ChainSpec>, provider: P) -> Self {
let hashed_addr_l2tol1_msg_passer = KH::hash_key(ADDRESS_L2_TO_L1_MESSAGE_PASSER);
Self {
inner: OpExecutionPayloadValidator::new(chain_spec),
provider,
hashed_addr_l2tol1_msg_passer,
phantom: PhantomData,
}
}
}
impl<P, Tx, ChainSpec> Clone for OpEngineValidator<P, Tx, ChainSpec>
where
P: Clone,
ChainSpec: OpHardforks,
{
fn clone(&self) -> Self {
Self {
inner: OpExecutionPayloadValidator::new(self.inner.clone()),
provider: self.provider.clone(),
hashed_addr_l2tol1_msg_passer: self.hashed_addr_l2tol1_msg_passer,
phantom: Default::default(),
}
}
}
impl<P, Tx, ChainSpec> OpEngineValidator<P, Tx, ChainSpec>
where
ChainSpec: OpHardforks,
{
/// Returns the chain spec used by the validator.
#[inline]
pub fn chain_spec(&self) -> &ChainSpec {
self.inner.chain_spec()
}
}
impl<P, Tx, ChainSpec, Types> PayloadValidator<Types> for OpEngineValidator<P, Tx, ChainSpec>
where
P: StateProviderFactory + Unpin + 'static,
Tx: SignedTransaction + Unpin + 'static,
ChainSpec: OpHardforks + Send + Sync + 'static,
Types: PayloadTypes<ExecutionData = OpExecutionData>,
{
type Block = alloy_consensus::Block<Tx>;
fn ensure_well_formed_payload(
&self,
payload: OpExecutionData,
) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> {
let sealed_block =
self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other)?;
sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into()))
}
fn validate_block_post_execution_with_hashed_state(
&self,
state_updates: &HashedPostState,
block: &RecoveredBlock<Self::Block>,
) -> Result<(), ConsensusError> {
if self.chain_spec().is_isthmus_active_at_timestamp(block.timestamp()) {
let Ok(state) = self.provider.state_by_block_hash(block.parent_hash()) else {
// FIXME: we don't necessarily have access to the parent block here because the
// parent block isn't necessarily part of the canonical chain yet. Instead this
// function should receive the list of in memory blocks as input
return Ok(())
};
let predeploy_storage_updates = state_updates
.storages
.get(&self.hashed_addr_l2tol1_msg_passer)
.cloned()
.unwrap_or_default();
isthmus::verify_withdrawals_root_prehashed(
predeploy_storage_updates,
state,
block.header(),
)
.map_err(|err| {
ConsensusError::Other(format!("failed to verify block post-execution: {err}"))
})?
}
Ok(())
}
}
impl<Types, P, Tx, ChainSpec> EngineApiValidator<Types> for OpEngineValidator<P, Tx, ChainSpec>
where
Types: PayloadTypes<
PayloadAttributes = OpPayloadAttributes,
ExecutionData = OpExecutionData,
BuiltPayload: BuiltPayload<Primitives: NodePrimitives<SignedTx = Tx>>,
>,
P: StateProviderFactory + Unpin + 'static,
Tx: SignedTransaction + Unpin + 'static,
ChainSpec: OpHardforks + Send + Sync + 'static,
{
fn validate_version_specific_fields(
&self,
version: EngineApiMessageVersion,
payload_or_attrs: PayloadOrAttributes<
'_,
Types::ExecutionData,
<Types as PayloadTypes>::PayloadAttributes,
>,
) -> Result<(), EngineObjectValidationError> {
validate_withdrawals_presence(
self.chain_spec(),
version,
payload_or_attrs.message_validation_kind(),
payload_or_attrs.timestamp(),
payload_or_attrs.withdrawals().is_some(),
)?;
validate_parent_beacon_block_root_presence(
self.chain_spec(),
version,
payload_or_attrs.message_validation_kind(),
payload_or_attrs.timestamp(),
payload_or_attrs.parent_beacon_block_root().is_some(),
)
}
fn ensure_well_formed_attributes(
&self,
version: EngineApiMessageVersion,
attributes: &<Types as PayloadTypes>::PayloadAttributes,
) -> Result<(), EngineObjectValidationError> {
validate_version_specific_fields(
self.chain_spec(),
version,
PayloadOrAttributes::<OpExecutionData, OpPayloadAttributes>::PayloadAttributes(
attributes,
),
)?;
if attributes.gas_limit.is_none() {
return Err(EngineObjectValidationError::InvalidParams(
"MissingGasLimitInPayloadAttributes".to_string().into(),
));
}
if self
.chain_spec()
.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp)
{
let (elasticity, denominator) =
attributes.decode_eip_1559_params().ok_or_else(|| {
EngineObjectValidationError::InvalidParams(
"MissingEip1559ParamsInPayloadAttributes".to_string().into(),
)
})?;
if elasticity != 0 && denominator == 0 {
return Err(EngineObjectValidationError::InvalidParams(
"Eip1559ParamsDenominatorZero".to_string().into(),
));
}
}
Ok(())
}
}
/// Validates the presence of the `withdrawals` field according to the payload timestamp.
///
/// After Canyon, withdrawals field must be [Some].
/// Before Canyon, withdrawals field must be [None];
///
/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details:
/// <https://github.com/ethereum-optimism/optimism/blob/ab926c5fd1e55b5c864341c44842d6d1ca679d99/specs/superchain-upgrades.md#canyon>
pub fn validate_withdrawals_presence(
chain_spec: impl OpHardforks,
version: EngineApiMessageVersion,
message_validation_kind: MessageValidationKind,
timestamp: u64,
has_withdrawals: bool,
) -> Result<(), EngineObjectValidationError> {
let is_shanghai = chain_spec.is_canyon_active_at_timestamp(timestamp);
match version {
EngineApiMessageVersion::V1 => {
if has_withdrawals {
return Err(message_validation_kind
.to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1));
}
if is_shanghai {
return Err(message_validation_kind
.to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai));
}
}
EngineApiMessageVersion::V2 |
EngineApiMessageVersion::V3 |
EngineApiMessageVersion::V4 |
EngineApiMessageVersion::V5 => {
if is_shanghai && !has_withdrawals {
return Err(message_validation_kind
.to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai));
}
if !is_shanghai && has_withdrawals {
return Err(message_validation_kind
.to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai));
}
}
};
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use crate::engine;
use alloy_primitives::{b64, Address, B256, B64};
use alloy_rpc_types_engine::PayloadAttributes;
use reth_chainspec::ChainSpec;
use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA};
use reth_provider::noop::NoopProvider;
use reth_trie_common::KeccakKeyHasher;
fn get_chainspec() -> Arc<OpChainSpec> {
Arc::new(OpChainSpec {
inner: ChainSpec {
chain: BASE_SEPOLIA.inner.chain,
genesis: BASE_SEPOLIA.inner.genesis.clone(),
genesis_header: BASE_SEPOLIA.inner.genesis_header.clone(),
paris_block_and_final_difficulty: BASE_SEPOLIA
.inner
.paris_block_and_final_difficulty,
hardforks: BASE_SEPOLIA.inner.hardforks.clone(),
base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(),
prune_delete_limit: 10000,
..Default::default()
},
})
}
const fn get_attributes(eip_1559_params: Option<B64>, timestamp: u64) -> OpPayloadAttributes {
OpPayloadAttributes {
gas_limit: Some(1000),
eip_1559_params,
transactions: None,
no_tx_pool: None,
payload_attributes: PayloadAttributes {
timestamp,
prev_randao: B256::ZERO,
suggested_fee_recipient: Address::ZERO,
withdrawals: Some(vec![]),
parent_beacon_block_root: Some(B256::ZERO),
},
}
}
#[test]
fn test_well_formed_attributes_pre_holocene() {
let validator =
OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
let attributes = get_attributes(None, 1732633199);
let result = <engine::OpEngineValidator<_, _, _> as EngineApiValidator<
OpEngineTypes,
>>::ensure_well_formed_attributes(
&validator, EngineApiMessageVersion::V3, &attributes,
);
assert!(result.is_ok());
}
#[test]
fn test_well_formed_attributes_holocene_no_eip1559_params() {
let validator =
OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
let attributes = get_attributes(None, 1732633200);
let result = <engine::OpEngineValidator<_, _, _> as EngineApiValidator<
OpEngineTypes,
>>::ensure_well_formed_attributes(
&validator, EngineApiMessageVersion::V3, &attributes,
);
assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_))));
}
#[test]
fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() {
let validator =
OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
let attributes = get_attributes(Some(b64!("0000000000000008")), 1732633200);
let result = <engine::OpEngineValidator<_, _, _> as EngineApiValidator<
OpEngineTypes,
>>::ensure_well_formed_attributes(
&validator, EngineApiMessageVersion::V3, &attributes,
);
assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_))));
}
#[test]
fn test_well_formed_attributes_holocene_valid() {
let validator =
OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
let attributes = get_attributes(Some(b64!("0000000800000008")), 1732633200);
let result = <engine::OpEngineValidator<_, _, _> as EngineApiValidator<
OpEngineTypes,
>>::ensure_well_formed_attributes(
&validator, EngineApiMessageVersion::V3, &attributes,
);
assert!(result.is_ok());
}
#[test]
fn test_well_formed_attributes_holocene_valid_all_zero() {
let validator =
OpEngineValidator::new::<KeccakKeyHasher>(get_chainspec(), NoopProvider::default());
let attributes = get_attributes(Some(b64!("0000000000000000")), 1732633200);
let result = <engine::OpEngineValidator<_, _, _> as EngineApiValidator<
OpEngineTypes,
>>::ensure_well_formed_attributes(
&validator, EngineApiMessageVersion::V3, &attributes,
);
assert!(result.is_ok());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/lib.rs | crates/optimism/node/src/lib.rs | //! Standalone crate for Optimism-specific Reth configuration and builder types.
//!
//! # features
//! - `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
/// CLI argument parsing for the optimism node.
pub mod args;
/// Exports optimism-specific implementations of the [`EngineTypes`](reth_node_api::EngineTypes)
/// trait.
pub mod engine;
pub use engine::OpEngineTypes;
pub mod node;
pub use node::*;
pub mod rpc;
pub use rpc::OpEngineApiBuilder;
pub mod version;
pub use version::OP_NAME_CLIENT;
pub use reth_optimism_txpool as txpool;
/// Helpers for running test node instances.
#[cfg(feature = "test-utils")]
pub mod utils;
pub use reth_optimism_payload_builder::{
self as payload, config::OpDAConfig, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilder,
OpPayloadBuilderAttributes, OpPayloadPrimitives, OpPayloadTypes,
};
pub use reth_optimism_evm::*;
pub use reth_optimism_storage::OpStorage;
use op_revm as _;
use revm as _;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/version.rs | crates/optimism/node/src/version.rs | //! Version information for op-reth.
/// The human readable name of the client
pub const OP_NAME_CLIENT: &str = "OP-Reth";
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/args.rs | crates/optimism/node/src/args.rs | //! Additional Node command arguments.
//! clap [Args](clap::Args) for optimism rollup configuration
use op_alloy_consensus::interop::SafetyLevel;
use reth_optimism_txpool::supervisor::DEFAULT_SUPERVISOR_URL;
use url::Url;
/// Parameters for rollup configuration
#[derive(Debug, Clone, PartialEq, Eq, clap::Args)]
#[command(next_help_heading = "Rollup")]
pub struct RollupArgs {
/// Endpoint for the sequencer mempool (can be both HTTP and WS)
#[arg(long = "rollup.sequencer", visible_aliases = ["rollup.sequencer-http", "rollup.sequencer-ws"])]
pub sequencer: Option<String>,
/// Disable transaction pool gossip
#[arg(long = "rollup.disable-tx-pool-gossip")]
pub disable_txpool_gossip: bool,
/// By default the pending block equals the latest block
/// to save resources and not leak txs from the tx-pool,
/// this flag enables computing of the pending block
/// from the tx-pool instead.
///
/// If `compute_pending_block` is not enabled, the payload builder
/// will use the payload attributes from the latest block. Note
/// that this flag is not yet functional.
#[arg(long = "rollup.compute-pending-block")]
pub compute_pending_block: bool,
/// enables discovery v4 if provided
#[arg(long = "rollup.discovery.v4", default_value = "false")]
pub discovery_v4: bool,
/// Enable transaction conditional support on sequencer
#[arg(long = "rollup.enable-tx-conditional", default_value = "false")]
pub enable_tx_conditional: bool,
/// HTTP endpoint for the supervisor
#[arg(
long = "rollup.supervisor-http",
value_name = "SUPERVISOR_HTTP_URL",
default_value = DEFAULT_SUPERVISOR_URL
)]
pub supervisor_http: String,
/// Safety level for the supervisor
#[arg(
long = "rollup.supervisor-safety-level",
default_value_t = SafetyLevel::CrossUnsafe,
)]
pub supervisor_safety_level: SafetyLevel,
/// Optional headers to use when connecting to the sequencer.
#[arg(long = "rollup.sequencer-headers", requires = "sequencer")]
pub sequencer_headers: Vec<String>,
/// RPC endpoint for historical data.
#[arg(
long = "rollup.historicalrpc",
alias = "rollup.historical-rpc",
value_name = "HISTORICAL_HTTP_URL"
)]
pub historical_rpc: Option<String>,
/// Minimum suggested priority fee (tip) in wei, default `1_000_000`
#[arg(long, default_value_t = 1_000_000)]
pub min_suggested_priority_fee: u64,
/// A URL pointing to a secure websocket subscription that streams out flashblocks.
///
/// If given, the flashblocks are received to build pending block. All request with "pending"
/// block tag will use the pending state based on flashblocks.
#[arg(long)]
pub flashblocks_url: Option<Url>,
}
impl Default for RollupArgs {
fn default() -> Self {
Self {
sequencer: None,
disable_txpool_gossip: false,
compute_pending_block: false,
discovery_v4: false,
enable_tx_conditional: false,
supervisor_http: DEFAULT_SUPERVISOR_URL.to_string(),
supervisor_safety_level: SafetyLevel::CrossUnsafe,
sequencer_headers: Vec::new(),
historical_rpc: None,
min_suggested_priority_fee: 1_000_000,
flashblocks_url: None,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use clap::{Args, Parser};
/// A helper type to parse Args more easily
#[derive(Parser)]
struct CommandParser<T: Args> {
#[command(flatten)]
args: T,
}
#[test]
fn test_parse_optimism_default_args() {
let default_args = RollupArgs::default();
let args = CommandParser::<RollupArgs>::parse_from(["reth"]).args;
assert_eq!(args, default_args);
}
#[test]
fn test_parse_optimism_compute_pending_block_args() {
let expected_args = RollupArgs { compute_pending_block: true, ..Default::default() };
let args =
CommandParser::<RollupArgs>::parse_from(["reth", "--rollup.compute-pending-block"])
.args;
assert_eq!(args, expected_args);
}
#[test]
fn test_parse_optimism_discovery_v4_args() {
let expected_args = RollupArgs { discovery_v4: true, ..Default::default() };
let args = CommandParser::<RollupArgs>::parse_from(["reth", "--rollup.discovery.v4"]).args;
assert_eq!(args, expected_args);
}
#[test]
fn test_parse_optimism_sequencer_http_args() {
let expected_args =
RollupArgs { sequencer: Some("http://host:port".into()), ..Default::default() };
let args = CommandParser::<RollupArgs>::parse_from([
"reth",
"--rollup.sequencer-http",
"http://host:port",
])
.args;
assert_eq!(args, expected_args);
}
#[test]
fn test_parse_optimism_disable_txpool_args() {
let expected_args = RollupArgs { disable_txpool_gossip: true, ..Default::default() };
let args =
CommandParser::<RollupArgs>::parse_from(["reth", "--rollup.disable-tx-pool-gossip"])
.args;
assert_eq!(args, expected_args);
}
#[test]
fn test_parse_optimism_enable_tx_conditional() {
let expected_args = RollupArgs { enable_tx_conditional: true, ..Default::default() };
let args =
CommandParser::<RollupArgs>::parse_from(["reth", "--rollup.enable-tx-conditional"])
.args;
assert_eq!(args, expected_args);
}
#[test]
fn test_parse_optimism_many_args() {
let expected_args = RollupArgs {
disable_txpool_gossip: true,
compute_pending_block: true,
enable_tx_conditional: true,
sequencer: Some("http://host:port".into()),
..Default::default()
};
let args = CommandParser::<RollupArgs>::parse_from([
"reth",
"--rollup.disable-tx-pool-gossip",
"--rollup.compute-pending-block",
"--rollup.enable-tx-conditional",
"--rollup.sequencer-http",
"http://host:port",
])
.args;
assert_eq!(args, expected_args);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/utils.rs | crates/optimism/node/src/utils.rs | use crate::{OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes};
use alloy_genesis::Genesis;
use alloy_primitives::{Address, B256};
use alloy_rpc_types_engine::PayloadAttributes;
use reth_e2e_test_utils::{
transaction::TransactionTestContext, wallet::Wallet, NodeHelperType, TmpDB,
};
use reth_node_api::NodeTypesWithDBAdapter;
use reth_optimism_chainspec::OpChainSpecBuilder;
use reth_payload_builder::EthPayloadBuilderAttributes;
use reth_provider::providers::BlockchainProvider;
use reth_tasks::TaskManager;
use std::sync::Arc;
use tokio::sync::Mutex;
/// Optimism Node Helper type
pub(crate) type OpNode =
NodeHelperType<OtherOpNode, BlockchainProvider<NodeTypesWithDBAdapter<OtherOpNode, TmpDB>>>;
/// Creates the initial setup with `num_nodes` of the node config, started and connected.
pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec<OpNode>, TaskManager, Wallet)> {
let genesis: Genesis =
serde_json::from_str(include_str!("../tests/assets/genesis.json")).unwrap();
reth_e2e_test_utils::setup_engine(
num_nodes,
Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()),
false,
Default::default(),
optimism_payload_attributes,
)
.await
}
/// Advance the chain with sequential payloads returning them in the end.
pub async fn advance_chain(
length: usize,
node: &mut OpNode,
wallet: Arc<Mutex<Wallet>>,
) -> eyre::Result<Vec<OpBuiltPayload>> {
node.advance(length as u64, |_| {
let wallet = wallet.clone();
Box::pin(async move {
let mut wallet = wallet.lock().await;
let tx_fut = TransactionTestContext::optimism_l1_block_info_tx(
wallet.chain_id,
wallet.inner.clone(),
wallet.inner_nonce,
);
wallet.inner_nonce += 1;
tx_fut.await
})
})
.await
}
/// Helper function to create a new eth payload attributes
pub fn optimism_payload_attributes<T>(timestamp: u64) -> OpPayloadBuilderAttributes<T> {
let attributes = PayloadAttributes {
timestamp,
prev_randao: B256::ZERO,
suggested_fee_recipient: Address::ZERO,
withdrawals: Some(vec![]),
parent_beacon_block_root: Some(B256::ZERO),
};
OpPayloadBuilderAttributes {
payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes),
transactions: vec![],
no_tx_pool: false,
gas_limit: Some(30_000_000),
eip_1559_params: None,
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/src/rpc.rs | crates/optimism/node/src/rpc.rs | //! RPC component builder
//!
//! # Example
//!
//! Builds offline `TraceApi` with only EVM and database. This can be useful
//! for example when downloading a state snapshot (pre-synced node) from some mirror.
//!
//! ```rust
//! use alloy_rpc_types_eth::BlockId;
//! use op_alloy_network::Optimism;
//! use reth_db::test_utils::create_test_rw_db_with_path;
//! use reth_node_builder::{
//! components::ComponentsBuilder,
//! hooks::OnComponentInitializedHook,
//! rpc::{EthApiBuilder, EthApiCtx},
//! LaunchContext, NodeConfig, RethFullAdapter,
//! };
//! use reth_optimism_chainspec::OP_SEPOLIA;
//! use reth_optimism_evm::OpEvmConfig;
//! use reth_optimism_node::{OpExecutorBuilder, OpNetworkPrimitives, OpNode};
//! use reth_optimism_rpc::OpEthApiBuilder;
//! use reth_optimism_txpool::OpPooledTransaction;
//! use reth_provider::providers::BlockchainProvider;
//! use reth_rpc::TraceApi;
//! use reth_rpc_eth_types::{EthConfig, EthStateCache};
//! use reth_tasks::{pool::BlockingTaskGuard, TaskManager};
//! use std::sync::Arc;
//!
//! #[tokio::main]
//! async fn main() {
//! // build core node with all components disabled except EVM and state
//! let sepolia = NodeConfig::new(OP_SEPOLIA.clone());
//! let db = create_test_rw_db_with_path(sepolia.datadir());
//! let tasks = TaskManager::current();
//! let launch_ctx = LaunchContext::new(tasks.executor(), sepolia.datadir());
//! let node = launch_ctx
//! .with_loaded_toml_config(sepolia)
//! .unwrap()
//! .attach(Arc::new(db))
//! .with_provider_factory::<_, OpEvmConfig>()
//! .await
//! .unwrap()
//! .with_genesis()
//! .unwrap()
//! .with_metrics_task() // todo: shouldn't be req to set up blockchain db
//! .with_blockchain_db::<RethFullAdapter<_, OpNode>, _>(move |provider_factory| {
//! Ok(BlockchainProvider::new(provider_factory).unwrap())
//! })
//! .unwrap()
//! .with_components(
//! ComponentsBuilder::default()
//! .node_types::<RethFullAdapter<_, OpNode>>()
//! .noop_pool::<OpPooledTransaction>()
//! .noop_network::<OpNetworkPrimitives>()
//! .noop_consensus()
//! .executor(OpExecutorBuilder::default())
//! .noop_payload(),
//! Box::new(()) as Box<dyn OnComponentInitializedHook<_>>,
//! )
//! .await
//! .unwrap();
//!
//! // build `eth` namespace API
//! let config = EthConfig::default();
//! let cache = EthStateCache::spawn_with(
//! node.provider_factory().clone(),
//! config.cache,
//! node.task_executor().clone(),
//! );
//! let ctx = EthApiCtx { components: node.node_adapter(), config, cache };
//! let eth_api = OpEthApiBuilder::<Optimism>::default().build_eth_api(ctx).await.unwrap();
//!
//! // build `trace` namespace API
//! let trace_api = TraceApi::new(eth_api, BlockingTaskGuard::new(10), EthConfig::default());
//!
//! // fetch traces for latest block
//! let traces = trace_api.trace_block(BlockId::latest()).await.unwrap();
//! }
//! ```
pub use reth_optimism_rpc::{OpEngineApi, OpEthApi, OpEthApiBuilder};
use crate::OP_NAME_CLIENT;
use alloy_rpc_types_engine::ClientVersionV1;
use op_alloy_rpc_types_engine::OpExecutionData;
use reth_chainspec::EthereumHardforks;
use reth_node_api::{
AddOnsContext, EngineApiValidator, EngineTypes, FullNodeComponents, NodeTypes,
};
use reth_node_builder::rpc::{EngineApiBuilder, PayloadValidatorBuilder};
use reth_node_core::version::{version_metadata, CLIENT_CODE};
use reth_optimism_rpc::engine::OP_ENGINE_CAPABILITIES;
use reth_payload_builder::PayloadStore;
use reth_rpc_engine_api::{EngineApi, EngineCapabilities};
/// Builder for basic [`OpEngineApi`] implementation.
#[derive(Debug, Default, Clone)]
pub struct OpEngineApiBuilder<EV> {
engine_validator_builder: EV,
}
impl<N, EV> EngineApiBuilder<N> for OpEngineApiBuilder<EV>
where
N: FullNodeComponents<
Types: NodeTypes<
ChainSpec: EthereumHardforks,
Payload: EngineTypes<ExecutionData = OpExecutionData>,
>,
>,
EV: PayloadValidatorBuilder<N>,
EV::Validator: EngineApiValidator<<N::Types as NodeTypes>::Payload>,
{
type EngineApi = OpEngineApi<
N::Provider,
<N::Types as NodeTypes>::Payload,
N::Pool,
EV::Validator,
<N::Types as NodeTypes>::ChainSpec,
>;
async fn build_engine_api(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::EngineApi> {
let Self { engine_validator_builder } = self;
let engine_validator = engine_validator_builder.build(ctx).await?;
let client = ClientVersionV1 {
code: CLIENT_CODE,
name: OP_NAME_CLIENT.to_string(),
version: version_metadata().cargo_pkg_version.to_string(),
commit: version_metadata().vergen_git_sha.to_string(),
};
let inner = EngineApi::new(
ctx.node.provider().clone(),
ctx.config.chain.clone(),
ctx.beacon_engine_handle.clone(),
PayloadStore::new(ctx.node.payload_builder_handle().clone()),
ctx.node.pool().clone(),
Box::new(ctx.node.task_executor().clone()),
client,
EngineCapabilities::new(OP_ENGINE_CAPABILITIES.iter().copied()),
engine_validator,
ctx.config.engine.accept_execution_requests_hash,
);
Ok(OpEngineApi::new(inner))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/tests/it/builder.rs | crates/optimism/node/tests/it/builder.rs | //! Node builder setup tests.
use reth_db::test_utils::create_test_rw_db;
use reth_node_api::{FullNodeComponents, NodeTypesWithDBAdapter};
use reth_node_builder::{Node, NodeBuilder, NodeConfig};
use reth_optimism_chainspec::BASE_MAINNET;
use reth_optimism_node::{args::RollupArgs, OpNode};
use reth_provider::providers::BlockchainProvider;
#[test]
fn test_basic_setup() {
// parse CLI -> config
let config = NodeConfig::new(BASE_MAINNET.clone());
let db = create_test_rw_db();
let args = RollupArgs::default();
let op_node = OpNode::new(args);
let _builder = NodeBuilder::new(config)
.with_database(db)
.with_types_and_provider::<OpNode, BlockchainProvider<NodeTypesWithDBAdapter<OpNode, _>>>()
.with_components(op_node.components())
.with_add_ons(op_node.add_ons())
.on_component_initialized(move |ctx| {
let _provider = ctx.provider();
Ok(())
})
.on_node_started(|_full_node| Ok(()))
.on_rpc_started(|_ctx, handles| {
let _client = handles.rpc.http_client();
Ok(())
})
.extend_rpc_modules(|ctx| {
let _ = ctx.config();
let _ = ctx.node().provider();
Ok(())
})
.check_launch();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/tests/it/main.rs | crates/optimism/node/tests/it/main.rs | #![allow(missing_docs)]
mod builder;
mod priority;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/tests/it/priority.rs | crates/optimism/node/tests/it/priority.rs | //! Node builder test that customizes priority of transactions in the block.
use alloy_consensus::{transaction::Recovered, SignableTransaction, Transaction, TxEip1559};
use alloy_genesis::Genesis;
use alloy_network::TxSignerSync;
use alloy_primitives::{Address, ChainId, TxKind};
use reth_chainspec::EthChainSpec;
use reth_db::test_utils::create_test_rw_db_with_path;
use reth_e2e_test_utils::{
node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet,
};
use reth_node_api::FullNodeTypes;
use reth_node_builder::{
components::{BasicPayloadServiceBuilder, ComponentsBuilder},
EngineNodeLauncher, Node, NodeBuilder, NodeConfig,
};
use reth_node_core::args::DatadirArgs;
use reth_optimism_chainspec::OpChainSpecBuilder;
use reth_optimism_node::{
args::RollupArgs,
node::{
OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpNodeComponentBuilder,
OpNodeTypes, OpPayloadBuilder, OpPoolBuilder,
},
txpool::OpPooledTransaction,
utils::optimism_payload_attributes,
OpNode,
};
use reth_optimism_payload_builder::builder::OpPayloadTransactions;
use reth_payload_util::{
BestPayloadTransactions, PayloadTransactions, PayloadTransactionsChain,
PayloadTransactionsFixed,
};
use reth_provider::providers::BlockchainProvider;
use reth_tasks::TaskManager;
use reth_transaction_pool::PoolTransaction;
use std::sync::Arc;
use tokio::sync::Mutex;
#[derive(Clone, Debug)]
struct CustomTxPriority {
chain_id: ChainId,
}
impl OpPayloadTransactions<OpPooledTransaction> for CustomTxPriority {
fn best_transactions<Pool>(
&self,
pool: Pool,
attr: reth_transaction_pool::BestTransactionsAttributes,
) -> impl PayloadTransactions<Transaction = OpPooledTransaction>
where
Pool: reth_transaction_pool::TransactionPool<Transaction = OpPooledTransaction>,
{
// Block composition:
// 1. Best transactions from the pool (up to 250k gas)
// 2. End-of-block transaction created by the node (up to 100k gas)
// End of block transaction should send a 0-value transfer to a random address.
let sender = Wallet::default().inner;
let mut end_of_block_tx = TxEip1559 {
chain_id: self.chain_id,
nonce: 1, // it will be 2nd tx after L1 block info tx that uses the same sender
gas_limit: 21000,
max_fee_per_gas: 20e9 as u128,
to: TxKind::Call(Address::random()),
value: 0.try_into().unwrap(),
..Default::default()
};
let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap();
let end_of_block_tx = OpPooledTransaction::from_pooled(Recovered::new_unchecked(
op_alloy_consensus::OpPooledTransaction::Eip1559(
end_of_block_tx.into_signed(signature),
),
sender.address(),
));
PayloadTransactionsChain::new(
BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)),
// Allow 250k gas for the transactions from the pool
Some(250_000),
PayloadTransactionsFixed::single(end_of_block_tx),
// Allow 100k gas for the end-of-block transaction
Some(100_000),
)
}
}
/// Builds the node with custom transaction priority service within default payload builder.
fn build_components<Node>(
chain_id: ChainId,
) -> OpNodeComponentBuilder<Node, OpPayloadBuilder<CustomTxPriority>>
where
Node: FullNodeTypes<Types: OpNodeTypes>,
{
let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } =
RollupArgs::default();
ComponentsBuilder::default()
.node_types::<Node>()
.pool(OpPoolBuilder::default())
.executor(OpExecutorBuilder::default())
.payload(BasicPayloadServiceBuilder::new(
OpPayloadBuilder::new(compute_pending_block)
.with_transactions(CustomTxPriority { chain_id }),
))
.network(OpNetworkBuilder::new(disable_txpool_gossip, !discovery_v4))
.consensus(OpConsensusBuilder::default())
}
#[tokio::test]
async fn test_custom_block_priority_config() {
reth_tracing::init_test_tracing();
let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap();
let chain_spec =
Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build());
// This wallet is going to send:
// 1. L1 block info tx
// 2. End-of-block custom tx
let wallet = Arc::new(Mutex::new(Wallet::default().with_chain_id(chain_spec.chain().into())));
// Configure and launch the node.
let config = NodeConfig::new(chain_spec).with_datadir_args(DatadirArgs {
datadir: reth_db::test_utils::tempdir_path().into(),
..Default::default()
});
let db = create_test_rw_db_with_path(
config
.datadir
.datadir
.unwrap_or_chain_default(config.chain.chain(), config.datadir.clone())
.db(),
);
let tasks = TaskManager::current();
let node_handle = NodeBuilder::new(config.clone())
.with_database(db)
.with_types_and_provider::<OpNode, BlockchainProvider<_>>()
.with_components(build_components(config.chain.chain_id()))
.with_add_ons(OpNode::new(Default::default()).add_ons())
.launch_with_fn(|builder| {
let launcher = EngineNodeLauncher::new(
tasks.executor(),
builder.config.datadir(),
Default::default(),
);
builder.launch_with(launcher)
})
.await
.expect("Failed to launch node");
// Advance the chain with a single block.
let block_payloads = NodeTestContext::new(node_handle.node, optimism_payload_attributes)
.await
.unwrap()
.advance(1, |_| {
let wallet = wallet.clone();
Box::pin(async move {
let mut wallet = wallet.lock().await;
let tx_fut = TransactionTestContext::optimism_l1_block_info_tx(
wallet.chain_id,
wallet.inner.clone(),
// This doesn't matter in the current test (because it's only one block),
// but make sure you're not reusing the nonce from end-of-block tx
// if they have the same signer.
wallet.inner_nonce * 2,
);
wallet.inner_nonce += 1;
tx_fut.await
})
})
.await
.unwrap();
assert_eq!(block_payloads.len(), 1);
let block_payload = block_payloads.first().unwrap();
let block = block_payload.block();
assert_eq!(block.body().transactions.len(), 2); // L1 block info tx + end-of-block custom tx
// Check that last transaction in the block looks like a transfer to a random address.
let end_of_block_tx = block.body().transactions.last().unwrap();
let Some(tx) = end_of_block_tx.as_eip1559() else {
panic!("expected EIP-1559 transaction");
};
assert_eq!(tx.tx().nonce(), 1);
assert_eq!(tx.tx().gas_limit(), 21_000);
assert!(tx.tx().input().is_empty());
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/tests/e2e-testsuite/p2p.rs | crates/optimism/node/tests/e2e-testsuite/p2p.rs | use futures::StreamExt;
use reth_optimism_node::utils::{advance_chain, setup};
use std::sync::Arc;
use tokio::sync::Mutex;
#[tokio::test]
async fn can_sync() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
let (mut nodes, _tasks, wallet) = setup(3).await?;
let wallet = Arc::new(Mutex::new(wallet));
let third_node = nodes.pop().unwrap();
let mut second_node = nodes.pop().unwrap();
let mut first_node = nodes.pop().unwrap();
let tip: usize = 90;
let tip_index: usize = tip - 1;
let reorg_depth = 2;
// On first node, create a chain up to block number 90a
let canonical_payload_chain = advance_chain(tip, &mut first_node, wallet.clone()).await?;
let canonical_chain =
canonical_payload_chain.iter().map(|p| p.block().hash()).collect::<Vec<_>>();
// On second node, sync optimistically up to block number 88a
second_node.update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth - 1]).await?;
second_node
.wait_block(
(tip - reorg_depth - 1) as u64,
canonical_chain[tip_index - reorg_depth - 1],
true,
)
.await?;
// We send FCU twice to ensure that pool receives canonical chain update on the second FCU
// This is required because notifications are not sent during backfill sync
second_node.update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]).await?;
second_node
.wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], false)
.await?;
second_node.canonical_stream.next().await.unwrap();
// Trigger backfill sync until block 80
third_node
.update_forkchoice(canonical_chain[tip_index - 10], canonical_chain[tip_index - 10])
.await?;
third_node.wait_block((tip - 10) as u64, canonical_chain[tip_index - 10], true).await?;
// Trigger live sync to block 90
third_node.update_optimistic_forkchoice(canonical_chain[tip_index]).await?;
third_node.wait_block(tip as u64, canonical_chain[tip_index], false).await?;
// On second node, create a side chain: 88a -> 89b -> 90b
wallet.lock().await.inner_nonce -= reorg_depth as u64;
second_node.payload.timestamp = first_node.payload.timestamp - reorg_depth as u64; // TODO: probably want to make it node agnostic
let side_payload_chain = advance_chain(reorg_depth, &mut second_node, wallet.clone()).await?;
let side_chain = side_payload_chain.iter().map(|p| p.block().hash()).collect::<Vec<_>>();
// Creates fork chain by submitting 89b payload.
// By returning Valid here, op-node will finally return a finalized hash
let _ = third_node.submit_payload(side_payload_chain[0].clone()).await;
// It will issue a pipeline reorg to 88a, and then make 89b canonical AND finalized.
third_node.update_forkchoice(side_chain[0], side_chain[0]).await?;
// Make sure we have the updated block
third_node.wait_unwind((tip - reorg_depth) as u64).await?;
third_node
.wait_block(
side_payload_chain[0].block().number,
side_payload_chain[0].block().hash(),
false,
)
.await?;
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/tests/e2e-testsuite/testsuite.rs | crates/optimism/node/tests/e2e-testsuite/testsuite.rs | use alloy_primitives::{Address, B256};
use eyre::Result;
use op_alloy_rpc_types_engine::OpPayloadAttributes;
use reth_e2e_test_utils::testsuite::{
actions::AssertMineBlock,
setup::{NetworkSetup, Setup},
TestBuilder,
};
use reth_optimism_chainspec::{OpChainSpecBuilder, OP_MAINNET};
use reth_optimism_node::{OpEngineTypes, OpNode};
use std::sync::Arc;
#[tokio::test]
async fn test_testsuite_op_assert_mine_block() -> Result<()> {
reth_tracing::init_test_tracing();
let setup = Setup::default()
.with_chain_spec(Arc::new(
OpChainSpecBuilder::default()
.chain(OP_MAINNET.chain)
.genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap())
.build()
.into(),
))
.with_network(NetworkSetup::single_node());
let test =
TestBuilder::new().with_setup(setup).with_action(AssertMineBlock::<OpEngineTypes>::new(
0,
vec![],
Some(B256::ZERO),
// TODO: refactor once we have actions to generate payload attributes.
OpPayloadAttributes {
payload_attributes: alloy_rpc_types_engine::PayloadAttributes {
timestamp: std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs(),
prev_randao: B256::random(),
suggested_fee_recipient: Address::random(),
withdrawals: None,
parent_beacon_block_root: None,
},
transactions: None,
no_tx_pool: None,
eip_1559_params: None,
gas_limit: Some(30_000_000),
},
));
test.run::<OpNode>().await?;
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/node/tests/e2e-testsuite/main.rs | crates/optimism/node/tests/e2e-testsuite/main.rs | #![allow(missing_docs)]
mod p2p;
mod testsuite;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/miner.rs | crates/optimism/rpc/src/miner.rs | //! Miner API extension for OP.
use alloy_primitives::U64;
use jsonrpsee_core::{async_trait, RpcResult};
pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer;
use reth_metrics::{metrics::Gauge, Metrics};
use reth_optimism_payload_builder::config::OpDAConfig;
use tracing::debug;
/// Miner API extension for OP, exposes settings for the data availability configuration via the
/// `miner_` API.
#[derive(Debug, Clone)]
pub struct OpMinerExtApi {
da_config: OpDAConfig,
metrics: OpMinerMetrics,
}
impl OpMinerExtApi {
/// Instantiate the miner API extension with the given, sharable data availability
/// configuration.
pub fn new(da_config: OpDAConfig) -> Self {
Self { da_config, metrics: OpMinerMetrics::default() }
}
}
#[async_trait]
impl MinerApiExtServer for OpMinerExtApi {
/// Handler for `miner_setMaxDASize` RPC method.
async fn set_max_da_size(&self, max_tx_size: U64, max_block_size: U64) -> RpcResult<bool> {
debug!(target: "rpc", "Setting max DA size: tx={}, block={}", max_tx_size, max_block_size);
self.da_config.set_max_da_size(max_tx_size.to(), max_block_size.to());
self.metrics.set_max_da_tx_size(max_tx_size.to());
self.metrics.set_max_da_block_size(max_block_size.to());
Ok(true)
}
}
/// Optimism miner metrics
#[derive(Metrics, Clone)]
#[metrics(scope = "optimism_rpc.miner")]
pub struct OpMinerMetrics {
/// Max DA tx size set on the miner
max_da_tx_size: Gauge,
/// Max DA block size set on the miner
max_da_block_size: Gauge,
}
impl OpMinerMetrics {
/// Sets the max DA tx size gauge value
#[inline]
pub fn set_max_da_tx_size(&self, size: u64) {
self.max_da_tx_size.set(size as f64);
}
/// Sets the max DA block size gauge value
#[inline]
pub fn set_max_da_block_size(&self, size: u64) {
self.max_da_block_size.set(size as f64);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/engine.rs | crates/optimism/rpc/src/engine.rs | //! Implements the Optimism engine API RPC methods.
use alloy_eips::eip7685::Requests;
use alloy_primitives::{BlockHash, B256, B64, U64};
use alloy_rpc_types_engine::{
ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV3,
ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus,
};
use derive_more::Constructor;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee_core::{server::RpcModule, RpcResult};
use op_alloy_rpc_types_engine::{
OpExecutionData, OpExecutionPayloadV4, ProtocolVersion, ProtocolVersionFormatV0,
SuperchainSignal,
};
use reth_chainspec::EthereumHardforks;
use reth_node_api::{EngineApiValidator, EngineTypes};
use reth_rpc_api::IntoEngineApiRpcModule;
use reth_rpc_engine_api::EngineApi;
use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory};
use reth_transaction_pool::TransactionPool;
use tracing::{debug, info, trace};
/// The list of all supported Engine capabilities available over the engine endpoint.
///
/// Spec: <https://specs.optimism.io/protocol/exec-engine.html>
pub const OP_ENGINE_CAPABILITIES: &[&str] = &[
"engine_forkchoiceUpdatedV1",
"engine_forkchoiceUpdatedV2",
"engine_forkchoiceUpdatedV3",
"engine_getClientVersionV1",
"engine_getPayloadV2",
"engine_getPayloadV3",
"engine_getPayloadV4",
"engine_newPayloadV2",
"engine_newPayloadV3",
"engine_newPayloadV4",
"engine_getPayloadBodiesByHashV1",
"engine_getPayloadBodiesByRangeV1",
"engine_signalSuperchainV1",
];
/// OP Stack protocol version
/// See also: <https://github.com/ethereum-optimism/op-geth/blob/c3a989eb882d150a936df27bcfa791838b474d55/params/superchain.go#L13-L13>
pub const OP_STACK_SUPPORT: ProtocolVersion = ProtocolVersion::V0(ProtocolVersionFormatV0 {
build: B64::ZERO,
major: 9,
minor: 0,
patch: 0,
pre_release: 0,
});
/// Extension trait that gives access to Optimism engine API RPC methods.
///
/// Note:
/// > The provider should use a JWT authentication layer.
///
/// This follows the Optimism specs that can be found at:
/// <https://specs.optimism.io/protocol/exec-engine.html#engine-api>
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "engine"), server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "engine", client_bounds(Engine::PayloadAttributes: jsonrpsee::core::Serialize + Clone), server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned)))]
pub trait OpEngineApi<Engine: EngineTypes> {
/// Sends the given payload to the execution layer client, as specified for the Shanghai fork.
///
/// See also <https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/shanghai.md#engine_newpayloadv2>
///
/// No modifications needed for OP compatibility.
#[method(name = "newPayloadV2")]
async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult<PayloadStatus>;
/// Sends the given payload to the execution layer client, as specified for the Cancun fork.
///
/// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_newpayloadv3>
///
/// OP modifications:
/// - expected versioned hashes MUST be an empty array: therefore the `versioned_hashes`
/// parameter is removed.
/// - parent beacon block root MUST be the parent beacon block root from the L1 origin block of
/// the L2 block.
/// - blob versioned hashes MUST be empty list.
#[method(name = "newPayloadV3")]
async fn new_payload_v3(
&self,
payload: ExecutionPayloadV3,
versioned_hashes: Vec<B256>,
parent_beacon_block_root: B256,
) -> RpcResult<PayloadStatus>;
/// Sends the given payload to the execution layer client, as specified for the Prague fork.
///
/// See also <https://github.com/ethereum/execution-apis/blob/03911ffc053b8b806123f1fc237184b0092a485a/src/engine/prague.md#engine_newpayloadv4>
///
/// - blob versioned hashes MUST be empty list.
/// - execution layer requests MUST be empty list.
#[method(name = "newPayloadV4")]
async fn new_payload_v4(
&self,
payload: OpExecutionPayloadV4,
versioned_hashes: Vec<B256>,
parent_beacon_block_root: B256,
execution_requests: Requests,
) -> RpcResult<PayloadStatus>;
/// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_forkchoiceupdatedv1>
///
/// This exists because it is used by op-node: <https://github.com/ethereum-optimism/optimism/blob/0bc5fe8d16155dc68bcdf1fa5733abc58689a618/op-node/rollup/types.go#L615-L617>
///
/// Caution: This should not accept the `withdrawals` field in the payload attributes.
#[method(name = "forkchoiceUpdatedV1")]
async fn fork_choice_updated_v1(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<Engine::PayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated>;
/// Updates the execution layer client with the given fork choice, as specified for the Shanghai
/// fork.
///
/// Caution: This should not accept the `parentBeaconBlockRoot` field in the payload attributes.
///
/// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/shanghai.md#engine_forkchoiceupdatedv2>
///
/// OP modifications:
/// - The `payload_attributes` parameter is extended with the [`EngineTypes::PayloadAttributes`](EngineTypes) type as described in <https://specs.optimism.io/protocol/exec-engine.html#extended-payloadattributesv2>
#[method(name = "forkchoiceUpdatedV2")]
async fn fork_choice_updated_v2(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<Engine::PayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated>;
/// Updates the execution layer client with the given fork choice, as specified for the Cancun
/// fork.
///
/// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_forkchoiceupdatedv3>
///
/// OP modifications:
/// - Must be called with an Ecotone payload
/// - Attributes must contain the parent beacon block root field
/// - The `payload_attributes` parameter is extended with the [`EngineTypes::PayloadAttributes`](EngineTypes) type as described in <https://specs.optimism.io/protocol/exec-engine.html#extended-payloadattributesv2>
#[method(name = "forkchoiceUpdatedV3")]
async fn fork_choice_updated_v3(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<Engine::PayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated>;
/// Retrieves an execution payload from a previously started build process, as specified for the
/// Shanghai fork.
///
/// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/shanghai.md#engine_getpayloadv2>
///
/// Note:
/// > Provider software MAY stop the corresponding build process after serving this call.
///
/// No modifications needed for OP compatibility.
#[method(name = "getPayloadV2")]
async fn get_payload_v2(
&self,
payload_id: PayloadId,
) -> RpcResult<Engine::ExecutionPayloadEnvelopeV2>;
/// Retrieves an execution payload from a previously started build process, as specified for the
/// Cancun fork.
///
/// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_getpayloadv3>
///
/// Note:
/// > Provider software MAY stop the corresponding build process after serving this call.
///
/// OP modifications:
/// - the response type is extended to [`EngineTypes::ExecutionPayloadEnvelopeV3`].
#[method(name = "getPayloadV3")]
async fn get_payload_v3(
&self,
payload_id: PayloadId,
) -> RpcResult<Engine::ExecutionPayloadEnvelopeV3>;
/// Returns the most recent version of the payload that is available in the corresponding
/// payload build process at the time of receiving this call.
///
/// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_getpayloadv4>
///
/// Note:
/// > Provider software MAY stop the corresponding build process after serving this call.
///
/// OP modifications:
/// - the response type is extended to [`EngineTypes::ExecutionPayloadEnvelopeV4`].
#[method(name = "getPayloadV4")]
async fn get_payload_v4(
&self,
payload_id: PayloadId,
) -> RpcResult<Engine::ExecutionPayloadEnvelopeV4>;
/// Returns the execution payload bodies by the given hash.
///
/// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/shanghai.md#engine_getpayloadbodiesbyhashv1>
#[method(name = "getPayloadBodiesByHashV1")]
async fn get_payload_bodies_by_hash_v1(
&self,
block_hashes: Vec<BlockHash>,
) -> RpcResult<ExecutionPayloadBodiesV1>;
/// Returns the execution payload bodies by the range starting at `start`, containing `count`
/// blocks.
///
/// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus
/// layer p2p specification, meaning the input should be treated as untrusted or potentially
/// adversarial.
///
/// Implementers should take care when acting on the input to this method, specifically
/// ensuring that the range is limited properly, and that the range boundaries are computed
/// correctly and without panics.
///
/// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/shanghai.md#engine_getpayloadbodiesbyrangev1>
#[method(name = "getPayloadBodiesByRangeV1")]
async fn get_payload_bodies_by_range_v1(
&self,
start: U64,
count: U64,
) -> RpcResult<ExecutionPayloadBodiesV1>;
/// Signals superchain information to the Engine.
/// Returns the latest supported OP-Stack protocol version of the execution engine.
/// See also <https://specs.optimism.io/protocol/exec-engine.html#engine_signalsuperchainv1>
#[method(name = "engine_signalSuperchainV1")]
async fn signal_superchain_v1(&self, _signal: SuperchainSignal) -> RpcResult<ProtocolVersion>;
/// Returns the execution client version information.
///
/// Note:
/// > The `client_version` parameter identifies the consensus client.
///
/// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/identification.md#engine_getclientversionv1>
#[method(name = "getClientVersionV1")]
async fn get_client_version_v1(
&self,
client_version: ClientVersionV1,
) -> RpcResult<Vec<ClientVersionV1>>;
/// Returns the list of Engine API methods supported by the execution layer client software.
///
/// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/common.md#capabilities>
#[method(name = "exchangeCapabilities")]
async fn exchange_capabilities(&self, capabilities: Vec<String>) -> RpcResult<Vec<String>>;
}
/// The Engine API implementation that grants the Consensus layer access to data and
/// functions in the Execution layer that are crucial for the consensus process.
#[derive(Debug, Constructor)]
pub struct OpEngineApi<Provider, EngineT: EngineTypes, Pool, Validator, ChainSpec> {
inner: EngineApi<Provider, EngineT, Pool, Validator, ChainSpec>,
}
impl<Provider, PayloadT, Pool, Validator, ChainSpec> Clone
for OpEngineApi<Provider, PayloadT, Pool, Validator, ChainSpec>
where
PayloadT: EngineTypes,
{
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
#[async_trait::async_trait]
impl<Provider, EngineT, Pool, Validator, ChainSpec> OpEngineApiServer<EngineT>
for OpEngineApi<Provider, EngineT, Pool, Validator, ChainSpec>
where
Provider: HeaderProvider + BlockReader + StateProviderFactory + 'static,
EngineT: EngineTypes<ExecutionData = OpExecutionData>,
Pool: TransactionPool + 'static,
Validator: EngineApiValidator<EngineT>,
ChainSpec: EthereumHardforks + Send + Sync + 'static,
{
async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult<PayloadStatus> {
trace!(target: "rpc::engine", "Serving engine_newPayloadV2");
let payload = OpExecutionData::v2(payload);
Ok(self.inner.new_payload_v2_metered(payload).await?)
}
async fn new_payload_v3(
&self,
payload: ExecutionPayloadV3,
versioned_hashes: Vec<B256>,
parent_beacon_block_root: B256,
) -> RpcResult<PayloadStatus> {
trace!(target: "rpc::engine", "Serving engine_newPayloadV3");
let payload = OpExecutionData::v3(payload, versioned_hashes, parent_beacon_block_root);
Ok(self.inner.new_payload_v3_metered(payload).await?)
}
async fn new_payload_v4(
&self,
payload: OpExecutionPayloadV4,
versioned_hashes: Vec<B256>,
parent_beacon_block_root: B256,
execution_requests: Requests,
) -> RpcResult<PayloadStatus> {
trace!(target: "rpc::engine", "Serving engine_newPayloadV4");
let payload = OpExecutionData::v4(
payload,
versioned_hashes,
parent_beacon_block_root,
execution_requests,
);
Ok(self.inner.new_payload_v4_metered(payload).await?)
}
async fn fork_choice_updated_v1(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<EngineT::PayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated> {
Ok(self.inner.fork_choice_updated_v1_metered(fork_choice_state, payload_attributes).await?)
}
async fn fork_choice_updated_v2(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<EngineT::PayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated> {
trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV2");
Ok(self.inner.fork_choice_updated_v2_metered(fork_choice_state, payload_attributes).await?)
}
async fn fork_choice_updated_v3(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<EngineT::PayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated> {
trace!(target: "rpc::engine", "Serving engine_forkchoiceUpdatedV3");
Ok(self.inner.fork_choice_updated_v3_metered(fork_choice_state, payload_attributes).await?)
}
async fn get_payload_v2(
&self,
payload_id: PayloadId,
) -> RpcResult<EngineT::ExecutionPayloadEnvelopeV2> {
debug!(target: "rpc::engine", id = %payload_id, "Serving engine_getPayloadV2");
Ok(self.inner.get_payload_v2_metered(payload_id).await?)
}
async fn get_payload_v3(
&self,
payload_id: PayloadId,
) -> RpcResult<EngineT::ExecutionPayloadEnvelopeV3> {
trace!(target: "rpc::engine", "Serving engine_getPayloadV3");
Ok(self.inner.get_payload_v3_metered(payload_id).await?)
}
async fn get_payload_v4(
&self,
payload_id: PayloadId,
) -> RpcResult<EngineT::ExecutionPayloadEnvelopeV4> {
trace!(target: "rpc::engine", "Serving engine_getPayloadV4");
Ok(self.inner.get_payload_v4_metered(payload_id).await?)
}
async fn get_payload_bodies_by_hash_v1(
&self,
block_hashes: Vec<BlockHash>,
) -> RpcResult<ExecutionPayloadBodiesV1> {
trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1");
Ok(self.inner.get_payload_bodies_by_hash_v1_metered(block_hashes).await?)
}
async fn get_payload_bodies_by_range_v1(
&self,
start: U64,
count: U64,
) -> RpcResult<ExecutionPayloadBodiesV1> {
trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1");
Ok(self.inner.get_payload_bodies_by_range_v1_metered(start.to(), count.to()).await?)
}
async fn signal_superchain_v1(&self, signal: SuperchainSignal) -> RpcResult<ProtocolVersion> {
trace!(target: "rpc::engine", "Serving signal_superchain_v1");
info!(
target: "rpc::engine",
"Received superchain version signal local={:?} required={:?} recommended={:?}",
OP_STACK_SUPPORT,
signal.required,
signal.recommended
);
Ok(OP_STACK_SUPPORT)
}
async fn get_client_version_v1(
&self,
client: ClientVersionV1,
) -> RpcResult<Vec<ClientVersionV1>> {
trace!(target: "rpc::engine", "Serving engine_getClientVersionV1");
Ok(self.inner.get_client_version_v1(client)?)
}
async fn exchange_capabilities(&self, _capabilities: Vec<String>) -> RpcResult<Vec<String>> {
Ok(self.inner.capabilities().list())
}
}
impl<Provider, EngineT, Pool, Validator, ChainSpec> IntoEngineApiRpcModule
for OpEngineApi<Provider, EngineT, Pool, Validator, ChainSpec>
where
EngineT: EngineTypes,
Self: OpEngineApiServer<EngineT>,
{
fn into_rpc_module(self) -> RpcModule<()> {
self.into_rpc().remove_context()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/lib.rs | crates/optimism/rpc/src/lib.rs | //! OP-Reth RPC support.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod engine;
pub mod error;
pub mod eth;
pub mod historical;
pub mod miner;
pub mod sequencer;
pub mod witness;
#[cfg(feature = "client")]
pub use engine::OpEngineApiClient;
pub use engine::{OpEngineApi, OpEngineApiServer, OP_ENGINE_CAPABILITIES};
pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError};
pub use eth::{OpEthApi, OpEthApiBuilder, OpReceiptBuilder};
pub use sequencer::SequencerClient;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/sequencer.rs | crates/optimism/rpc/src/sequencer.rs | //! Helpers for optimism specific RPC implementations.
use crate::SequencerClientError;
use alloy_json_rpc::{RpcRecv, RpcSend};
use alloy_primitives::{hex, B256};
use alloy_rpc_client::{BuiltInConnectionString, ClientBuilder, RpcClient as Client};
use alloy_rpc_types_eth::erc4337::TransactionConditional;
use alloy_transport_http::Http;
use reth_optimism_txpool::supervisor::metrics::SequencerMetrics;
use std::{str::FromStr, sync::Arc, time::Instant};
use thiserror::Error;
use tracing::warn;
/// Sequencer client error
#[derive(Error, Debug)]
pub enum Error {
/// Invalid scheme
#[error("Invalid scheme of sequencer url: {0}")]
InvalidScheme(String),
/// Invalid header or value provided.
#[error("Invalid header: {0}")]
InvalidHeader(String),
/// Invalid url
#[error("Invalid sequencer url: {0}")]
InvalidUrl(String),
/// Establishing a connection to the sequencer endpoint resulted in an error.
#[error("Failed to connect to sequencer: {0}")]
TransportError(
#[from]
#[source]
alloy_transport::TransportError,
),
/// Reqwest failed to init client
#[error("Failed to init reqwest client for sequencer: {0}")]
ReqwestError(
#[from]
#[source]
reqwest::Error,
),
}
/// A client to interact with a Sequencer
#[derive(Debug, Clone)]
pub struct SequencerClient {
inner: Arc<SequencerClientInner>,
}
impl SequencerClientInner {
/// Creates a new instance with the given endpoint and client.
pub(crate) fn new(sequencer_endpoint: String, client: Client) -> Self {
let metrics = SequencerMetrics::default();
Self { sequencer_endpoint, client, metrics }
}
}
impl SequencerClient {
/// Creates a new [`SequencerClient`] for the given URL.
///
/// If the URL is a websocket endpoint we connect a websocket instance.
pub async fn new(sequencer_endpoint: impl Into<String>) -> Result<Self, Error> {
Self::new_with_headers(sequencer_endpoint, Default::default()).await
}
/// Creates a new `SequencerClient` for the given URL with the given headers
///
/// This expects headers in the form: `header=value`
pub async fn new_with_headers(
sequencer_endpoint: impl Into<String>,
headers: Vec<String>,
) -> Result<Self, Error> {
let sequencer_endpoint = sequencer_endpoint.into();
let endpoint = BuiltInConnectionString::from_str(&sequencer_endpoint)?;
if let BuiltInConnectionString::Http(url) = endpoint {
let mut builder = reqwest::Client::builder()
// we force use tls to prevent native issues
.use_rustls_tls();
if !headers.is_empty() {
let mut header_map = reqwest::header::HeaderMap::new();
for header in headers {
if let Some((key, value)) = header.split_once('=') {
header_map.insert(
key.trim()
.parse::<reqwest::header::HeaderName>()
.map_err(|err| Error::InvalidHeader(err.to_string()))?,
value
.trim()
.parse::<reqwest::header::HeaderValue>()
.map_err(|err| Error::InvalidHeader(err.to_string()))?,
);
}
}
builder = builder.default_headers(header_map);
}
let client = builder.build()?;
Self::with_http_client(url, client)
} else {
let client = ClientBuilder::default().connect_with(endpoint).await?;
let inner = SequencerClientInner::new(sequencer_endpoint, client);
Ok(Self { inner: Arc::new(inner) })
}
}
/// Creates a new [`SequencerClient`] with http transport with the given http client.
pub fn with_http_client(
sequencer_endpoint: impl Into<String>,
client: reqwest::Client,
) -> Result<Self, Error> {
let sequencer_endpoint: String = sequencer_endpoint.into();
let url = sequencer_endpoint
.parse()
.map_err(|_| Error::InvalidUrl(sequencer_endpoint.clone()))?;
let http_client = Http::with_client(client, url);
let is_local = http_client.guess_local();
let client = ClientBuilder::default().transport(http_client, is_local);
let inner = SequencerClientInner::new(sequencer_endpoint, client);
Ok(Self { inner: Arc::new(inner) })
}
/// Returns the network of the client
pub fn endpoint(&self) -> &str {
&self.inner.sequencer_endpoint
}
/// Returns the client
pub fn client(&self) -> &Client {
&self.inner.client
}
/// Returns a reference to the [`SequencerMetrics`] for tracking client metrics.
fn metrics(&self) -> &SequencerMetrics {
&self.inner.metrics
}
/// Sends a [`alloy_rpc_client::RpcCall`] request to the sequencer endpoint.
pub async fn request<Params: RpcSend, Resp: RpcRecv>(
&self,
method: &str,
params: Params,
) -> Result<Resp, SequencerClientError> {
let resp =
self.client().request::<Params, Resp>(method.to_string(), params).await.inspect_err(
|err| {
warn!(
target: "rpc::sequencer",
%err,
"HTTP request to sequencer failed",
);
},
)?;
Ok(resp)
}
/// Forwards a transaction to the sequencer endpoint.
pub async fn forward_raw_transaction(&self, tx: &[u8]) -> Result<B256, SequencerClientError> {
let start = Instant::now();
let rlp_hex = hex::encode_prefixed(tx);
let tx_hash =
self.request("eth_sendRawTransaction", (rlp_hex,)).await.inspect_err(|err| {
warn!(
target: "rpc::eth",
%err,
"Failed to forward transaction to sequencer",
);
})?;
self.metrics().record_forward_latency(start.elapsed());
Ok(tx_hash)
}
/// Forwards a transaction conditional to the sequencer endpoint.
pub async fn forward_raw_transaction_conditional(
&self,
tx: &[u8],
condition: TransactionConditional,
) -> Result<B256, SequencerClientError> {
let start = Instant::now();
let rlp_hex = hex::encode_prefixed(tx);
let tx_hash = self
.request("eth_sendRawTransactionConditional", (rlp_hex, condition))
.await
.inspect_err(|err| {
warn!(
target: "rpc::eth",
%err,
"Failed to forward transaction conditional for sequencer",
);
})?;
self.metrics().record_forward_latency(start.elapsed());
Ok(tx_hash)
}
}
#[derive(Debug)]
struct SequencerClientInner {
/// The endpoint of the sequencer
sequencer_endpoint: String,
/// The client
client: Client,
// Metrics for tracking sequencer forwarding
metrics: SequencerMetrics,
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::U64;
#[tokio::test]
async fn test_http_body_str() {
let client = SequencerClient::new("http://localhost:8545").await.unwrap();
let request = client
.client()
.make_request("eth_getBlockByNumber", (U64::from(10),))
.serialize()
.unwrap()
.take_request();
let body = request.get();
assert_eq!(
body,
r#"{"method":"eth_getBlockByNumber","params":["0xa"],"id":0,"jsonrpc":"2.0"}"#
);
let condition = TransactionConditional::default();
let request = client
.client()
.make_request(
"eth_sendRawTransactionConditional",
(format!("0x{}", hex::encode("abcd")), condition),
)
.serialize()
.unwrap()
.take_request();
let body = request.get();
assert_eq!(
body,
r#"{"method":"eth_sendRawTransactionConditional","params":["0x61626364",{"knownAccounts":{}}],"id":1,"jsonrpc":"2.0"}"#
);
}
#[tokio::test]
#[ignore = "Start if WS is reachable at ws://localhost:8546"]
async fn test_ws_body_str() {
let client = SequencerClient::new("ws://localhost:8546").await.unwrap();
let request = client
.client()
.make_request("eth_getBlockByNumber", (U64::from(10),))
.serialize()
.unwrap()
.take_request();
let body = request.get();
assert_eq!(
body,
r#"{"method":"eth_getBlockByNumber","params":["0xa"],"id":0,"jsonrpc":"2.0"}"#
);
let condition = TransactionConditional::default();
let request = client
.client()
.make_request(
"eth_sendRawTransactionConditional",
(format!("0x{}", hex::encode("abcd")), condition),
)
.serialize()
.unwrap()
.take_request();
let body = request.get();
assert_eq!(
body,
r#"{"method":"eth_sendRawTransactionConditional","params":["0x61626364",{"knownAccounts":{}}],"id":1,"jsonrpc":"2.0"}"#
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/witness.rs | crates/optimism/rpc/src/witness.rs | //! Support for optimism specific witness RPCs.
use alloy_primitives::B256;
use alloy_rpc_types_debug::ExecutionWitness;
use jsonrpsee_core::{async_trait, RpcResult};
use reth_chainspec::ChainSpecProvider;
use reth_evm::ConfigureEvm;
use reth_node_api::{BuildNextEnv, NodePrimitives};
use reth_optimism_forks::OpHardforks;
use reth_optimism_payload_builder::{OpAttributes, OpPayloadBuilder, OpPayloadPrimitives};
use reth_optimism_txpool::OpPooledTx;
use reth_primitives_traits::{SealedHeader, TxTy};
pub use reth_rpc_api::DebugExecutionWitnessApiServer;
use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
use reth_storage_api::{
errors::{ProviderError, ProviderResult},
BlockReaderIdExt, NodePrimitivesProvider, StateProviderFactory,
};
use reth_tasks::TaskSpawner;
use reth_transaction_pool::TransactionPool;
use std::{fmt::Debug, sync::Arc};
use tokio::sync::{oneshot, Semaphore};
/// An extension to the `debug_` namespace of the RPC API.
pub struct OpDebugWitnessApi<Pool, Provider, EvmConfig, Attrs> {
inner: Arc<OpDebugWitnessApiInner<Pool, Provider, EvmConfig, Attrs>>,
}
impl<Pool, Provider, EvmConfig, Attrs> OpDebugWitnessApi<Pool, Provider, EvmConfig, Attrs> {
/// Creates a new instance of the `OpDebugWitnessApi`.
pub fn new(
provider: Provider,
task_spawner: Box<dyn TaskSpawner>,
builder: OpPayloadBuilder<Pool, Provider, EvmConfig, (), Attrs>,
) -> Self {
let semaphore = Arc::new(Semaphore::new(3));
let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore };
Self { inner: Arc::new(inner) }
}
}
impl<Pool, Provider, EvmConfig, Attrs> OpDebugWitnessApi<Pool, Provider, EvmConfig, Attrs>
where
EvmConfig: ConfigureEvm,
Provider: NodePrimitivesProvider<Primitives: NodePrimitives<BlockHeader = Provider::Header>>
+ BlockReaderIdExt,
{
/// Fetches the parent header by hash.
fn parent_header(
&self,
parent_block_hash: B256,
) -> ProviderResult<SealedHeader<Provider::Header>> {
self.inner
.provider
.sealed_header_by_hash(parent_block_hash)?
.ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into()))
}
}
#[async_trait]
impl<Pool, Provider, EvmConfig, Attrs> DebugExecutionWitnessApiServer<Attrs::RpcPayloadAttributes>
for OpDebugWitnessApi<Pool, Provider, EvmConfig, Attrs>
where
Pool: TransactionPool<
Transaction: OpPooledTx<Consensus = <Provider::Primitives as NodePrimitives>::SignedTx>,
> + 'static,
Provider: BlockReaderIdExt<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>
+ NodePrimitivesProvider<Primitives: OpPayloadPrimitives>
+ StateProviderFactory
+ ChainSpecProvider<ChainSpec: OpHardforks>
+ Clone
+ 'static,
EvmConfig: ConfigureEvm<
Primitives = Provider::Primitives,
NextBlockEnvCtx: BuildNextEnv<Attrs, Provider::Header, Provider::ChainSpec>,
> + 'static,
Attrs: OpAttributes<Transaction = TxTy<EvmConfig::Primitives>>,
{
async fn execute_payload(
&self,
parent_block_hash: B256,
attributes: Attrs::RpcPayloadAttributes,
) -> RpcResult<ExecutionWitness> {
let _permit = self.inner.semaphore.acquire().await;
let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?;
let (tx, rx) = oneshot::channel();
let this = self.clone();
self.inner.task_spawner.spawn_blocking(Box::pin(async move {
let res = this.inner.builder.payload_witness(parent_header, attributes);
let _ = tx.send(res);
}));
rx.await
.map_err(|err| internal_rpc_err(err.to_string()))?
.map_err(|err| internal_rpc_err(err.to_string()))
}
}
impl<Pool, Provider, EvmConfig, Attrs> Clone
for OpDebugWitnessApi<Pool, Provider, EvmConfig, Attrs>
{
fn clone(&self) -> Self {
Self { inner: Arc::clone(&self.inner) }
}
}
impl<Pool, Provider, EvmConfig, Attrs> Debug
for OpDebugWitnessApi<Pool, Provider, EvmConfig, Attrs>
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive()
}
}
struct OpDebugWitnessApiInner<Pool, Provider, EvmConfig, Attrs> {
provider: Provider,
builder: OpPayloadBuilder<Pool, Provider, EvmConfig, (), Attrs>,
task_spawner: Box<dyn TaskSpawner>,
semaphore: Arc<Semaphore>,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/error.rs | crates/optimism/rpc/src/error.rs | //! RPC errors specific to OP.
use alloy_json_rpc::ErrorPayload;
use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError};
use alloy_transport::{RpcError, TransportErrorKind};
use jsonrpsee_types::error::{INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE};
use op_revm::{OpHaltReason, OpTransactionError};
use reth_evm::execute::ProviderError;
use reth_optimism_evm::OpBlockExecutionError;
use reth_rpc_eth_api::{AsEthApiError, EthTxEnvError, TransactionConversionError};
use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError};
use reth_rpc_server_types::result::{internal_rpc_err, rpc_err};
use revm::context_interface::result::{EVMError, InvalidTransaction};
use std::{convert::Infallible, fmt::Display};
/// Optimism specific errors, that extend [`EthApiError`].
#[derive(Debug, thiserror::Error)]
pub enum OpEthApiError {
/// L1 ethereum error.
#[error(transparent)]
Eth(#[from] EthApiError),
/// EVM error originating from invalid optimism data.
#[error(transparent)]
Evm(#[from] OpBlockExecutionError),
/// Thrown when calculating L1 gas fee.
#[error("failed to calculate l1 gas fee")]
L1BlockFeeError,
/// Thrown when calculating L1 gas used
#[error("failed to calculate l1 gas used")]
L1BlockGasError,
/// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction).
#[error(transparent)]
InvalidTransaction(#[from] OpInvalidTransactionError),
/// Sequencer client error.
#[error(transparent)]
Sequencer(#[from] SequencerClientError),
}
impl AsEthApiError for OpEthApiError {
fn as_err(&self) -> Option<&EthApiError> {
match self {
Self::Eth(err) => Some(err),
_ => None,
}
}
}
impl From<OpEthApiError> for jsonrpsee_types::error::ErrorObject<'static> {
fn from(err: OpEthApiError) -> Self {
match err {
OpEthApiError::Eth(err) => err.into(),
OpEthApiError::InvalidTransaction(err) => err.into(),
OpEthApiError::Evm(_) |
OpEthApiError::L1BlockFeeError |
OpEthApiError::L1BlockGasError => internal_rpc_err(err.to_string()),
OpEthApiError::Sequencer(err) => err.into(),
}
}
}
/// Optimism specific invalid transaction errors
#[derive(thiserror::Error, Debug)]
pub enum OpInvalidTransactionError {
/// A deposit transaction was submitted as a system transaction post-regolith.
#[error("no system transactions allowed after regolith")]
DepositSystemTxPostRegolith,
/// A deposit transaction halted post-regolith
#[error("deposit transaction halted after regolith")]
HaltedDepositPostRegolith,
/// Transaction conditional errors.
#[error(transparent)]
TxConditionalErr(#[from] TxConditionalErr),
}
impl From<OpInvalidTransactionError> for jsonrpsee_types::error::ErrorObject<'static> {
fn from(err: OpInvalidTransactionError) -> Self {
match err {
OpInvalidTransactionError::DepositSystemTxPostRegolith |
OpInvalidTransactionError::HaltedDepositPostRegolith => {
rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None)
}
OpInvalidTransactionError::TxConditionalErr(_) => err.into(),
}
}
}
impl TryFrom<OpTransactionError> for OpInvalidTransactionError {
type Error = InvalidTransaction;
fn try_from(err: OpTransactionError) -> Result<Self, Self::Error> {
match err {
OpTransactionError::DepositSystemTxPostRegolith => {
Ok(Self::DepositSystemTxPostRegolith)
}
OpTransactionError::HaltedDepositPostRegolith => Ok(Self::HaltedDepositPostRegolith),
OpTransactionError::Base(err) => Err(err),
}
}
}
/// Transaction conditional related errors.
#[derive(Debug, thiserror::Error)]
pub enum TxConditionalErr {
/// Transaction conditional cost exceeded maximum allowed
#[error("conditional cost exceeded maximum allowed")]
ConditionalCostExceeded,
/// Invalid conditional parameters
#[error("invalid conditional parameters")]
InvalidCondition,
/// Internal error
#[error("internal error: {0}")]
Internal(String),
/// Thrown if the conditional's storage value doesn't match the latest state's.
#[error("storage value mismatch")]
StorageValueMismatch,
/// Thrown when the conditional's storage root doesn't match the latest state's root.
#[error("storage root mismatch")]
StorageRootMismatch,
}
impl TxConditionalErr {
/// Creates an internal error variant
pub fn internal<E: Display>(err: E) -> Self {
Self::Internal(err.to_string())
}
}
impl From<TxConditionalErr> for jsonrpsee_types::error::ErrorObject<'static> {
fn from(err: TxConditionalErr) -> Self {
let code = match &err {
TxConditionalErr::Internal(_) => INTERNAL_ERROR_CODE,
_ => INVALID_PARAMS_CODE,
};
jsonrpsee_types::error::ErrorObject::owned(code, err.to_string(), None::<String>)
}
}
/// Error type when interacting with the Sequencer
#[derive(Debug, thiserror::Error)]
pub enum SequencerClientError {
/// Wrapper around an [`RpcError<TransportErrorKind>`].
#[error(transparent)]
HttpError(#[from] RpcError<TransportErrorKind>),
}
impl From<SequencerClientError> for jsonrpsee_types::error::ErrorObject<'static> {
fn from(err: SequencerClientError) -> Self {
match err {
SequencerClientError::HttpError(RpcError::ErrorResp(ErrorPayload {
code,
message,
data,
})) => jsonrpsee_types::error::ErrorObject::owned(code as i32, message, data),
err => jsonrpsee_types::error::ErrorObject::owned(
INTERNAL_ERROR_CODE,
err.to_string(),
None::<String>,
),
}
}
}
impl<T> From<EVMError<T, OpTransactionError>> for OpEthApiError
where
T: Into<EthApiError>,
{
fn from(error: EVMError<T, OpTransactionError>) -> Self {
match error {
EVMError::Transaction(err) => match err.try_into() {
Ok(err) => Self::InvalidTransaction(err),
Err(err) => Self::Eth(EthApiError::InvalidTransaction(err.into())),
},
EVMError::Database(err) => Self::Eth(err.into()),
EVMError::Header(err) => Self::Eth(err.into()),
EVMError::Custom(err) => Self::Eth(EthApiError::EvmCustom(err)),
}
}
}
impl FromEvmHalt<OpHaltReason> for OpEthApiError {
fn from_evm_halt(halt: OpHaltReason, gas_limit: u64) -> Self {
match halt {
OpHaltReason::FailedDeposit => {
OpInvalidTransactionError::HaltedDepositPostRegolith.into()
}
OpHaltReason::Base(halt) => EthApiError::from_evm_halt(halt, gas_limit).into(),
}
}
}
impl From<TransactionConversionError> for OpEthApiError {
fn from(value: TransactionConversionError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<EthTxEnvError> for OpEthApiError {
fn from(value: EthTxEnvError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<ProviderError> for OpEthApiError {
fn from(value: ProviderError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<BlockError> for OpEthApiError {
fn from(value: BlockError) -> Self {
Self::Eth(EthApiError::from(value))
}
}
impl From<Infallible> for OpEthApiError {
fn from(value: Infallible) -> Self {
match value {}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/historical.rs | crates/optimism/rpc/src/historical.rs | //! Client support for optimism historical RPC requests.
use crate::sequencer::Error;
use alloy_eips::BlockId;
use alloy_json_rpc::{RpcRecv, RpcSend};
use alloy_primitives::{BlockNumber, B256};
use alloy_rpc_client::RpcClient;
use jsonrpsee_core::{
middleware::{Batch, Notification, RpcServiceT},
server::MethodResponse,
};
use jsonrpsee_types::{Params, Request};
use reth_storage_api::{BlockReaderIdExt, TransactionsProvider};
use std::{future::Future, sync::Arc};
use tracing::{debug, warn};
/// A client that can be used to forward RPC requests for historical data to an endpoint.
///
/// This is intended to be used for OP-Mainnet pre-bedrock data, allowing users to query historical
/// state.
#[derive(Debug, Clone)]
pub struct HistoricalRpcClient {
inner: Arc<HistoricalRpcClientInner>,
}
impl HistoricalRpcClient {
/// Constructs a new historical RPC client with the given endpoint URL.
pub fn new(endpoint: &str) -> Result<Self, Error> {
let client = RpcClient::new_http(
endpoint.parse::<reqwest::Url>().map_err(|err| Error::InvalidUrl(err.to_string()))?,
);
Ok(Self {
inner: Arc::new(HistoricalRpcClientInner {
historical_endpoint: endpoint.to_string(),
client,
}),
})
}
/// Returns a reference to the underlying RPC client
fn client(&self) -> &RpcClient {
&self.inner.client
}
/// Forwards a JSON-RPC request to the historical endpoint
pub async fn request<Params: RpcSend, Resp: RpcRecv>(
&self,
method: &str,
params: Params,
) -> Result<Resp, Error> {
let resp =
self.client().request::<Params, Resp>(method.to_string(), params).await.inspect_err(
|err| {
warn!(
target: "rpc::historical",
%err,
"HTTP request to historical endpoint failed"
);
},
)?;
Ok(resp)
}
/// Returns the configured historical endpoint URL
pub fn endpoint(&self) -> &str {
&self.inner.historical_endpoint
}
}
#[derive(Debug)]
struct HistoricalRpcClientInner {
historical_endpoint: String,
client: RpcClient,
}
/// A layer that provides historical RPC forwarding functionality for a given service.
#[derive(Debug, Clone)]
pub struct HistoricalRpc<P> {
inner: Arc<HistoricalRpcInner<P>>,
}
impl<P> HistoricalRpc<P> {
/// Constructs a new historical RPC layer with the given provider, client and bedrock block
/// number.
pub fn new(provider: P, client: HistoricalRpcClient, bedrock_block: BlockNumber) -> Self {
let inner = Arc::new(HistoricalRpcInner { provider, client, bedrock_block });
Self { inner }
}
}
impl<S, P> tower::Layer<S> for HistoricalRpc<P> {
type Service = HistoricalRpcService<S, P>;
fn layer(&self, inner: S) -> Self::Service {
HistoricalRpcService::new(inner, self.inner.clone())
}
}
/// A service that intercepts RPC calls and forwards pre-bedrock historical requests
/// to a dedicated endpoint.
///
/// This checks if the request is for a pre-bedrock block and forwards it via the configured
/// historical RPC client.
#[derive(Debug, Clone)]
pub struct HistoricalRpcService<S, P> {
/// The inner service that handles regular RPC requests
inner: S,
/// The context required to forward historical requests.
historical: Arc<HistoricalRpcInner<P>>,
}
impl<S, P> HistoricalRpcService<S, P> {
/// Constructs a new historical RPC service with the given inner service, historical client,
/// provider, and bedrock block number.
const fn new(inner: S, historical: Arc<HistoricalRpcInner<P>>) -> Self {
Self { inner, historical }
}
}
impl<S, P> RpcServiceT for HistoricalRpcService<S, P>
where
S: RpcServiceT<MethodResponse = MethodResponse> + Send + Sync + Clone + 'static,
P: BlockReaderIdExt + TransactionsProvider + Send + Sync + Clone + 'static,
{
type MethodResponse = S::MethodResponse;
type NotificationResponse = S::NotificationResponse;
type BatchResponse = S::BatchResponse;
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
let inner_service = self.inner.clone();
let historical = self.historical.clone();
Box::pin(async move {
// Check if request should be forwarded to historical endpoint
if let Some(response) = historical.maybe_forward_request(&req).await {
return response
}
// Handle the request with the inner service
inner_service.call(req).await
})
}
fn batch<'a>(&self, req: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
self.inner.batch(req)
}
fn notification<'a>(
&self,
n: Notification<'a>,
) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
self.inner.notification(n)
}
}
#[derive(Debug)]
struct HistoricalRpcInner<P> {
/// Provider used to determine if a block is pre-bedrock
provider: P,
/// Client used to forward historical requests
client: HistoricalRpcClient,
/// Bedrock transition block number
bedrock_block: BlockNumber,
}
impl<P> HistoricalRpcInner<P>
where
    P: BlockReaderIdExt + TransactionsProvider + Send + Sync + Clone,
{
    /// Checks if a request should be forwarded to the historical endpoint and returns
    /// the response if it was forwarded.
    ///
    /// Returns `None` when the request should be served locally or when forwarding
    /// produced no response.
    async fn maybe_forward_request(&self, req: &Request<'_>) -> Option<MethodResponse> {
        // `debug_traceTransaction` is keyed by tx hash; every other supported method is
        // keyed by a block id at a method-specific parameter position.
        let should_forward = match req.method_name() {
            "debug_traceTransaction" => self.should_forward_transaction(req),
            method => self.should_forward_block_request(method, req),
        };
        if should_forward {
            return self.forward_to_historical(req).await
        }
        None
    }
    /// Determines if a transaction request should be forwarded
    ///
    /// Forwards when the tx is found locally in a pre-bedrock block, or optimistically
    /// when the tx cannot be found locally at all. Unparseable params never forward.
    fn should_forward_transaction(&self, req: &Request<'_>) -> bool {
        parse_transaction_hash_from_params(&req.params())
            .ok()
            .map(|tx_hash| {
                // Check if we can find the transaction locally and get its metadata
                match self.provider.transaction_by_hash_with_meta(tx_hash) {
                    Ok(Some((_, meta))) => {
                        // Transaction found - check if it's pre-bedrock based on block number
                        let is_pre_bedrock = meta.block_number < self.bedrock_block;
                        if is_pre_bedrock {
                            debug!(
                                target: "rpc::historical",
                                ?tx_hash,
                                block_num = meta.block_number,
                                bedrock = self.bedrock_block,
                                "transaction found in pre-bedrock block, forwarding to historical endpoint"
                            );
                        }
                        is_pre_bedrock
                    }
                    _ => {
                        // Transaction not found locally, optimistically forward to historical endpoint
                        debug!(
                            target: "rpc::historical",
                            ?tx_hash,
                            "transaction not found locally, forwarding to historical endpoint"
                        );
                        true
                    }
                }
            })
            .unwrap_or(false)
    }
    /// Determines if a block-based request should be forwarded
    fn should_forward_block_request(&self, method: &str, req: &Request<'_>) -> bool {
        // Methods without a block-id parameter yield `None` and are never forwarded.
        let maybe_block_id = extract_block_id_for_method(method, &req.params());
        maybe_block_id.map(|block_id| self.is_pre_bedrock(block_id)).unwrap_or(false)
    }
    /// Checks if a block ID refers to a pre-bedrock block
    ///
    /// An unknown block *hash* is assumed pre-bedrock (optimistic forward); any other
    /// unresolved id is treated as not-forwardable.
    fn is_pre_bedrock(&self, block_id: BlockId) -> bool {
        match self.provider.block_number_for_id(block_id) {
            Ok(Some(num)) => {
                debug!(
                    target: "rpc::historical",
                    ?block_id,
                    block_num=num,
                    bedrock=self.bedrock_block,
                    "found block number"
                );
                num < self.bedrock_block
            }
            Ok(None) if block_id.is_hash() => {
                debug!(
                    target: "rpc::historical",
                    ?block_id,
                    "block hash not found locally, assuming pre-bedrock"
                );
                true
            }
            _ => {
                debug!(
                    target: "rpc::historical",
                    ?block_id,
                    "could not determine block number; not forwarding"
                );
                false
            }
        }
    }
    /// Forwards a request to the historical endpoint
    ///
    /// Returns `None` if the params cannot be re-serialized or the remote call fails,
    /// in which case the caller falls back to the inner service.
    async fn forward_to_historical(&self, req: &Request<'_>) -> Option<MethodResponse> {
        debug!(
            target: "rpc::historical",
            method = %req.method_name(),
            params=?req.params(),
            "forwarding request to historical endpoint"
        );
        let params = req.params();
        // Absent params are forwarded as an empty positional list.
        let params_str = params.as_str().unwrap_or("[]");
        let params = serde_json::from_str::<serde_json::Value>(params_str).ok()?;
        let raw =
            self.client.request::<_, serde_json::Value>(req.method_name(), params).await.ok()?;
        // `usize::MAX` effectively disables the response-size limit for forwarded replies.
        let payload = jsonrpsee_types::ResponsePayload::success(raw).into();
        Some(MethodResponse::response(req.id.clone(), payload, usize::MAX))
    }
}
/// Error type for parameter parsing
#[derive(Debug)]
enum ParseError {
    /// The parameters could not be deserialized into the expected shape.
    InvalidFormat,
    /// The expected parameter was not present.
    MissingParameter,
}
/// Extracts the block ID from request parameters based on the method name.
///
/// Returns `None` for methods that do not carry a block-id parameter.
fn extract_block_id_for_method(method: &str, params: &Params<'_>) -> Option<BlockId> {
    // Resolve the positional index of the block-id parameter for each supported method.
    let position = match method {
        "eth_getBlockByNumber" |
        "eth_getBlockByHash" |
        "debug_traceBlockByNumber" |
        "debug_traceBlockByHash" => 0,
        "eth_getBalance" |
        "eth_getCode" |
        "eth_getTransactionCount" |
        "eth_call" |
        "eth_estimateGas" |
        "eth_createAccessList" |
        "debug_traceCall" => 1,
        "eth_getStorageAt" | "eth_getProof" => 2,
        _ => return None,
    };
    parse_block_id_from_params(params, position)
}
/// Parses a `BlockId` from the given parameters at the specified position.
///
/// Returns `None` if the params cannot be decoded, the position is out of range, or the
/// value at that position is not a valid block id.
fn parse_block_id_from_params(params: &Params<'_>, position: usize) -> Option<BlockId> {
    let values = params.parse::<Vec<serde_json::Value>>().ok()?;
    values.into_iter().nth(position).and_then(|value| serde_json::from_value(value).ok())
}
/// Parses a transaction hash from the first parameter.
fn parse_transaction_hash_from_params(params: &Params<'_>) -> Result<B256, ParseError> {
    let mut values = params
        .parse::<Vec<serde_json::Value>>()
        .map_err(|_| ParseError::InvalidFormat)?
        .into_iter();
    match values.next() {
        Some(value) => serde_json::from_value(value).map_err(|_| ParseError::InvalidFormat),
        None => Err(ParseError::MissingParameter),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_eips::{BlockId, BlockNumberOrTag};
    use jsonrpsee::types::Params;
    use jsonrpsee_core::middleware::layer::Either;
    use reth_node_builder::rpc::RethRpcMiddleware;
    use reth_storage_api::noop::NoopProvider;
    use tower::layer::util::Identity;
    /// Compile-time check that `HistoricalRpc` satisfies the middleware bounds.
    #[test]
    fn check_historical_rpc() {
        fn assert_historical_rpc<T: RethRpcMiddleware>() {}
        assert_historical_rpc::<HistoricalRpc<NoopProvider>>();
        assert_historical_rpc::<Either<HistoricalRpc<NoopProvider>, Identity>>();
    }
    /// Tests that various valid id types can be parsed from the first parameter.
    #[test]
    fn parses_block_id_from_first_param() {
        // Test with a block number
        let params_num = Params::new(Some(r#"["0x64"]"#)); // 100
        assert_eq!(
            parse_block_id_from_params(&params_num, 0).unwrap(),
            BlockId::Number(BlockNumberOrTag::Number(100))
        );
        // Test with the "earliest" tag
        let params_tag = Params::new(Some(r#"["earliest"]"#));
        assert_eq!(
            parse_block_id_from_params(&params_tag, 0).unwrap(),
            BlockId::Number(BlockNumberOrTag::Earliest)
        );
    }
    /// Tests that the function correctly parses from a position other than 0.
    #[test]
    fn parses_block_id_from_second_param() {
        let params =
            Params::new(Some(r#"["0x0000000000000000000000000000000000000000", "latest"]"#));
        let result = parse_block_id_from_params(&params, 1).unwrap();
        assert_eq!(result, BlockId::Number(BlockNumberOrTag::Latest));
    }
    /// Tests that the function returns `None` if the parameter is missing or empty;
    /// the parser itself performs no defaulting.
    #[test]
    fn returns_none_when_param_is_missing() {
        let params = Params::new(Some(r#"["0x0000000000000000000000000000000000000000"]"#));
        let result = parse_block_id_from_params(&params, 1);
        assert!(result.is_none());
    }
    /// Tests that the function doesn't parse anything if the parameter is not a valid block id.
    #[test]
    fn returns_error_for_invalid_input() {
        let params = Params::new(Some(r#"[true]"#));
        let result = parse_block_id_from_params(&params, 0);
        assert!(result.is_none());
    }
    /// Tests that transaction hashes can be parsed from params.
    #[test]
    fn parses_transaction_hash_from_params() {
        let hash = "0xdbdfa0f88b2cf815fdc1621bd20c2bd2b0eed4f0c56c9be2602957b5a60ec702";
        let params_str = format!(r#"["{hash}"]"#);
        let params = Params::new(Some(&params_str));
        let result = parse_transaction_hash_from_params(&params);
        assert!(result.is_ok());
        let parsed_hash = result.unwrap();
        assert_eq!(format!("{parsed_hash:?}"), hash);
    }
    /// Tests that invalid transaction hash returns error.
    #[test]
    fn returns_error_for_invalid_tx_hash() {
        let params = Params::new(Some(r#"["not_a_hash"]"#));
        let result = parse_transaction_hash_from_params(&params);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ParseError::InvalidFormat));
    }
    /// Tests that missing parameter returns appropriate error.
    #[test]
    fn returns_error_for_missing_parameter() {
        let params = Params::new(Some(r#"[]"#));
        let result = parse_transaction_hash_from_params(&params);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ParseError::MissingParameter));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/pending_block.rs | crates/optimism/rpc/src/eth/pending_block.rs | //! Loads OP pending block for a RPC response.
use std::sync::Arc;
use crate::{OpEthApi, OpEthApiError};
use alloy_eips::BlockNumberOrTag;
use reth_primitives_traits::RecoveredBlock;
use reth_rpc_eth_api::{
helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock},
FromEvmError, RpcConvert, RpcNodeCore,
};
use reth_rpc_eth_types::{builder::config::PendingBlockKind, EthApiError, PendingBlock};
use reth_storage_api::{
BlockReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ReceiptProvider,
};
impl<N, Rpc> LoadPendingBlock for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    OpEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives>,
{
    // The three accessors below simply delegate to the inner eth api's state.
    #[inline]
    fn pending_block(&self) -> &tokio::sync::Mutex<Option<PendingBlock<N::Primitives>>> {
        self.inner.eth_api.pending_block()
    }
    #[inline]
    fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<Self::Evm> {
        self.inner.eth_api.pending_env_builder()
    }
    #[inline]
    fn pending_block_kind(&self) -> PendingBlockKind {
        self.inner.eth_api.pending_block_kind()
    }
    /// Returns the locally built pending block
    ///
    /// Prefers an available pending flashblock; otherwise falls back to the latest
    /// sealed block and its receipts (mirroring op-geth, see link below).
    async fn local_pending_block(
        &self,
    ) -> Result<
        Option<(
            Arc<RecoveredBlock<ProviderBlock<Self::Provider>>>,
            Arc<Vec<ProviderReceipt<Self::Provider>>>,
        )>,
        Self::Error,
    > {
        if let Ok(Some(block)) = self.pending_flashblock() {
            return Ok(Some(block));
        }
        // See: <https://github.com/ethereum-optimism/op-geth/blob/f2e69450c6eec9c35d56af91389a1c47737206ca/miner/worker.go#L367-L375>
        let latest = self
            .provider()
            .latest_header()?
            .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?;
        // Resolve block and receipts by the latest header's hash so they are consistent.
        let block_id = latest.hash().into();
        let block = self
            .provider()
            .recovered_block(block_id, Default::default())?
            .ok_or(EthApiError::HeaderNotFound(block_id.into()))?;
        let receipts = self
            .provider()
            .receipts_by_block(block_id)?
            .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?;
        Ok(Some((Arc::new(block), Arc::new(receipts))))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/ext.rs | crates/optimism/rpc/src/eth/ext.rs | //! Eth API extension.
use crate::{error::TxConditionalErr, OpEthApiError, SequencerClient};
use alloy_consensus::BlockHeader;
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::{Bytes, StorageKey, B256, U256};
use alloy_rpc_types_eth::erc4337::{AccountStorage, TransactionConditional};
use jsonrpsee_core::RpcResult;
use reth_optimism_txpool::conditional::MaybeConditionalTransaction;
use reth_rpc_eth_api::L2EthApiExtServer;
use reth_rpc_eth_types::utils::recover_raw_transaction;
use reth_storage_api::{BlockReaderIdExt, StateProviderFactory};
use reth_transaction_pool::{
AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool,
};
use std::sync::Arc;
use tokio::sync::Semaphore;
/// Maximum execution cost for conditional transactions.
const MAX_CONDITIONAL_EXECUTION_COST: u64 = 5000;
/// Maximum number of known-account validations that may run concurrently.
const MAX_CONCURRENT_CONDITIONAL_VALIDATIONS: usize = 3;
/// OP-Reth `Eth` API extensions implementation.
///
/// Separate from [`super::OpEthApi`] so that it can be enabled conditionally.
#[derive(Clone, Debug)]
pub struct OpEthExtApi<Pool, Provider> {
    /// Sequencer client, configured to forward submitted transactions to sequencer of given OP
    /// network.
    sequencer_client: Option<SequencerClient>,
    /// Shared pool/provider state plus the validation semaphore.
    inner: Arc<OpEthExtApiInner<Pool, Provider>>,
}
impl<Pool, Provider> OpEthExtApi<Pool, Provider>
where
    Provider: BlockReaderIdExt + StateProviderFactory + Clone + 'static,
{
    /// Creates a new [`OpEthExtApi`].
    pub fn new(sequencer_client: Option<SequencerClient>, pool: Pool, provider: Provider) -> Self {
        let inner = Arc::new(OpEthExtApiInner::new(pool, provider));
        Self { sequencer_client, inner }
    }
    /// Returns the configured sequencer client, if any.
    const fn sequencer_client(&self) -> Option<&SequencerClient> {
        self.sequencer_client.as_ref()
    }
    /// Returns a reference to the transaction pool.
    #[inline]
    fn pool(&self) -> &Pool {
        self.inner.pool()
    }
    /// Returns a reference to the provider.
    #[inline]
    fn provider(&self) -> &Provider {
        self.inner.provider()
    }
    /// Validates the conditional's `known accounts` settings against the current state.
    ///
    /// Each account is checked either slot-by-slot or by its full storage root against
    /// the latest state; returns an error on the first mismatch.
    async fn validate_known_accounts(
        &self,
        condition: &TransactionConditional,
    ) -> Result<(), TxConditionalErr> {
        // Nothing to validate.
        if condition.known_accounts.is_empty() {
            return Ok(());
        }
        // Bound the number of concurrent validations via the shared semaphore.
        let _permit =
            self.inner.validation_semaphore.acquire().await.map_err(TxConditionalErr::internal)?;
        let state = self
            .provider()
            .state_by_block_number_or_tag(BlockNumberOrTag::Latest)
            .map_err(TxConditionalErr::internal)?;
        for (address, storage) in &condition.known_accounts {
            match storage {
                // Compare individual storage slots against their expected values.
                AccountStorage::Slots(slots) => {
                    for (slot, expected_value) in slots {
                        let current = state
                            .storage(*address, StorageKey::from(*slot))
                            .map_err(TxConditionalErr::internal)?
                            .unwrap_or_default();
                        if current != U256::from_be_bytes(**expected_value) {
                            return Err(TxConditionalErr::StorageValueMismatch);
                        }
                    }
                }
                // Compare the account's entire storage root.
                AccountStorage::RootHash(expected_root) => {
                    let actual_root = state
                        .storage_root(*address, Default::default())
                        .map_err(TxConditionalErr::internal)?;
                    if *expected_root != actual_root {
                        return Err(TxConditionalErr::StorageRootMismatch);
                    }
                }
            }
        }
        Ok(())
    }
}
#[async_trait::async_trait]
impl<Pool, Provider> L2EthApiExtServer for OpEthExtApi<Pool, Provider>
where
    Provider: BlockReaderIdExt + StateProviderFactory + Clone + 'static,
    Pool: TransactionPool<Transaction: MaybeConditionalTransaction> + 'static,
{
    /// Accepts a raw transaction with an attached [`TransactionConditional`], validates
    /// the conditional (cost, block bounds, known accounts), then either forwards the
    /// raw bytes to the sequencer or inserts the transaction into the local pool.
    async fn send_raw_transaction_conditional(
        &self,
        bytes: Bytes,
        condition: TransactionConditional,
    ) -> RpcResult<B256> {
        // calculate and validate cost
        let cost = condition.cost();
        if cost > MAX_CONDITIONAL_EXECUTION_COST {
            return Err(TxConditionalErr::ConditionalCostExceeded.into());
        }
        let recovered_tx = recover_raw_transaction(&bytes).map_err(|_| {
            OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::FailedToDecodeSignedTransaction)
        })?;
        let mut tx = <Pool as TransactionPool>::Transaction::from_pooled(recovered_tx);
        // get current header
        let header_not_found = || {
            OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::HeaderNotFound(
                alloy_eips::BlockId::Number(BlockNumberOrTag::Latest),
            ))
        };
        let header = self
            .provider()
            .latest_header()
            .map_err(|_| header_not_found())?
            .ok_or_else(header_not_found)?;
        // Ensure that the condition can still be met by checking the max bounds
        if condition.has_exceeded_block_number(header.header().number()) ||
            condition.has_exceeded_timestamp(header.header().timestamp())
        {
            return Err(TxConditionalErr::InvalidCondition.into());
        }
        // Validate Account
        self.validate_known_accounts(&condition).await?;
        if let Some(sequencer) = self.sequencer_client() {
            // If we have a sequencer client, forward the transaction
            let _ = sequencer
                .forward_raw_transaction_conditional(bytes.as_ref(), condition)
                .await
                .map_err(OpEthApiError::Sequencer)?;
            Ok(*tx.hash())
        } else {
            // otherwise, add to pool with the appended conditional
            tx.set_conditional(condition);
            let AddedTransactionOutcome { hash, .. } =
                self.pool().add_transaction(TransactionOrigin::Private, tx).await.map_err(|e| {
                    OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::PoolError(e.into()))
                })?;
            Ok(hash)
        }
    }
}
/// State shared by all clones of [`OpEthExtApi`].
#[derive(Debug)]
struct OpEthExtApiInner<Pool, Provider> {
    /// The transaction pool of the node.
    pool: Pool,
    /// The provider type used to interact with the node.
    provider: Provider,
    /// The semaphore used to limit the number of concurrent conditional validations.
    validation_semaphore: Semaphore,
}
impl<Pool, Provider> OpEthExtApiInner<Pool, Provider> {
    /// Creates the shared state with a freshly initialized validation semaphore.
    fn new(pool: Pool, provider: Provider) -> Self {
        // Bound how many `known_accounts` validations may run at once.
        let validation_semaphore = Semaphore::new(MAX_CONCURRENT_CONDITIONAL_VALIDATIONS);
        Self { pool, provider, validation_semaphore }
    }
    /// Returns a reference to the transaction pool.
    #[inline]
    const fn pool(&self) -> &Pool {
        &self.pool
    }
    /// Returns a reference to the provider.
    #[inline]
    const fn provider(&self) -> &Provider {
        &self.provider
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/call.rs | crates/optimism/rpc/src/eth/call.rs | use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError};
use reth_evm::{SpecFor, TxEnvFor};
use reth_rpc_eth_api::{
helpers::{estimate::EstimateCall, Call, EthCall},
FromEvmError, RpcConvert,
};
// `EthCall` is implemented entirely via its default methods; node-specific limits are
// supplied by the `Call` impl in this file.
impl<N, Rpc> EthCall for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    OpEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<
        Primitives = N::Primitives,
        Error = OpEthApiError,
        TxEnv = TxEnvFor<N::Evm>,
        Spec = SpecFor<N::Evm>,
    >,
{
}
// `EstimateCall` is implemented entirely via its default methods.
impl<N, Rpc> EstimateCall for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    OpEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<
        Primitives = N::Primitives,
        Error = OpEthApiError,
        TxEnv = TxEnvFor<N::Evm>,
        Spec = SpecFor<N::Evm>,
    >,
{
}
impl<N, Rpc> Call for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    OpEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<
        Primitives = N::Primitives,
        Error = OpEthApiError,
        TxEnv = TxEnvFor<N::Evm>,
        Spec = SpecFor<N::Evm>,
    >,
{
    /// Gas limit for call-like requests; delegates to the inner eth api's gas cap.
    #[inline]
    fn call_gas_limit(&self) -> u64 {
        self.inner.eth_api.gas_cap()
    }
    /// Maximum number of blocks for simulation; delegates to the inner eth api.
    #[inline]
    fn max_simulate_blocks(&self) -> u64 {
        self.inner.eth_api.max_simulate_blocks()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/block.rs | crates/optimism/rpc/src/eth/block.rs | //! Loads and formats OP block RPC response.
use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError};
use reth_rpc_eth_api::{
helpers::{EthBlocks, LoadBlock},
FromEvmError, RpcConvert,
};
// `EthBlocks` is implemented entirely via its default methods.
impl<N, Rpc> EthBlocks for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    OpEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
}
// `LoadBlock` is implemented entirely via its default methods.
impl<N, Rpc> LoadBlock for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    OpEthApiError: FromEvmError<N::Evm>,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/receipt.rs | crates/optimism/rpc/src/eth/receipt.rs | //! Loads and formats OP receipt RPC response.
use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError};
use alloy_eips::eip2718::Encodable2718;
use alloy_rpc_types_eth::{Log, TransactionReceipt};
use op_alloy_consensus::{
OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, OpTransaction,
};
use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields};
use reth_chainspec::ChainSpecProvider;
use reth_node_api::NodePrimitives;
use reth_optimism_evm::RethL1BlockInfo;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::OpReceipt;
use reth_primitives_traits::Block;
use reth_rpc_eth_api::{
helpers::LoadReceipt,
transaction::{ConvertReceiptInput, ReceiptConverter},
RpcConvert,
};
use reth_rpc_eth_types::{receipt::build_receipt, EthApiError};
use reth_storage_api::BlockReader;
use std::fmt::Debug;
// `LoadReceipt` is implemented entirely via its default methods; receipt conversion is
// handled by `OpReceiptConverter` below.
impl<N, Rpc> LoadReceipt for OpEthApi<N, Rpc>
where
    N: RpcNodeCore,
    Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
}
/// Converter for OP receipts.
#[derive(Debug, Clone)]
pub struct OpReceiptConverter<Provider> {
    /// Provider used to load the receipts' block and the chain spec.
    provider: Provider,
}
impl<Provider> OpReceiptConverter<Provider> {
    /// Creates a new [`OpReceiptConverter`] backed by the given provider.
    pub const fn new(provider: Provider) -> Self {
        Self { provider }
    }
}
impl<Provider, N> ReceiptConverter<N> for OpReceiptConverter<Provider>
where
    N: NodePrimitives<SignedTx: OpTransaction, Receipt = OpReceipt>,
    Provider: BlockReader + ChainSpecProvider<ChainSpec: OpHardforks> + Debug + 'static,
{
    type RpcReceipt = OpTransactionReceipt;
    type Error = OpEthApiError;
    /// Converts a single block's worth of receipt inputs into RPC receipts, enriching
    /// each with L1 fee data extracted from the block body.
    ///
    /// All `inputs` are assumed to belong to the same block (the first input's block
    /// number is used to load the block). An empty input yields an empty result.
    fn convert_receipts(
        &self,
        inputs: Vec<ConvertReceiptInput<'_, N>>,
    ) -> Result<Vec<Self::RpcReceipt>, Self::Error> {
        let Some(block_number) = inputs.first().map(|r| r.meta.block_number) else {
            return Ok(Vec::new());
        };
        let block = self
            .provider
            .block_by_number(block_number)?
            .ok_or(EthApiError::HeaderNotFound(block_number.into()))?;
        let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) {
            Ok(l1_block_info) => l1_block_info,
            Err(err) => {
                // If it is the genesis block (i.e block number is 0), there is no L1 info, so
                // we return an empty l1_block_info.
                if block_number == 0 {
                    return Ok(vec![]);
                }
                return Err(err.into());
            }
        };
        let mut receipts = Vec::with_capacity(inputs.len());
        for input in inputs {
            // We must clear this cache as different L2 transactions can have different
            // L1 costs. A potential improvement here is to only clear the cache if the
            // new transaction input has changed, since otherwise the L1 cost wouldn't.
            l1_block_info.clear_tx_l1_cost();
            receipts.push(
                OpReceiptBuilder::new(&self.provider.chain_spec(), input, &mut l1_block_info)?
                    .build(),
            );
        }
        Ok(receipts)
    }
}
/// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a
/// deposit transaction.
#[derive(Debug, Clone)]
pub struct OpReceiptFieldsBuilder {
    /// Block number.
    pub block_number: u64,
    /// Block timestamp.
    pub block_timestamp: u64,
    /// The L1 fee for transaction.
    pub l1_fee: Option<u128>,
    /// L1 gas used by transaction.
    pub l1_data_gas: Option<u128>,
    /// L1 fee scalar.
    pub l1_fee_scalar: Option<f64>,
    /* ---------------------------------------- Bedrock ---------------------------------------- */
    /// The base fee of the L1 origin block.
    pub l1_base_fee: Option<u128>,
    /* --------------------------------------- Regolith ---------------------------------------- */
    /// Deposit nonce, if this is a deposit transaction.
    pub deposit_nonce: Option<u64>,
    /* ---------------------------------------- Canyon ----------------------------------------- */
    /// Deposit receipt version, if this is a deposit transaction.
    pub deposit_receipt_version: Option<u64>,
    /* ---------------------------------------- Ecotone ---------------------------------------- */
    /// The current L1 fee scalar.
    pub l1_base_fee_scalar: Option<u128>,
    /// The current L1 blob base fee.
    pub l1_blob_base_fee: Option<u128>,
    /// The current L1 blob base fee scalar.
    pub l1_blob_base_fee_scalar: Option<u128>,
    /// The current operator fee scalar.
    pub operator_fee_scalar: Option<u128>,
    /// The current operator fee constant.
    pub operator_fee_constant: Option<u128>,
}
impl OpReceiptFieldsBuilder {
    /// Returns a new builder with all fee/deposit fields unset.
    pub const fn new(block_timestamp: u64, block_number: u64) -> Self {
        Self {
            block_number,
            block_timestamp,
            l1_fee: None,
            l1_data_gas: None,
            l1_fee_scalar: None,
            l1_base_fee: None,
            deposit_nonce: None,
            deposit_receipt_version: None,
            l1_base_fee_scalar: None,
            l1_blob_base_fee: None,
            l1_blob_base_fee_scalar: None,
            operator_fee_scalar: None,
            operator_fee_constant: None,
        }
    }
    /// Applies [`L1BlockInfo`](op_revm::L1BlockInfo).
    ///
    /// Computes the transaction's L1 data fee and data gas from its 2718-encoded bytes
    /// and copies the current L1 fee parameters out of `l1_block_info`.
    pub fn l1_block_info<T: Encodable2718 + OpTransaction>(
        mut self,
        chain_spec: &impl OpHardforks,
        tx: &T,
        l1_block_info: &mut op_revm::L1BlockInfo,
    ) -> Result<Self, OpEthApiError> {
        let raw_tx = tx.encoded_2718();
        let timestamp = self.block_timestamp;
        self.l1_fee = Some(
            l1_block_info
                .l1_tx_data_fee(chain_spec, timestamp, &raw_tx, tx.is_deposit())
                .map_err(|_| OpEthApiError::L1BlockFeeError)?
                .saturating_to(),
        );
        self.l1_data_gas = Some(
            l1_block_info
                .l1_data_gas(chain_spec, timestamp, &raw_tx)
                .map_err(|_| OpEthApiError::L1BlockGasError)?
                .saturating_add(l1_block_info.l1_fee_overhead.unwrap_or_default())
                .saturating_to(),
        );
        // The legacy fee scalar is only reported before Ecotone is active.
        self.l1_fee_scalar = (!chain_spec.is_ecotone_active_at_timestamp(timestamp))
            .then_some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0);
        self.l1_base_fee = Some(l1_block_info.l1_base_fee.saturating_to());
        self.l1_base_fee_scalar = Some(l1_block_info.l1_base_fee_scalar.saturating_to());
        self.l1_blob_base_fee = l1_block_info.l1_blob_base_fee.map(|fee| fee.saturating_to());
        self.l1_blob_base_fee_scalar =
            l1_block_info.l1_blob_base_fee_scalar.map(|scalar| scalar.saturating_to());
        // If the operator fee params are both set to 0, we don't add them to the receipt.
        let operator_fee_scalar_has_non_zero_value: bool =
            l1_block_info.operator_fee_scalar.is_some_and(|scalar| !scalar.is_zero());
        let operator_fee_constant_has_non_zero_value =
            l1_block_info.operator_fee_constant.is_some_and(|constant| !constant.is_zero());
        if operator_fee_scalar_has_non_zero_value || operator_fee_constant_has_non_zero_value {
            self.operator_fee_scalar =
                l1_block_info.operator_fee_scalar.map(|scalar| scalar.saturating_to());
            self.operator_fee_constant =
                l1_block_info.operator_fee_constant.map(|constant| constant.saturating_to());
        }
        Ok(self)
    }
    /// Applies deposit transaction metadata: deposit nonce.
    pub const fn deposit_nonce(mut self, nonce: Option<u64>) -> Self {
        self.deposit_nonce = nonce;
        self
    }
    /// Applies deposit transaction metadata: deposit receipt version.
    pub const fn deposit_version(mut self, version: Option<u64>) -> Self {
        self.deposit_receipt_version = version;
        self
    }
    /// Builds the [`OpTransactionReceiptFields`] object.
    pub const fn build(self) -> OpTransactionReceiptFields {
        let Self {
            block_number: _,    // used to compute other fields
            block_timestamp: _, // used to compute other fields
            l1_fee,
            l1_data_gas: l1_gas_used,
            l1_fee_scalar,
            l1_base_fee: l1_gas_price,
            deposit_nonce,
            deposit_receipt_version,
            l1_base_fee_scalar,
            l1_blob_base_fee,
            l1_blob_base_fee_scalar,
            operator_fee_scalar,
            operator_fee_constant,
        } = self;
        OpTransactionReceiptFields {
            l1_block_info: L1BlockInfo {
                l1_gas_price,
                l1_gas_used,
                l1_fee,
                l1_fee_scalar,
                l1_base_fee_scalar,
                l1_blob_base_fee,
                l1_blob_base_fee_scalar,
                operator_fee_scalar,
                operator_fee_constant,
            },
            deposit_nonce,
            deposit_receipt_version,
        }
    }
}
/// Builds an [`OpTransactionReceipt`].
#[derive(Debug)]
pub struct OpReceiptBuilder {
    /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt.
    pub core_receipt: TransactionReceipt<OpReceiptEnvelope<Log>>,
    /// Additional OP receipt fields.
    pub op_receipt_fields: OpTransactionReceiptFields,
}
impl OpReceiptBuilder {
    /// Returns a new builder.
    ///
    /// Builds the core receipt envelope from `input` and computes the OP-specific L1
    /// fee fields for the transaction via [`OpReceiptFieldsBuilder`].
    pub fn new<N>(
        chain_spec: &impl OpHardforks,
        input: ConvertReceiptInput<'_, N>,
        l1_block_info: &mut op_revm::L1BlockInfo,
    ) -> Result<Self, OpEthApiError>
    where
        N: NodePrimitives<SignedTx: OpTransaction, Receipt = OpReceipt>,
    {
        let timestamp = input.meta.timestamp;
        let block_number = input.meta.block_number;
        let tx_signed = *input.tx.inner();
        // Wrap the consensus receipt in the matching typed envelope; deposit receipts
        // additionally carry their nonce and receipt version.
        let core_receipt =
            build_receipt(&input, None, |receipt_with_bloom| match input.receipt.as_ref() {
                OpReceipt::Legacy(_) => OpReceiptEnvelope::Legacy(receipt_with_bloom),
                OpReceipt::Eip2930(_) => OpReceiptEnvelope::Eip2930(receipt_with_bloom),
                OpReceipt::Eip1559(_) => OpReceiptEnvelope::Eip1559(receipt_with_bloom),
                OpReceipt::Eip7702(_) => OpReceiptEnvelope::Eip7702(receipt_with_bloom),
                OpReceipt::Deposit(receipt) => {
                    OpReceiptEnvelope::Deposit(OpDepositReceiptWithBloom {
                        receipt: OpDepositReceipt {
                            inner: receipt_with_bloom.receipt,
                            deposit_nonce: receipt.deposit_nonce,
                            deposit_receipt_version: receipt.deposit_receipt_version,
                        },
                        logs_bloom: receipt_with_bloom.logs_bloom,
                    })
                }
            });
        let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number)
            .l1_block_info(chain_spec, tx_signed, l1_block_info)?
            .build();
        Ok(Self { core_receipt, op_receipt_fields })
    }
    /// Builds [`OpTransactionReceipt`] by combining core (l1) receipt fields and additional OP
    /// receipt fields.
    pub fn build(self) -> OpTransactionReceipt {
        let Self { core_receipt: inner, op_receipt_fields } = self;
        let OpTransactionReceiptFields { l1_block_info, .. } = op_receipt_fields;
        OpTransactionReceipt { inner, l1_block_info }
    }
}
#[cfg(test)]
mod test {
use super::*;
use alloy_consensus::{Block, BlockBody};
use alloy_primitives::{hex, U256};
use op_alloy_network::eip2718::Decodable2718;
use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET};
use reth_optimism_primitives::OpTransactionSigned;
/// OP Mainnet transaction at index 0 in block 124665056.
///
/// <https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1>
const TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056: [u8; 251] = hex!(
"7ef8f8a0683079df94aa5b9cf86687d739a60a9b4f0835e520ec4d664e2e415dca17a6df94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"
);
/// OP Mainnet transaction at index 1 in block 124665056.
///
/// <https://optimistic.etherscan.io/tx/0x1059e8004daff32caa1f1b1ef97fe3a07a8cf40508f5b835b66d9420d87c4a4a>
const TX_1_OP_MAINNET_BLOCK_124665056: [u8; 1176] = hex!(
"02f904940a8303fba78401d6d2798401db2b6d830493e0943e6f4f7866654c18f536170780344aa8772950b680b904246a761202000000000000000000000000087000a300de7200382b55d40045000000e5d60e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000022482ad56cb0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000120000000000000000000000000dc6ff44d5d932cbd77b52e5612ba0529dc6226f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000044095ea7b300000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c0000000000000000000000000000000000000000000000049b9ca9a6943400000000000000000000000000000000000000000000000000000000000000000000000000000000000021c4928109acb0659a88ae5329b5374a3024694c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000024b6b55f250000000000000000000000000000000000000000000000049b9ca9a6943400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000415ec214a3950bea839a7e6fbb0ba1540ac2076acd50820e2d5ef83d0902cdffb24a47aff7de5190290769c4f0a9c6fabf63012986a0d590b1b571547a8c7050ea1b00000000000000000000000000000000000000000000000000000000000000c080a06db770e6e25a617fe9652f0958bd9bd6e49281a53036906386ed39ec48eadf63a07f47cf51a4a40b4494cf26efc686709a9b03939e20ee27e59682f5faa536667e"
);
/// Timestamp of OP mainnet block 124665056.
///
/// <https://optimistic.etherscan.io/block/124665056>
const BLOCK_124665056_TIMESTAMP: u64 = 1724928889;
/// L1 block info for transaction at index 1 in block 124665056.
///
/// <https://optimistic.etherscan.io/tx/0x1059e8004daff32caa1f1b1ef97fe3a07a8cf40508f5b835b66d9420d87c4a4a>
const TX_META_TX_1_OP_MAINNET_BLOCK_124665056: OpTransactionReceiptFields =
OpTransactionReceiptFields {
l1_block_info: L1BlockInfo {
l1_gas_price: Some(1055991687), // since bedrock l1 base fee
l1_gas_used: Some(4471),
l1_fee: Some(24681034813),
l1_fee_scalar: None,
l1_base_fee_scalar: Some(5227),
l1_blob_base_fee: Some(1),
l1_blob_base_fee_scalar: Some(1014213),
operator_fee_scalar: None,
operator_fee_constant: None,
},
deposit_nonce: None,
deposit_receipt_version: None,
};
#[test]
fn op_receipt_fields_from_block_and_tx() {
// rig
let tx_0 = OpTransactionSigned::decode_2718(
&mut TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056.as_slice(),
)
.unwrap();
let tx_1 =
OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice())
.unwrap();
let block: Block<OpTransactionSigned> = Block {
body: BlockBody { transactions: [tx_0, tx_1.clone()].to_vec(), ..Default::default() },
..Default::default()
};
let mut l1_block_info =
reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info");
// test
assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP));
let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056)
.l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info)
.expect("should parse revm l1 info")
.build();
let L1BlockInfo {
l1_gas_price,
l1_gas_used,
l1_fee,
l1_fee_scalar,
l1_base_fee_scalar,
l1_blob_base_fee,
l1_blob_base_fee_scalar,
operator_fee_scalar,
operator_fee_constant,
} = receipt_meta.l1_block_info;
assert_eq!(
l1_gas_price, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_gas_price,
"incorrect l1 base fee (former gas price)"
);
assert_eq!(
l1_gas_used, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_gas_used,
"incorrect l1 gas used"
);
assert_eq!(
l1_fee, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_fee,
"incorrect l1 fee"
);
assert_eq!(
l1_fee_scalar, TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_fee_scalar,
"incorrect l1 fee scalar"
);
assert_eq!(
l1_base_fee_scalar,
TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_base_fee_scalar,
"incorrect l1 base fee scalar"
);
assert_eq!(
l1_blob_base_fee,
TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_blob_base_fee,
"incorrect l1 blob base fee"
);
assert_eq!(
l1_blob_base_fee_scalar,
TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.l1_blob_base_fee_scalar,
"incorrect l1 blob base fee scalar"
);
assert_eq!(
operator_fee_scalar,
TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_scalar,
"incorrect operator fee scalar"
);
assert_eq!(
operator_fee_constant,
TX_META_TX_1_OP_MAINNET_BLOCK_124665056.l1_block_info.operator_fee_constant,
"incorrect operator fee constant"
);
}
#[test]
fn op_non_zero_operator_fee_params_included_in_receipt() {
let tx_1 =
OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice())
.unwrap();
let mut l1_block_info = op_revm::L1BlockInfo::default();
l1_block_info.operator_fee_scalar = Some(U256::ZERO);
l1_block_info.operator_fee_constant = Some(U256::from(2));
let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056)
.l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info)
.expect("should parse revm l1 info")
.build();
let L1BlockInfo { operator_fee_scalar, operator_fee_constant, .. } =
receipt_meta.l1_block_info;
assert_eq!(operator_fee_scalar, Some(0), "incorrect operator fee scalar");
assert_eq!(operator_fee_constant, Some(2), "incorrect operator fee constant");
}
#[test]
fn op_zero_operator_fee_params_not_included_in_receipt() {
let tx_1 =
OpTransactionSigned::decode_2718(&mut TX_1_OP_MAINNET_BLOCK_124665056.as_slice())
.unwrap();
let mut l1_block_info = op_revm::L1BlockInfo::default();
l1_block_info.operator_fee_scalar = Some(U256::ZERO);
l1_block_info.operator_fee_constant = Some(U256::ZERO);
let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP, 124665056)
.l1_block_info(&*OP_MAINNET, &tx_1, &mut l1_block_info)
.expect("should parse revm l1 info")
.build();
let L1BlockInfo { operator_fee_scalar, operator_fee_constant, .. } =
receipt_meta.l1_block_info;
assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar");
assert_eq!(operator_fee_constant, None, "incorrect operator fee constant");
}
// <https://github.com/paradigmxyz/reth/issues/12177>
#[test]
fn base_receipt_gas_fields() {
// https://basescan.org/tx/0x510fd4c47d78ba9f97c91b0f2ace954d5384c169c9545a77a373cf3ef8254e6e
let system = hex!(
"7ef8f8a0389e292420bcbf9330741f72074e39562a09ff5a00fd22e4e9eee7e34b81bca494deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000008dd00101c120000000000000004000000006721035b00000000014189960000000000000000000000000000000000000000000000000000000349b4dcdc000000000000000000000000000000000000000000000000000000004ef9325cc5991ce750960f636ca2ffbb6e209bb3ba91412f21dd78c14ff154d1930f1f9a0000000000000000000000005050f69a9786f081509234f1a7f4684b5e5b76c9"
);
let tx_0 = OpTransactionSigned::decode_2718(&mut &system[..]).unwrap();
let block: alloy_consensus::Block<OpTransactionSigned> = Block {
body: BlockBody { transactions: vec![tx_0], ..Default::default() },
..Default::default()
};
let mut l1_block_info =
reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info");
// https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94
let tx = hex!(
"02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd"
);
let tx_1 = OpTransactionSigned::decode_2718(&mut &tx[..]).unwrap();
let receipt_meta = OpReceiptFieldsBuilder::new(1730216981, 21713817)
.l1_block_info(&*BASE_MAINNET, &tx_1, &mut l1_block_info)
.expect("should parse revm l1 info")
.build();
let L1BlockInfo {
l1_gas_price,
l1_gas_used,
l1_fee,
l1_fee_scalar,
l1_base_fee_scalar,
l1_blob_base_fee,
l1_blob_base_fee_scalar,
operator_fee_scalar,
operator_fee_constant,
} = receipt_meta.l1_block_info;
assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)");
assert_eq!(l1_gas_used, Some(1600), "incorrect l1 gas used");
assert_eq!(l1_fee, Some(191150293412), "incorrect l1 fee");
assert!(l1_fee_scalar.is_none(), "incorrect l1 fee scalar");
assert_eq!(l1_base_fee_scalar, Some(2269), "incorrect l1 base fee scalar");
assert_eq!(l1_blob_base_fee, Some(1324954204), "incorrect l1 blob base fee");
assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar");
assert_eq!(operator_fee_scalar, None, "incorrect operator fee scalar");
assert_eq!(operator_fee_constant, None, "incorrect operator fee constant");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/mod.rs | crates/optimism/rpc/src/eth/mod.rs | //! OP-Reth `eth_` endpoint implementation.
pub mod ext;
pub mod receipt;
pub mod transaction;
mod block;
mod call;
mod pending_block;
use crate::{
eth::{receipt::OpReceiptConverter, transaction::OpTxInfoMapper},
OpEthApiError, SequencerClient,
};
use alloy_consensus::BlockHeader;
use alloy_primitives::U256;
use eyre::WrapErr;
use op_alloy_network::Optimism;
pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder};
use reqwest::Url;
use reth_evm::ConfigureEvm;
use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy};
use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx};
use reth_optimism_flashblocks::{
ExecutionPayloadBaseV1, FlashBlockCompleteSequenceRx, FlashBlockService, PendingBlockRx,
WsFlashBlockStream,
};
use reth_rpc::eth::{core::EthApiInner, DevSigner};
use reth_rpc_eth_api::{
helpers::{
pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees,
EthState, LoadFee, LoadPendingBlock, LoadState, SpawnBlocking, Trace,
},
EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore,
RpcNodeCoreExt, RpcTypes, SignableTxRequest,
};
use reth_rpc_eth_types::{
pending_block::PendingBlockAndReceipts, EthStateCache, FeeHistoryCache, GasPriceOracle,
PendingBlockEnvOrigin,
};
use reth_storage_api::{ProviderHeader, ProviderTx};
use reth_tasks::{
pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner,
};
use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc, time::Instant};
use tokio::sync::watch;
use tracing::info;
/// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API.
pub type EthApiNodeBackend<N, Rpc> = EthApiInner<N, Rpc>;
/// OP-Reth `Eth` API implementation.
///
/// This type provides the functionality for handling `eth_` related requests.
///
/// This wraps a default `Eth` implementation, and provides additional functionality where the
/// optimism spec deviates from the default (ethereum) spec, e.g. transaction forwarding to the
/// sequencer, receipts, additional RPC fields for transaction receipts.
///
/// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented
/// all the `Eth` helper traits and prerequisite traits.
pub struct OpEthApi<N: RpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
inner: Arc<OpEthApiInner<N, Rpc>>,
}
impl<N: RpcNodeCore, Rpc: RpcConvert> Clone for OpEthApi<N, Rpc> {
fn clone(&self) -> Self {
Self { inner: self.inner.clone() }
}
}
impl<N: RpcNodeCore, Rpc: RpcConvert> OpEthApi<N, Rpc> {
/// Creates a new `OpEthApi`.
pub fn new(
eth_api: EthApiNodeBackend<N, Rpc>,
sequencer_client: Option<SequencerClient>,
min_suggested_priority_fee: U256,
pending_block_rx: Option<PendingBlockRx<N::Primitives>>,
flashblock_rx: Option<FlashBlockCompleteSequenceRx>,
) -> Self {
let inner = Arc::new(OpEthApiInner {
eth_api,
sequencer_client,
min_suggested_priority_fee,
pending_block_rx,
flashblock_rx,
});
Self { inner }
}
/// Returns a reference to the [`EthApiNodeBackend`].
pub fn eth_api(&self) -> &EthApiNodeBackend<N, Rpc> {
self.inner.eth_api()
}
/// Returns the configured sequencer client, if any.
pub fn sequencer_client(&self) -> Option<&SequencerClient> {
self.inner.sequencer_client()
}
/// Returns a cloned pending block receiver, if any.
pub fn pending_block_rx(&self) -> Option<PendingBlockRx<N::Primitives>> {
self.inner.pending_block_rx.clone()
}
/// Returns a flashblock receiver, if any, by resubscribing to it.
pub fn flashblock_rx(&self) -> Option<FlashBlockCompleteSequenceRx> {
self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe())
}
/// Build a [`OpEthApi`] using [`OpEthApiBuilder`].
pub const fn builder() -> OpEthApiBuilder<Rpc> {
OpEthApiBuilder::new()
}
/// Returns a [`PendingBlockAndReceipts`] that is built out of flashblocks.
///
/// If flashblocks receiver is not set, then it always returns `None`.
pub fn pending_flashblock(&self) -> eyre::Result<Option<PendingBlockAndReceipts<N::Primitives>>>
where
Self: LoadPendingBlock,
{
let pending = self.pending_block_env_and_cfg()?;
let parent = match pending.origin {
PendingBlockEnvOrigin::ActualPending(..) => return Ok(None),
PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent,
};
let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) };
let pending_block = rx.borrow();
let Some(pending_block) = pending_block.as_ref() else { return Ok(None) };
let now = Instant::now();
// Is the pending block not expired and latest is its parent?
if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) &&
parent.hash() == pending_block.block().parent_hash() &&
now <= pending_block.expires_at
{
return Ok(Some(pending_block.to_block_and_receipts()));
}
Ok(None)
}
}
impl<N, Rpc> EthApiTypes for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Error = OpEthApiError;
type NetworkTypes = Rpc::Network;
type RpcConvert = Rpc;
fn tx_resp_builder(&self) -> &Self::RpcConvert {
self.inner.eth_api.tx_resp_builder()
}
}
impl<N, Rpc> RpcNodeCore for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Primitives = N::Primitives;
type Provider = N::Provider;
type Pool = N::Pool;
type Evm = N::Evm;
type Network = N::Network;
#[inline]
fn pool(&self) -> &Self::Pool {
self.inner.eth_api.pool()
}
#[inline]
fn evm_config(&self) -> &Self::Evm {
self.inner.eth_api.evm_config()
}
#[inline]
fn network(&self) -> &Self::Network {
self.inner.eth_api.network()
}
#[inline]
fn provider(&self) -> &Self::Provider {
self.inner.eth_api.provider()
}
}
impl<N, Rpc> RpcNodeCoreExt for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn cache(&self) -> &EthStateCache<N::Primitives> {
self.inner.eth_api.cache()
}
}
impl<N, Rpc> EthApiSpec for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
type Transaction = ProviderTx<Self::Provider>;
type Rpc = Rpc::Network;
#[inline]
fn starting_block(&self) -> U256 {
self.inner.eth_api.starting_block()
}
#[inline]
fn signers(&self) -> &SignersForApi<Self> {
self.inner.eth_api.signers()
}
}
impl<N, Rpc> SpawnBlocking for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
#[inline]
fn io_task_spawner(&self) -> impl TaskSpawner {
self.inner.eth_api.task_spawner()
}
#[inline]
fn tracing_task_pool(&self) -> &BlockingTaskPool {
self.inner.eth_api.blocking_task_pool()
}
#[inline]
fn tracing_task_guard(&self) -> &BlockingTaskGuard {
self.inner.eth_api.blocking_task_guard()
}
}
impl<N, Rpc> LoadFee for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
OpEthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
#[inline]
fn gas_oracle(&self) -> &GasPriceOracle<Self::Provider> {
self.inner.eth_api.gas_oracle()
}
#[inline]
fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<N::Provider>> {
self.inner.eth_api.fee_history_cache()
}
async fn suggested_priority_fee(&self) -> Result<U256, Self::Error> {
let min_tip = U256::from(self.inner.min_suggested_priority_fee);
self.inner.eth_api.gas_oracle().op_suggest_tip_cap(min_tip).await.map_err(Into::into)
}
}
impl<N, Rpc> LoadState for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
Self: LoadPendingBlock,
{
}
impl<N, Rpc> EthState for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
Self: LoadPendingBlock,
{
#[inline]
fn max_proof_window(&self) -> u64 {
self.inner.eth_api.eth_proof_window()
}
}
impl<N, Rpc> EthFees for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
OpEthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
}
impl<N, Rpc> Trace for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
OpEthApiError: FromEvmError<N::Evm>,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
}
impl<N, Rpc> AddDevSigners for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<
Network: RpcTypes<TransactionRequest: SignableTxRequest<ProviderTx<N::Provider>>>,
>,
{
fn with_dev_accounts(&self) {
*self.inner.eth_api.signers().write() = DevSigner::random_signers(20)
}
}
impl<N: RpcNodeCore, Rpc: RpcConvert> fmt::Debug for OpEthApi<N, Rpc> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OpEthApi").finish_non_exhaustive()
}
}
/// Container type `OpEthApi`
pub struct OpEthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
/// Gateway to node's core components.
eth_api: EthApiNodeBackend<N, Rpc>,
/// Sequencer client, configured to forward submitted transactions to sequencer of given OP
/// network.
sequencer_client: Option<SequencerClient>,
/// Minimum priority fee enforced by OP-specific logic.
///
/// See also <https://github.com/ethereum-optimism/op-geth/blob/d4e0fe9bb0c2075a9bff269fb975464dd8498f75/eth/gasprice/optimism-gasprice.go#L38-L38>
min_suggested_priority_fee: U256,
/// Pending block receiver.
///
/// If set, then it provides current pending block based on received Flashblocks.
pending_block_rx: Option<PendingBlockRx<N::Primitives>>,
/// Flashblocks receiver.
///
/// If set, then it provides sequences of flashblock built.
flashblock_rx: Option<FlashBlockCompleteSequenceRx>,
}
impl<N: RpcNodeCore, Rpc: RpcConvert> fmt::Debug for OpEthApiInner<N, Rpc> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("OpEthApiInner").finish()
}
}
impl<N: RpcNodeCore, Rpc: RpcConvert> OpEthApiInner<N, Rpc> {
/// Returns a reference to the [`EthApiNodeBackend`].
const fn eth_api(&self) -> &EthApiNodeBackend<N, Rpc> {
&self.eth_api
}
/// Returns the configured sequencer client, if any.
const fn sequencer_client(&self) -> Option<&SequencerClient> {
self.sequencer_client.as_ref()
}
}
/// Converter for OP RPC types.
pub type OpRpcConvert<N, NetworkT> = RpcConverter<
NetworkT,
<N as FullNodeComponents>::Evm,
OpReceiptConverter<<N as FullNodeTypes>::Provider>,
(),
OpTxInfoMapper<<N as FullNodeTypes>::Provider>,
>;
/// Builds [`OpEthApi`] for Optimism.
#[derive(Debug)]
pub struct OpEthApiBuilder<NetworkT = Optimism> {
/// Sequencer client, configured to forward submitted transactions to sequencer of given OP
/// network.
sequencer_url: Option<String>,
/// Headers to use for the sequencer client requests.
sequencer_headers: Vec<String>,
/// Minimum suggested priority fee (tip)
min_suggested_priority_fee: u64,
/// A URL pointing to a secure websocket connection (wss) that streams out [flashblocks].
///
/// [flashblocks]: reth_optimism_flashblocks
flashblocks_url: Option<Url>,
/// Marker for network types.
_nt: PhantomData<NetworkT>,
}
impl<NetworkT> Default for OpEthApiBuilder<NetworkT> {
fn default() -> Self {
Self {
sequencer_url: None,
sequencer_headers: Vec::new(),
min_suggested_priority_fee: 1_000_000,
flashblocks_url: None,
_nt: PhantomData,
}
}
}
impl<NetworkT> OpEthApiBuilder<NetworkT> {
/// Creates a [`OpEthApiBuilder`] instance from core components.
pub const fn new() -> Self {
Self {
sequencer_url: None,
sequencer_headers: Vec::new(),
min_suggested_priority_fee: 1_000_000,
flashblocks_url: None,
_nt: PhantomData,
}
}
/// With a [`SequencerClient`].
pub fn with_sequencer(mut self, sequencer_url: Option<String>) -> Self {
self.sequencer_url = sequencer_url;
self
}
/// With headers to use for the sequencer client requests.
pub fn with_sequencer_headers(mut self, sequencer_headers: Vec<String>) -> Self {
self.sequencer_headers = sequencer_headers;
self
}
/// With minimum suggested priority fee (tip).
pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self {
self.min_suggested_priority_fee = min;
self
}
/// With a subscription to flashblocks secure websocket connection.
pub fn with_flashblocks(mut self, flashblocks_url: Option<Url>) -> Self {
self.flashblocks_url = flashblocks_url;
self
}
}
impl<N, NetworkT> EthApiBuilder<N> for OpEthApiBuilder<NetworkT>
where
N: FullNodeComponents<
Evm: ConfigureEvm<
NextBlockEnvCtx: BuildPendingEnv<HeaderTy<N::Types>>
+ From<ExecutionPayloadBaseV1>
+ Unpin,
>,
>,
NetworkT: RpcTypes,
OpRpcConvert<N, NetworkT>: RpcConvert<Network = NetworkT>,
OpEthApi<N, OpRpcConvert<N, NetworkT>>:
FullEthApiServer<Provider = N::Provider, Pool = N::Pool> + AddDevSigners,
{
type EthApi = OpEthApi<N, OpRpcConvert<N, NetworkT>>;
async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result<Self::EthApi> {
let Self {
sequencer_url,
sequencer_headers,
min_suggested_priority_fee,
flashblocks_url,
..
} = self;
let rpc_converter =
RpcConverter::new(OpReceiptConverter::new(ctx.components.provider().clone()))
.with_mapper(OpTxInfoMapper::new(ctx.components.provider().clone()));
let sequencer_client = if let Some(url) = sequencer_url {
Some(
SequencerClient::new_with_headers(&url, sequencer_headers)
.await
.wrap_err_with(|| format!("Failed to init sequencer client with: {url}"))?,
)
} else {
None
};
let rxs = if let Some(ws_url) = flashblocks_url {
info!(target: "reth:cli", %ws_url, "Launching flashblocks service");
let (tx, pending_block_rx) = watch::channel(None);
let stream = WsFlashBlockStream::new(ws_url);
let service = FlashBlockService::new(
stream,
ctx.components.evm_config().clone(),
ctx.components.provider().clone(),
ctx.components.task_executor().clone(),
);
let flashblock_rx = service.subscribe_block_sequence();
ctx.components.task_executor().spawn(Box::pin(service.run(tx)));
Some((pending_block_rx, flashblock_rx))
} else {
None
};
let (pending_block_rx, flashblock_rx) = rxs.unzip();
let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner();
Ok(OpEthApi::new(
eth_api,
sequencer_client,
U256::from(min_suggested_priority_fee),
pending_block_rx,
flashblock_rx,
))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/rpc/src/eth/transaction.rs | crates/optimism/rpc/src/eth/transaction.rs | //! Loads and formats OP transaction RPC response.
use crate::{OpEthApi, OpEthApiError, SequencerClient};
use alloy_primitives::{Bytes, B256};
use alloy_rpc_types_eth::TransactionInfo;
use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction};
use reth_optimism_primitives::DepositReceipt;
use reth_primitives_traits::SignedTransaction;
use reth_rpc_eth_api::{
helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction},
try_into_op_tx_info, FromEthApiError, RpcConvert, RpcNodeCore, TxInfoMapper,
};
use reth_rpc_eth_types::utils::recover_raw_transaction;
use reth_storage_api::{errors::ProviderError, ReceiptProvider};
use reth_transaction_pool::{
AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool,
};
use std::fmt::{Debug, Formatter};
impl<N, Rpc> EthTransactions for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes> {
self.inner.eth_api.signers()
}
/// Decodes and recovers the transaction and submits it to the pool.
///
/// Returns the hash of the transaction.
async fn send_raw_transaction(&self, tx: Bytes) -> Result<B256, Self::Error> {
let recovered = recover_raw_transaction(&tx)?;
// broadcast raw transaction to subscribers if there is any.
self.eth_api().broadcast_raw_transaction(tx.clone());
let pool_transaction = <Self::Pool as TransactionPool>::Transaction::from_pooled(recovered);
// On optimism, transactions are forwarded directly to the sequencer to be included in
// blocks that it builds.
if let Some(client) = self.raw_tx_forwarder().as_ref() {
tracing::debug!(target: "rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer");
let hash = client.forward_raw_transaction(&tx).await.inspect_err(|err| {
tracing::debug!(target: "rpc::eth", %err, hash=% *pool_transaction.hash(), "failed to forward raw transaction");
})?;
// Retain tx in local tx pool after forwarding, for local RPC usage.
let _ = self.inner.eth_api.add_pool_transaction(pool_transaction).await.inspect_err(|err| {
tracing::warn!(target: "rpc::eth", %err, %hash, "successfully sent tx to sequencer, but failed to persist in local tx pool");
});
return Ok(hash)
}
// submit the transaction to the pool with a `Local` origin
let AddedTransactionOutcome { hash, .. } = self
.pool()
.add_transaction(TransactionOrigin::Local, pool_transaction)
.await
.map_err(Self::Error::from_eth_err)?;
Ok(hash)
}
}
impl<N, Rpc> LoadTransaction for OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives, Error = OpEthApiError>,
{
}
impl<N, Rpc> OpEthApi<N, Rpc>
where
N: RpcNodeCore,
Rpc: RpcConvert<Primitives = N::Primitives>,
{
/// Returns the [`SequencerClient`] if one is set.
pub fn raw_tx_forwarder(&self) -> Option<SequencerClient> {
self.inner.sequencer_client.clone()
}
}
/// Optimism implementation of [`TxInfoMapper`].
///
/// For deposits, receipt is fetched to extract `deposit_nonce` and `deposit_receipt_version`.
/// Otherwise, it works like regular Ethereum implementation, i.e. uses [`TransactionInfo`].
pub struct OpTxInfoMapper<Provider> {
provider: Provider,
}
impl<Provider: Clone> Clone for OpTxInfoMapper<Provider> {
fn clone(&self) -> Self {
Self { provider: self.provider.clone() }
}
}
impl<Provider> Debug for OpTxInfoMapper<Provider> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("OpTxInfoMapper").finish()
}
}
impl<Provider> OpTxInfoMapper<Provider> {
/// Creates [`OpTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`.
pub const fn new(provider: Provider) -> Self {
Self { provider }
}
}
impl<T, Provider> TxInfoMapper<T> for OpTxInfoMapper<Provider>
where
T: OpTransaction + SignedTransaction,
Provider: ReceiptProvider<Receipt: DepositReceipt>,
{
type Out = OpTransactionInfo;
type Err = ProviderError;
fn try_map(&self, tx: &T, tx_info: TransactionInfo) -> Result<Self::Out, ProviderError> {
try_into_op_tx_info(&self.provider, tx, tx_info)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/predeploys.rs | crates/optimism/primitives/src/predeploys.rs | //! Addresses of OP pre-deploys.
// todo: move to op-alloy
use alloy_primitives::{address, Address};
/// The L2 contract `L2ToL1MessagePasser`, stores commitments to withdrawal transactions.
pub const ADDRESS_L2_TO_L1_MESSAGE_PASSER: Address =
address!("0x4200000000000000000000000000000000000016");
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/lib.rs | crates/optimism/primitives/src/lib.rs | //! Standalone crate for Optimism-specific Reth primitive types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(unused)]
extern crate alloc;
pub mod bedrock;
pub mod predeploys;
pub use predeploys::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
pub mod transaction;
pub use transaction::*;
mod receipt;
pub use receipt::{DepositReceipt, OpReceipt};
/// Optimism-specific block type.
pub type OpBlock = alloy_consensus::Block<OpTransactionSigned>;
/// Optimism-specific block body type.
pub type OpBlockBody = <OpBlock as reth_primitives_traits::Block>::Body;
/// Primitive types for Optimism Node.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct OpPrimitives;
impl reth_primitives_traits::NodePrimitives for OpPrimitives {
type Block = OpBlock;
type BlockHeader = alloy_consensus::Header;
type BlockBody = OpBlockBody;
type SignedTx = OpTransactionSigned;
type Receipt = OpReceipt;
}
/// Bincode-compatible serde implementations.
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
pub use super::receipt::serde_bincode_compat::*;
pub use op_alloy_consensus::serde_bincode_compat::*;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/bedrock.rs | crates/optimism/primitives/src/bedrock.rs | //! OP mainnet bedrock related data.
use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH};
use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256};
/// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985,
/// replayed in blocks:
///
/// 19 022
/// 45 036
pub const TX_BLOCK_985: [u64; 2] = [19_022, 45_036];
/// Transaction 0xc033250c5a45f9d104fc28640071a776d146d48403cf5e95ed0015c712e26cb6 in block
/// 123 322, replayed in block:
///
/// 123 542
pub const TX_BLOCK_123_322: u64 = 123_542;
/// Transaction 0x86f8c77cfa2b439e9b4e92a10f6c17b99fce1220edf4001e4158b57f41c576e5 in block
/// 1 133 328, replayed in blocks:
///
/// 1 135 391
/// 1 144 468
pub const TX_BLOCK_1_133_328: [u64; 2] = [1_135_391, 1_144_468];
/// Transaction 0x3cc27e7cc8b7a9380b2b2f6c224ea5ef06ade62a6af564a9dd0bcca92131cd4e in block
/// 1 244 152, replayed in block:
///
/// 1 272 994
pub const TX_BLOCK_1_244_152: u64 = 1_272_994;
/// The six blocks with replayed transactions.
pub const BLOCK_NUMS_REPLAYED_TX: [u64; 6] = [
TX_BLOCK_985[0],
TX_BLOCK_985[1],
TX_BLOCK_123_322,
TX_BLOCK_1_133_328[0],
TX_BLOCK_1_133_328[1],
TX_BLOCK_1_244_152,
];
/// Returns `true` if transaction is the second or third appearance of the transaction. The blocks
/// with replayed transaction happen to only contain the single transaction.
pub fn is_dup_tx(block_number: u64) -> bool {
if block_number > BLOCK_NUMS_REPLAYED_TX[5] {
return false
}
// these blocks just have one transaction!
if BLOCK_NUMS_REPLAYED_TX.contains(&block_number) {
return true
}
false
}
/// OVM Header #1 hash.
///
/// <https://optimistic.etherscan.io/block/0xbee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453>
pub const OVM_HEADER_1_HASH: B256 =
b256!("0xbee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453");
/// Bedrock hash on Optimism Mainnet.
///
/// <https://optimistic.etherscan.io/block/0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3>
pub const BEDROCK_HEADER_HASH: B256 =
b256!("0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3");
/// Bedrock on Optimism Mainnet. (`105_235_063`)
pub const BEDROCK_HEADER: Header = Header {
difficulty: U256::ZERO,
extra_data: bytes!("424544524f434b"),
gas_limit: 30000000,
gas_used: 0,
logs_bloom: bloom!(
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
),
nonce: B64::ZERO,
number: 105235063,
parent_hash: b256!("0x21a168dfa5e727926063a28ba16fd5ee84c814e847c81a699c7a0ea551e4ca50"),
receipts_root: EMPTY_ROOT_HASH,
state_root: b256!("0x920314c198da844a041d63bf6cbe8b59583165fd2229d1b3f599da812fd424cb"),
timestamp: 1686068903,
transactions_root: EMPTY_ROOT_HASH,
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: address!("0x4200000000000000000000000000000000000011"),
withdrawals_root: None,
mix_hash: B256::ZERO,
base_fee_per_gas: Some(0x3b9aca00),
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
};
/// Bedrock total difficulty on Optimism Mainnet.
pub const BEDROCK_HEADER_TTD: U256 = U256::ZERO;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_bedrock_header() {
assert_eq!(BEDROCK_HEADER.hash_slow(), BEDROCK_HEADER_HASH);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/receipt.rs | crates/optimism/primitives/src/receipt.rs | use alloc::vec::Vec;
use alloy_consensus::{
Eip2718EncodableReceipt, Eip658Value, Receipt, ReceiptWithBloom, RlpDecodableReceipt,
RlpEncodableReceipt, TxReceipt, Typed2718,
};
use alloy_eips::{
eip2718::{Eip2718Result, IsTyped2718},
Decodable2718, Encodable2718,
};
use alloy_primitives::{Bloom, Log};
use alloy_rlp::{BufMut, Decodable, Encodable, Header};
use op_alloy_consensus::{OpDepositReceipt, OpTxType};
use reth_primitives_traits::InMemorySize;
/// Typed Optimism transaction receipt, one variant per supported transaction type.
///
/// Contains the result of transaction execution. The `Deposit` variant carries
/// deposit-specific extra fields ([`OpDepositReceipt`]); all other variants wrap
/// a plain [`Receipt`].
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[cfg_attr(feature = "reth-codec", reth_codecs::add_arbitrary_tests(rlp))]
pub enum OpReceipt {
    /// Legacy receipt
    Legacy(Receipt),
    /// EIP-2930 receipt
    Eip2930(Receipt),
    /// EIP-1559 receipt
    Eip1559(Receipt),
    /// EIP-7702 receipt
    Eip7702(Receipt),
    /// Deposit receipt
    Deposit(OpDepositReceipt),
}
impl OpReceipt {
    /// Returns the [`OpTxType`] of the receipt.
    pub const fn tx_type(&self) -> OpTxType {
        match self {
            Self::Legacy(_) => OpTxType::Legacy,
            Self::Eip2930(_) => OpTxType::Eip2930,
            Self::Eip1559(_) => OpTxType::Eip1559,
            Self::Eip7702(_) => OpTxType::Eip7702,
            Self::Deposit(_) => OpTxType::Deposit,
        }
    }

    /// Returns a reference to the inner [`Receipt`].
    pub const fn as_receipt(&self) -> &Receipt {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => receipt,
            // Deposit receipts embed the plain receipt as `inner`.
            Self::Deposit(receipt) => &receipt.inner,
        }
    }

    /// Returns a mutable reference to the inner [`Receipt`].
    pub const fn as_receipt_mut(&mut self) -> &mut Receipt {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => receipt,
            Self::Deposit(receipt) => &mut receipt.inner,
        }
    }

    /// Consumes this and returns the inner [`Receipt`].
    ///
    /// For deposit receipts the deposit nonce/version fields are discarded.
    pub fn into_receipt(self) -> Receipt {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => receipt,
            Self::Deposit(receipt) => receipt.inner,
        }
    }

    /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header.
    pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => receipt.rlp_encoded_fields_length_with_bloom(bloom),
            // Dispatches to `OpDepositReceipt`, which also counts the deposit fields.
            Self::Deposit(receipt) => receipt.rlp_encoded_fields_length_with_bloom(bloom),
        }
    }

    /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header.
    pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => receipt.rlp_encode_fields_with_bloom(bloom, out),
            // Dispatches to `OpDepositReceipt`, which also emits the deposit fields.
            Self::Deposit(receipt) => receipt.rlp_encode_fields_with_bloom(bloom, out),
        }
    }

    /// Returns the RLP list header covering the receipt fields (with bloom).
    pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header {
        Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) }
    }

    /// Returns the RLP list header covering the receipt fields, without the bloom filter.
    pub fn rlp_header_inner_without_bloom(&self) -> Header {
        Header { list: true, payload_length: self.rlp_encoded_fields_length_without_bloom() }
    }

    /// RLP-decodes the receipt from the provided buffer. This does not expect a type byte or
    /// network header.
    ///
    /// Each arm decodes the inner receipt via its `RlpDecodableReceipt` impl and
    /// rewraps it into the `OpReceipt` variant matching `tx_type`.
    pub fn rlp_decode_inner(
        buf: &mut &[u8],
        tx_type: OpTxType,
    ) -> alloy_rlp::Result<ReceiptWithBloom<Self>> {
        match tx_type {
            OpTxType::Legacy => {
                let ReceiptWithBloom { receipt, logs_bloom } =
                    RlpDecodableReceipt::rlp_decode_with_bloom(buf)?;
                Ok(ReceiptWithBloom { receipt: Self::Legacy(receipt), logs_bloom })
            }
            OpTxType::Eip2930 => {
                let ReceiptWithBloom { receipt, logs_bloom } =
                    RlpDecodableReceipt::rlp_decode_with_bloom(buf)?;
                Ok(ReceiptWithBloom { receipt: Self::Eip2930(receipt), logs_bloom })
            }
            OpTxType::Eip1559 => {
                let ReceiptWithBloom { receipt, logs_bloom } =
                    RlpDecodableReceipt::rlp_decode_with_bloom(buf)?;
                Ok(ReceiptWithBloom { receipt: Self::Eip1559(receipt), logs_bloom })
            }
            OpTxType::Eip7702 => {
                let ReceiptWithBloom { receipt, logs_bloom } =
                    RlpDecodableReceipt::rlp_decode_with_bloom(buf)?;
                Ok(ReceiptWithBloom { receipt: Self::Eip7702(receipt), logs_bloom })
            }
            OpTxType::Deposit => {
                let ReceiptWithBloom { receipt, logs_bloom } =
                    RlpDecodableReceipt::rlp_decode_with_bloom(buf)?;
                Ok(ReceiptWithBloom { receipt: Self::Deposit(receipt), logs_bloom })
            }
        }
    }

    /// RLP-encodes receipt fields without an RLP header and without the bloom filter.
    pub fn rlp_encode_fields_without_bloom(&self, out: &mut dyn BufMut) {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => {
                receipt.status.encode(out);
                receipt.cumulative_gas_used.encode(out);
                receipt.logs.encode(out);
            }
            Self::Deposit(receipt) => {
                receipt.inner.status.encode(out);
                receipt.inner.cumulative_gas_used.encode(out);
                receipt.inner.logs.encode(out);
                // Deposit nonce and version are optional trailing fields and are
                // only emitted when present (mirrors the length computation below).
                if let Some(nonce) = receipt.deposit_nonce {
                    nonce.encode(out);
                }
                if let Some(version) = receipt.deposit_receipt_version {
                    version.encode(out);
                }
            }
        }
    }

    /// Returns length of RLP-encoded receipt fields without an RLP header (no bloom).
    pub fn rlp_encoded_fields_length_without_bloom(&self) -> usize {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => {
                receipt.status.length() +
                    receipt.cumulative_gas_used.length() +
                    receipt.logs.length()
            }
            Self::Deposit(receipt) => {
                receipt.inner.status.length() +
                    receipt.inner.cumulative_gas_used.length() +
                    receipt.inner.logs.length() +
                    receipt.deposit_nonce.map_or(0, |nonce| nonce.length()) +
                    receipt.deposit_receipt_version.map_or(0, |version| version.length())
            }
        }
    }

    /// RLP-decodes the receipt from the provided buffer without bloom.
    pub fn rlp_decode_inner_without_bloom(
        buf: &mut &[u8],
        tx_type: OpTxType,
    ) -> alloy_rlp::Result<Self> {
        let header = Header::decode(buf)?;
        if !header.list {
            return Err(alloy_rlp::Error::UnexpectedString);
        }
        // Bytes available before decoding the payload; used to detect whether
        // optional trailing deposit fields are present and to validate that the
        // declared payload is fully consumed.
        let remaining = buf.len();

        let status = Decodable::decode(buf)?;
        let cumulative_gas_used = Decodable::decode(buf)?;
        let logs = Decodable::decode(buf)?;

        let mut deposit_nonce = None;
        let mut deposit_receipt_version = None;

        // For deposit receipts, try to decode nonce and version if they exist.
        // `buf.len() + header.payload_length > remaining` means part of the
        // declared payload is still unread.
        if tx_type == OpTxType::Deposit && buf.len() + header.payload_length > remaining {
            deposit_nonce = Some(Decodable::decode(buf)?);
            if buf.len() + header.payload_length > remaining {
                deposit_receipt_version = Some(Decodable::decode(buf)?);
            }
        }

        // The payload must be exactly consumed, otherwise the header length lied.
        if buf.len() + header.payload_length != remaining {
            return Err(alloy_rlp::Error::UnexpectedLength);
        }

        match tx_type {
            OpTxType::Legacy => Ok(Self::Legacy(Receipt { status, cumulative_gas_used, logs })),
            OpTxType::Eip2930 => Ok(Self::Eip2930(Receipt { status, cumulative_gas_used, logs })),
            OpTxType::Eip1559 => Ok(Self::Eip1559(Receipt { status, cumulative_gas_used, logs })),
            OpTxType::Eip7702 => Ok(Self::Eip7702(Receipt { status, cumulative_gas_used, logs })),
            OpTxType::Deposit => Ok(Self::Deposit(OpDepositReceipt {
                inner: Receipt { status, cumulative_gas_used, logs },
                deposit_nonce,
                deposit_receipt_version,
            })),
        }
    }
}
impl Eip2718EncodableReceipt for OpReceipt {
    /// Length of the EIP-2718 encoding: one type byte for non-legacy receipts
    /// plus the inner RLP list (header + fields).
    fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize {
        let type_byte_len = if self.tx_type().is_legacy() { 0 } else { 1 };
        type_byte_len + self.rlp_header_inner(bloom).length_with_payload()
    }

    /// Writes the EIP-2718 encoding: the type byte (non-legacy only), followed
    /// by the RLP list header and the receipt fields with bloom.
    fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) {
        let tx_type = self.tx_type();
        if !tx_type.is_legacy() {
            out.put_u8(tx_type as u8);
        }
        self.rlp_header_inner(bloom).encode(out);
        self.rlp_encode_fields(bloom, out);
    }
}
impl RlpEncodableReceipt for OpReceipt {
    /// Returns the RLP-encoded length with the given bloom filter.
    ///
    /// Non-legacy receipts are wrapped in an outer RLP string header around the
    /// EIP-2718 envelope, so that header's length is added for those.
    fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize {
        // Compute the envelope length once; the previous version computed
        // `eip2718_encoded_length_with_bloom` twice (for the total and again for
        // the header payload length), which walks the receipt fields twice.
        let eip2718_len = self.eip2718_encoded_length_with_bloom(bloom);
        let mut len = eip2718_len;
        if !self.tx_type().is_legacy() {
            len += Header { list: false, payload_length: eip2718_len }.length();
        }
        len
    }

    /// RLP-encodes the receipt with the given bloom filter.
    ///
    /// Non-legacy receipts are emitted as an RLP string wrapping the typed
    /// EIP-2718 envelope; legacy receipts are the bare RLP list.
    fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) {
        if !self.tx_type().is_legacy() {
            Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) }
                .encode(out);
        }
        self.eip2718_encode_with_bloom(bloom, out);
    }
}
impl RlpDecodableReceipt for OpReceipt {
    fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result<ReceiptWithBloom<Self>> {
        // Peek at the outer header on a copy of the buffer: a list header means
        // a legacy (untyped) receipt, a string header wraps a typed envelope.
        let header_buf = &mut &**buf;
        let header = Header::decode(header_buf)?;

        // Legacy receipt, reuse initial buffer without advancing
        if header.list {
            return Self::rlp_decode_inner(buf, OpTxType::Legacy)
        }

        // Otherwise, advance the buffer and try decoding type flag followed by receipt
        *buf = *header_buf;

        let remaining = buf.len();
        let tx_type = OpTxType::decode(buf)?;
        let this = Self::rlp_decode_inner(buf, tx_type)?;

        // The typed envelope must consume exactly `header.payload_length` bytes.
        if buf.len() + header.payload_length != remaining {
            return Err(alloy_rlp::Error::UnexpectedLength);
        }

        Ok(this)
    }
}
impl Encodable2718 for OpReceipt {
    /// Length of the EIP-2718 encoding without the bloom filter: an optional
    /// type byte plus the inner RLP list.
    fn encode_2718_len(&self) -> usize {
        !self.tx_type().is_legacy() as usize +
            self.rlp_header_inner_without_bloom().length_with_payload()
    }

    /// Writes the EIP-2718 encoding without the bloom filter.
    fn encode_2718(&self, out: &mut dyn BufMut) {
        // Legacy receipts have no type byte prefix.
        if !self.tx_type().is_legacy() {
            out.put_u8(self.tx_type() as u8);
        }
        self.rlp_header_inner_without_bloom().encode(out);
        self.rlp_encode_fields_without_bloom(out);
    }
}

impl Decodable2718 for OpReceipt {
    /// Decodes a typed receipt body; `ty` is the EIP-2718 type byte already read.
    fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
        Ok(Self::rlp_decode_inner_without_bloom(buf, OpTxType::try_from(ty)?)?)
    }

    /// Fallback for untyped payloads: decode as a legacy receipt.
    fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> {
        Ok(Self::rlp_decode_inner_without_bloom(buf, OpTxType::Legacy)?)
    }
}

// Plain `Encodable`/`Decodable` use the network representation (typed envelope
// wrapped in an RLP string header).
impl Encodable for OpReceipt {
    fn encode(&self, out: &mut dyn BufMut) {
        self.network_encode(out);
    }

    fn length(&self) -> usize {
        self.network_len()
    }
}

impl Decodable for OpReceipt {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        Ok(Self::network_decode(buf)?)
    }
}
impl TxReceipt for OpReceipt {
    type Log = Log;

    fn status_or_post_state(&self) -> Eip658Value {
        self.as_receipt().status_or_post_state()
    }

    fn status(&self) -> bool {
        self.as_receipt().status()
    }

    fn bloom(&self) -> Bloom {
        self.as_receipt().bloom()
    }

    fn cumulative_gas_used(&self) -> u64 {
        self.as_receipt().cumulative_gas_used()
    }

    fn logs(&self) -> &[Log] {
        self.as_receipt().logs()
    }

    /// Consumes the receipt and returns its logs without cloning.
    fn into_logs(self) -> Vec<Self::Log> {
        match self {
            Self::Legacy(receipt) |
            Self::Eip2930(receipt) |
            Self::Eip1559(receipt) |
            Self::Eip7702(receipt) => receipt.logs,
            Self::Deposit(receipt) => receipt.inner.logs,
        }
    }
}

impl Typed2718 for OpReceipt {
    /// EIP-2718 type byte of the corresponding transaction type.
    fn ty(&self) -> u8 {
        self.tx_type().into()
    }
}

impl IsTyped2718 for OpReceipt {
    /// Delegates type-id recognition to [`OpTxType`].
    fn is_type(type_id: u8) -> bool {
        <OpTxType as IsTyped2718>::is_type(type_id)
    }
}

impl InMemorySize for OpReceipt {
    /// In-memory size of the receipt.
    ///
    /// NOTE(review): delegates to the inner [`Receipt`], so the deposit
    /// nonce/version fields are not counted — confirm this is intended.
    fn size(&self) -> usize {
        self.as_receipt().size()
    }
}
impl From<op_alloy_consensus::OpReceiptEnvelope> for OpReceipt {
fn from(envelope: op_alloy_consensus::OpReceiptEnvelope) -> Self {
match envelope {
op_alloy_consensus::OpReceiptEnvelope::Legacy(receipt) => Self::Legacy(receipt.receipt),
op_alloy_consensus::OpReceiptEnvelope::Eip2930(receipt) => {
Self::Eip2930(receipt.receipt)
}
op_alloy_consensus::OpReceiptEnvelope::Eip1559(receipt) => {
Self::Eip1559(receipt.receipt)
}
op_alloy_consensus::OpReceiptEnvelope::Eip7702(receipt) => {
Self::Eip7702(receipt.receipt)
}
op_alloy_consensus::OpReceiptEnvelope::Deposit(receipt) => {
Self::Deposit(OpDepositReceipt {
deposit_nonce: receipt.receipt.deposit_nonce,
deposit_receipt_version: receipt.receipt.deposit_receipt_version,
inner: receipt.receipt.inner,
})
}
}
}
}
/// Trait for deposit receipt.
///
/// Lets generic code access the Optimism deposit-specific receipt fields when
/// the receipt is a deposit receipt; accessors return `None` otherwise.
pub trait DepositReceipt: reth_primitives_traits::Receipt {
    /// Converts a `Receipt` into a mutable Optimism deposit receipt.
    fn as_deposit_receipt_mut(&mut self) -> Option<&mut OpDepositReceipt>;

    /// Extracts an Optimism deposit receipt from `Receipt`.
    fn as_deposit_receipt(&self) -> Option<&OpDepositReceipt>;
}

impl DepositReceipt for OpReceipt {
    fn as_deposit_receipt_mut(&mut self) -> Option<&mut OpDepositReceipt> {
        match self {
            Self::Deposit(receipt) => Some(receipt),
            _ => None,
        }
    }

    fn as_deposit_receipt(&self) -> Option<&OpDepositReceipt> {
        match self {
            Self::Deposit(receipt) => Some(receipt),
            _ => None,
        }
    }
}
#[cfg(feature = "reth-codec")]
mod compact {
    //! `Compact` (database) codec for [`OpReceipt`].
    use super::*;
    use alloc::borrow::Cow;
    use reth_codecs::Compact;

    /// Flattened, zstd-compressed representation used for `Compact` storage.
    ///
    /// All variants share this field layout; the enum shape is reconstructed
    /// from `tx_type` on decode. Field order is part of the on-disk format —
    /// see `test_ensure_backwards_compatibility` below.
    #[derive(reth_codecs::CompactZstd)]
    #[reth_zstd(
        compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR,
        decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR
    )]
    struct CompactOpReceipt<'a> {
        tx_type: OpTxType,
        success: bool,
        cumulative_gas_used: u64,
        #[expect(clippy::owned_cow)]
        logs: Cow<'a, Vec<Log>>,
        // Only ever `Some` for deposit receipts.
        deposit_nonce: Option<u64>,
        deposit_receipt_version: Option<u64>,
    }

    impl<'a> From<&'a OpReceipt> for CompactOpReceipt<'a> {
        fn from(receipt: &'a OpReceipt) -> Self {
            Self {
                tx_type: receipt.tx_type(),
                success: receipt.status(),
                cumulative_gas_used: receipt.cumulative_gas_used(),
                // Borrow the logs to avoid cloning during encoding.
                logs: Cow::Borrowed(&receipt.as_receipt().logs),
                deposit_nonce: if let OpReceipt::Deposit(receipt) = receipt {
                    receipt.deposit_nonce
                } else {
                    None
                },
                deposit_receipt_version: if let OpReceipt::Deposit(receipt) = receipt {
                    receipt.deposit_receipt_version
                } else {
                    None
                },
            }
        }
    }

    impl From<CompactOpReceipt<'_>> for OpReceipt {
        fn from(receipt: CompactOpReceipt<'_>) -> Self {
            let CompactOpReceipt {
                tx_type,
                success,
                cumulative_gas_used,
                logs,
                deposit_nonce,
                deposit_receipt_version,
            } = receipt;

            let inner =
                Receipt { status: success.into(), cumulative_gas_used, logs: logs.into_owned() };

            match tx_type {
                OpTxType::Legacy => Self::Legacy(inner),
                OpTxType::Eip2930 => Self::Eip2930(inner),
                OpTxType::Eip1559 => Self::Eip1559(inner),
                OpTxType::Eip7702 => Self::Eip7702(inner),
                OpTxType::Deposit => Self::Deposit(OpDepositReceipt {
                    inner,
                    deposit_nonce,
                    deposit_receipt_version,
                }),
            }
        }
    }

    impl Compact for OpReceipt {
        fn to_compact<B>(&self, buf: &mut B) -> usize
        where
            B: bytes::BufMut + AsMut<[u8]>,
        {
            CompactOpReceipt::from(self).to_compact(buf)
        }

        fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
            let (receipt, buf) = CompactOpReceipt::from_compact(buf, len);
            (receipt.into(), buf)
        }
    }

    /// Guards the on-disk layout: the bitflag header size must not change.
    #[cfg(test)]
    #[test]
    fn test_ensure_backwards_compatibility() {
        use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat};

        assert_eq!(CompactOpReceipt::bitflag_encoded_bytes(), 2);
        validate_bitflag_backwards_compat!(CompactOpReceipt<'_>, UnusedBits::NotZero);
    }
}
#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))]
pub(super) mod serde_bincode_compat {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{DeserializeAs, SerializeAs};

    /// Bincode-compatible [`super::OpReceipt`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_optimism_primitives::{serde_bincode_compat, OpReceipt};
    /// use serde::{de::DeserializeOwned, Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data {
    ///     #[serde_as(as = "serde_bincode_compat::OpReceipt<'_>")]
    ///     receipt: OpReceipt,
    /// }
    /// ```
    #[derive(Debug, Serialize, Deserialize)]
    pub enum OpReceipt<'a> {
        /// Legacy receipt
        Legacy(alloy_consensus::serde_bincode_compat::Receipt<'a, alloy_primitives::Log>),
        /// EIP-2930 receipt
        Eip2930(alloy_consensus::serde_bincode_compat::Receipt<'a, alloy_primitives::Log>),
        /// EIP-1559 receipt
        Eip1559(alloy_consensus::serde_bincode_compat::Receipt<'a, alloy_primitives::Log>),
        /// EIP-7702 receipt
        Eip7702(alloy_consensus::serde_bincode_compat::Receipt<'a, alloy_primitives::Log>),
        /// Deposit receipt
        Deposit(
            op_alloy_consensus::serde_bincode_compat::OpDepositReceipt<'a, alloy_primitives::Log>,
        ),
    }

    // Borrowing conversion used when serializing.
    impl<'a> From<&'a super::OpReceipt> for OpReceipt<'a> {
        fn from(value: &'a super::OpReceipt) -> Self {
            match value {
                super::OpReceipt::Legacy(receipt) => Self::Legacy(receipt.into()),
                super::OpReceipt::Eip2930(receipt) => Self::Eip2930(receipt.into()),
                super::OpReceipt::Eip1559(receipt) => Self::Eip1559(receipt.into()),
                super::OpReceipt::Eip7702(receipt) => Self::Eip7702(receipt.into()),
                super::OpReceipt::Deposit(receipt) => Self::Deposit(receipt.into()),
            }
        }
    }

    // Owning conversion used when deserializing.
    impl<'a> From<OpReceipt<'a>> for super::OpReceipt {
        fn from(value: OpReceipt<'a>) -> Self {
            match value {
                OpReceipt::Legacy(receipt) => Self::Legacy(receipt.into()),
                OpReceipt::Eip2930(receipt) => Self::Eip2930(receipt.into()),
                OpReceipt::Eip1559(receipt) => Self::Eip1559(receipt.into()),
                OpReceipt::Eip7702(receipt) => Self::Eip7702(receipt.into()),
                OpReceipt::Deposit(receipt) => Self::Deposit(receipt.into()),
            }
        }
    }

    impl SerializeAs<super::OpReceipt> for OpReceipt<'_> {
        fn serialize_as<S>(source: &super::OpReceipt, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            OpReceipt::<'_>::from(source).serialize(serializer)
        }
    }

    impl<'de> DeserializeAs<'de, super::OpReceipt> for OpReceipt<'de> {
        fn deserialize_as<D>(deserializer: D) -> Result<super::OpReceipt, D::Error>
        where
            D: Deserializer<'de>,
        {
            OpReceipt::<'_>::deserialize(deserializer).map(Into::into)
        }
    }

    impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for super::OpReceipt {
        type BincodeRepr<'a> = OpReceipt<'a>;

        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            self.into()
        }

        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            repr.into()
        }
    }

    #[cfg(test)]
    mod tests {
        use crate::{receipt::serde_bincode_compat, OpReceipt};
        use arbitrary::Arbitrary;
        use rand::Rng;
        use serde::{Deserialize, Serialize};
        use serde_with::serde_as;

        /// Round-trips an arbitrary receipt through the bincode-compat repr.
        #[test]
        fn test_tx_bincode_roundtrip() {
            #[serde_as]
            #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
            struct Data {
                #[serde_as(as = "serde_bincode_compat::OpReceipt<'_>")]
                receipt: OpReceipt,
            }

            let mut bytes = [0u8; 1024];
            rand::rng().fill(bytes.as_mut_slice());
            let mut data = Data {
                receipt: OpReceipt::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(),
            };
            let success = data.receipt.as_receipt_mut().status.coerce_status();
            // ensure we don't have an invalid poststate variant
            data.receipt.as_receipt_mut().status = success.into();

            let encoded = bincode::serialize(&data).unwrap();
            let decoded: Data = bincode::deserialize(&encoded).unwrap();
            assert_eq!(decoded, data);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_eips::eip2718::Encodable2718;
    use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes};
    use alloy_rlp::Encodable;
    use reth_codecs::Compact;

    /// Regression vector for compact decoding.
    #[test]
    fn test_decode_receipt() {
        reth_codecs::test_utils::test_decode::<OpReceipt>(&hex!(
            "c30328b52ffd23fc426961a00105007eb0042307705a97e503562eacf2b95060cce9de6de68386b6c155b73a9650021a49e2f8baad17f30faff5899d785c4c0873e45bc268bcf07560106424570d11f9a59e8f3db1efa4ceec680123712275f10d92c3411e1caaa11c7c5d591bc11487168e09934a9986848136da1b583babf3a7188e3aed007a1520f1cf4c1ca7d3482c6c28d37c298613c70a76940008816c4c95644579fd08471dc34732fd0f24"
        ));
    }

    // Test vector from: https://eips.ethereum.org/EIPS/eip-2481
    #[test]
    fn encode_legacy_receipt() {
        let expected = hex!(
            "f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"
        );

        let mut data = Vec::with_capacity(expected.length());
        let receipt = ReceiptWithBloom {
            receipt: OpReceipt::Legacy(Receipt {
                status: Eip658Value::Eip658(false),
                cumulative_gas_used: 0x1,
                logs: vec![Log::new_unchecked(
                    address!("0x0000000000000000000000000000000000000011"),
                    vec![
                        b256!("0x000000000000000000000000000000000000000000000000000000000000dead"),
                        b256!("0x000000000000000000000000000000000000000000000000000000000000beef"),
                    ],
                    bytes!("0100ff"),
                )],
            }),
            logs_bloom: [0; 256].into(),
        };

        receipt.encode(&mut data);

        // check that the rlp length equals the length of the expected rlp
        assert_eq!(receipt.length(), expected.len());
        assert_eq!(data, expected);
    }

    // Test vector from: https://eips.ethereum.org/EIPS/eip-2481
    #[test]
    fn decode_legacy_receipt() {
        let data = hex!(
            "f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"
        );

        // EIP658Receipt
        let expected = ReceiptWithBloom {
            receipt: OpReceipt::Legacy(Receipt {
                status: Eip658Value::Eip658(false),
                cumulative_gas_used: 0x1,
                logs: vec![Log::new_unchecked(
                    address!("0x0000000000000000000000000000000000000011"),
                    vec![
                        b256!("0x000000000000000000000000000000000000000000000000000000000000dead"),
                        b256!("0x000000000000000000000000000000000000000000000000000000000000beef"),
                    ],
                    bytes!("0100ff"),
                )],
            }),
            logs_bloom: [0; 256].into(),
        };

        let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap();
        assert_eq!(receipt, expected);
    }

    #[test]
    fn decode_deposit_receipt_regolith_roundtrip() {
        let data = hex!(
            "b901107ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"
        );

        // Deposit Receipt (post-regolith): carries a deposit nonce but no version.
        let expected = ReceiptWithBloom {
            receipt: OpReceipt::Deposit(OpDepositReceipt {
                inner: Receipt {
                    status: Eip658Value::Eip658(true),
                    cumulative_gas_used: 46913,
                    logs: vec![],
                },
                deposit_nonce: Some(4012991),
                deposit_receipt_version: None,
            }),
            logs_bloom: [0; 256].into(),
        };

        let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap();
        assert_eq!(receipt, expected);

        // Re-encoding must reproduce the original bytes.
        let mut buf = Vec::with_capacity(data.len());
        receipt.encode(&mut buf);
        assert_eq!(buf, &data[..]);
    }

    #[test]
    fn decode_deposit_receipt_canyon_roundtrip() {
        let data = hex!(
            "b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"
        );

        // Deposit Receipt (post-canyon): also carries the deposit receipt version.
        let expected = ReceiptWithBloom {
            receipt: OpReceipt::Deposit(OpDepositReceipt {
                inner: Receipt {
                    status: Eip658Value::Eip658(true),
                    cumulative_gas_used: 46913,
                    logs: vec![],
                },
                deposit_nonce: Some(4012991),
                deposit_receipt_version: Some(1),
            }),
            logs_bloom: [0; 256].into(),
        };

        let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap();
        assert_eq!(receipt, expected);

        // Re-encoding must reproduce the original bytes.
        let mut buf = Vec::with_capacity(data.len());
        expected.encode(&mut buf);
        assert_eq!(buf, &data[..]);
    }

    /// Compact round-trip with multi-megabyte log payloads (exercises zstd path).
    #[test]
    fn gigantic_receipt() {
        let receipt = OpReceipt::Legacy(Receipt {
            status: Eip658Value::Eip658(true),
            cumulative_gas_used: 16747627,
            logs: vec![
                Log::new_unchecked(
                    address!("0x4bf56695415f725e43c3e04354b604bcfb6dfb6e"),
                    vec![b256!(
                        "0xc69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9"
                    )],
                    Bytes::from(vec![1; 0xffffff]),
                ),
                Log::new_unchecked(
                    address!("0xfaca325c86bf9c2d5b413cd7b90b209be92229c2"),
                    vec![b256!(
                        "0x8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2"
                    )],
                    Bytes::from(vec![1; 0xffffff]),
                ),
            ],
        });

        let mut data = vec![];
        receipt.to_compact(&mut data);
        let (decoded, _) = OpReceipt::from_compact(&data[..], data.len());
        assert_eq!(decoded, receipt);
    }

    /// `encode_2718_len` must agree with the actual encoded byte count for both
    /// typed and legacy receipts.
    #[test]
    fn test_encode_2718_length() {
        let receipt = ReceiptWithBloom {
            receipt: OpReceipt::Eip1559(Receipt {
                status: Eip658Value::Eip658(true),
                cumulative_gas_used: 21000,
                logs: vec![],
            }),
            logs_bloom: Bloom::default(),
        };

        let encoded = receipt.encoded_2718();
        assert_eq!(
            encoded.len(),
            receipt.encode_2718_len(),
            "Encoded length should match the actual encoded data length"
        );

        // Test for legacy receipt as well
        let legacy_receipt = ReceiptWithBloom {
            receipt: OpReceipt::Legacy(Receipt {
                status: Eip658Value::Eip658(true),
                cumulative_gas_used: 21000,
                logs: vec![],
            }),
            logs_bloom: Bloom::default(),
        };

        let legacy_encoded = legacy_receipt.encoded_2718();
        assert_eq!(
            legacy_encoded.len(),
            legacy_receipt.encode_2718_len(),
            "Encoded length for legacy receipt should match the actual encoded data length"
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/transaction/mod.rs | crates/optimism/primitives/src/transaction/mod.rs | //! Optimism transaction types
mod tx_type;
/// Kept for consistency tests
#[cfg(test)]
mod signed;
pub use op_alloy_consensus::{OpTransaction, OpTxType, OpTypedTransaction};
/// Signed transaction.
pub type OpTransactionSigned = op_alloy_consensus::OpTxEnvelope;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/transaction/signed.rs | crates/optimism/primitives/src/transaction/signed.rs | //! This file contains the legacy reth `OpTransactionSigned` type that has been replaced with
//! op-alloy's OpTransactionSigned To test for consistency this is kept
use crate::transaction::OpTransaction;
use alloc::vec::Vec;
use alloy_consensus::{
transaction::{RlpEcdsaDecodableTx, RlpEcdsaEncodableTx, SignerRecoverable},
Sealed, SignableTransaction, Signed, Transaction, TxEip1559, TxEip2930, TxEip7702, TxLegacy,
Typed2718,
};
use alloy_eips::{
eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718},
eip2930::AccessList,
eip7702::SignedAuthorization,
};
use alloy_primitives::{keccak256, Address, Bytes, Signature, TxHash, TxKind, Uint, B256};
use alloy_rlp::Header;
use core::{
hash::{Hash, Hasher},
mem,
ops::Deref,
};
use op_alloy_consensus::{OpPooledTransaction, OpTxEnvelope, OpTypedTransaction, TxDeposit};
#[cfg(any(test, feature = "reth-codec"))]
use reth_primitives_traits::{
crypto::secp256k1::{recover_signer, recover_signer_unchecked},
sync::OnceLock,
transaction::{error::TransactionConversionError, signed::RecoveryError},
InMemorySize, SignedTransaction,
};
/// Signed transaction.
///
/// Legacy reth representation of a signed OP transaction, kept only so tests
/// can check consistency against op-alloy's `OpTxEnvelope` (see module docs).
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Debug, Clone, Eq)]
pub struct OpTransactionSigned {
    /// Transaction hash
    ///
    /// Lazily initialized and memoized; skipped during serde (recomputed on demand).
    #[cfg_attr(feature = "serde", serde(skip))]
    hash: OnceLock<TxHash>,
    /// The transaction signature values
    signature: Signature,
    /// Raw transaction info
    transaction: OpTypedTransaction,
}
impl Deref for OpTransactionSigned {
    type Target = OpTypedTransaction;

    /// Borrows the underlying typed transaction, so transaction accessors can
    /// be called directly on the signed wrapper.
    fn deref(&self) -> &Self::Target {
        let Self { transaction, .. } = self;
        transaction
    }
}
impl OpTransactionSigned {
    /// Creates a new signed transaction from the given transaction, signature and hash.
    pub fn new(transaction: OpTypedTransaction, signature: Signature, hash: B256) -> Self {
        Self { hash: hash.into(), signature, transaction }
    }

    /// Test helper: mutable access to the transaction input bytes.
    #[cfg(test)]
    fn input_mut(&mut self) -> &mut Bytes {
        match &mut self.transaction {
            OpTypedTransaction::Legacy(tx) => &mut tx.input,
            OpTypedTransaction::Eip2930(tx) => &mut tx.input,
            OpTypedTransaction::Eip1559(tx) => &mut tx.input,
            OpTypedTransaction::Eip7702(tx) => &mut tx.input,
            OpTypedTransaction::Deposit(tx) => &mut tx.input,
        }
    }

    /// Consumes the type and returns the transaction.
    #[inline]
    pub fn into_transaction(self) -> OpTypedTransaction {
        self.transaction
    }

    /// Returns the transaction.
    #[inline]
    pub const fn transaction(&self) -> &OpTypedTransaction {
        &self.transaction
    }

    /// Splits the `OpTransactionSigned` into its transaction and signature.
    pub fn split(self) -> (OpTypedTransaction, Signature) {
        (self.transaction, self.signature)
    }

    /// Creates a new signed transaction from the given transaction and signature without the hash.
    ///
    /// Note: this only calculates the hash on the first [`OpTransactionSigned::hash`] call.
    pub fn new_unhashed(transaction: OpTypedTransaction, signature: Signature) -> Self {
        Self { hash: Default::default(), signature, transaction }
    }

    /// Returns whether this transaction is a deposit.
    pub const fn is_deposit(&self) -> bool {
        matches!(self.transaction, OpTypedTransaction::Deposit(_))
    }

    /// Splits the transaction into `(transaction, signature, hash)`.
    ///
    /// Computes and memoizes the hash first if it was not calculated yet.
    pub fn into_parts(self) -> (OpTypedTransaction, Signature, B256) {
        let hash = *self.hash.get_or_init(|| self.recalculate_hash());
        (self.transaction, self.signature, hash)
    }
}
impl SignerRecoverable for OpTransactionSigned {
    fn recover_signer(&self) -> Result<Address, RecoveryError> {
        // Optimism's Deposit transaction does not have a signature. Directly return the
        // `from` address.
        if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = self.transaction {
            return Ok(from)
        }

        let Self { transaction, signature, .. } = self;
        let signature_hash = signature_hash(transaction);
        recover_signer(signature, signature_hash)
    }

    fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
        // Optimism's Deposit transaction does not have a signature. Directly return the
        // `from` address.
        if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = &self.transaction {
            return Ok(*from)
        }

        let Self { transaction, signature, .. } = self;
        let signature_hash = signature_hash(transaction);
        recover_signer_unchecked(signature, signature_hash)
    }

    /// Variant that reuses `buf` as scratch space for the signing-payload
    /// encoding, avoiding a fresh allocation per recovery.
    fn recover_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Result<Address, RecoveryError> {
        match &self.transaction {
            // Optimism's Deposit transaction does not have a signature. Directly return the
            // `from` address.
            OpTypedTransaction::Deposit(tx) => return Ok(tx.from),
            OpTypedTransaction::Legacy(tx) => tx.encode_for_signing(buf),
            OpTypedTransaction::Eip2930(tx) => tx.encode_for_signing(buf),
            OpTypedTransaction::Eip1559(tx) => tx.encode_for_signing(buf),
            OpTypedTransaction::Eip7702(tx) => tx.encode_for_signing(buf),
        };
        recover_signer_unchecked(&self.signature, keccak256(buf))
    }
}
impl SignedTransaction for OpTransactionSigned {
    /// Returns the transaction hash, computing and memoizing it on first access.
    fn tx_hash(&self) -> &TxHash {
        self.hash.get_or_init(|| Self::recalculate_hash(self))
    }

    /// Recomputes the hash as keccak256 of the EIP-2718 encoding.
    fn recalculate_hash(&self) -> B256 {
        let encoded = self.encoded_2718();
        keccak256(encoded)
    }
}
/// Implements `From<Signed<$tx>>` for [`OpTransactionSigned`] for each listed
/// transaction type, reusing the hash already carried by the `Signed` wrapper.
macro_rules! impl_from_signed {
    ($($tx:ident),*) => {
        $(
            impl From<Signed<$tx>> for OpTransactionSigned {
                fn from(value: Signed<$tx>) -> Self {
                    let(tx,sig,hash) = value.into_parts();
                    Self::new(tx.into(), sig, hash)
                }
            }
        )*
    };
}

impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, OpTypedTransaction);
impl From<OpTxEnvelope> for OpTransactionSigned {
    fn from(value: OpTxEnvelope) -> Self {
        match value {
            OpTxEnvelope::Legacy(tx) => tx.into(),
            OpTxEnvelope::Eip2930(tx) => tx.into(),
            OpTxEnvelope::Eip1559(tx) => tx.into(),
            OpTxEnvelope::Eip7702(tx) => tx.into(),
            OpTxEnvelope::Deposit(tx) => tx.into(),
        }
    }
}

impl From<Sealed<TxDeposit>> for OpTransactionSigned {
    /// Deposit transactions carry no real signature; a placeholder signature
    /// (`TxDeposit::signature()`) is stored alongside the sealed hash.
    fn from(value: Sealed<TxDeposit>) -> Self {
        let (tx, hash) = value.into_parts();
        Self::new(OpTypedTransaction::Deposit(tx), TxDeposit::signature(), hash)
    }
}

impl From<OpTransactionSigned> for OpTxEnvelope {
    /// Converts back into op-alloy's envelope, preserving the memoized hash
    /// (`new_unchecked`/`Sealed::new_unchecked` skip re-hashing).
    fn from(value: OpTransactionSigned) -> Self {
        let (tx, signature, hash) = value.into_parts();
        match tx {
            OpTypedTransaction::Legacy(tx) => Signed::new_unchecked(tx, signature, hash).into(),
            OpTypedTransaction::Eip2930(tx) => Signed::new_unchecked(tx, signature, hash).into(),
            OpTypedTransaction::Eip1559(tx) => Signed::new_unchecked(tx, signature, hash).into(),
            // Deposits are sealed by hash only; the placeholder signature is dropped.
            OpTypedTransaction::Deposit(tx) => Sealed::new_unchecked(tx, hash).into(),
            OpTypedTransaction::Eip7702(tx) => Signed::new_unchecked(tx, signature, hash).into(),
        }
    }
}
impl InMemorySize for OpTransactionSigned {
    /// Approximate in-memory footprint: hash slot + inner transaction + signature.
    #[inline]
    fn size(&self) -> usize {
        mem::size_of::<TxHash>() + self.transaction.size() + mem::size_of::<Signature>()
    }
}
/// RLP encoding uses the *network* format (typed payloads wrapped in an RLP string).
impl alloy_rlp::Encodable for OpTransactionSigned {
    fn encode(&self, out: &mut dyn alloy_rlp::bytes::BufMut) {
        self.network_encode(out);
    }
    fn length(&self) -> usize {
        let mut payload_length = self.encode_2718_len();
        if !self.is_legacy() {
            // Non-legacy (typed) transactions are wrapped in an RLP string header in the
            // network encoding; account for that header's length.
            payload_length += Header { list: false, payload_length }.length();
        }
        payload_length
    }
}
/// Counterpart to the `Encodable` impl above: decodes the network format.
impl alloy_rlp::Decodable for OpTransactionSigned {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        Self::network_decode(buf).map_err(Into::into)
    }
}
impl Encodable2718 for OpTransactionSigned {
    /// Returns the EIP-2718 type byte, or `None` for legacy transactions, which are untyped.
    fn type_flag(&self) -> Option<u8> {
        if Typed2718::is_legacy(self) {
            None
        } else {
            Some(self.ty())
        }
    }
    /// Length of the EIP-2718 encoding. Deposit transactions are encoded without a
    /// signature; all other variants include it.
    fn encode_2718_len(&self) -> usize {
        match &self.transaction {
            OpTypedTransaction::Legacy(legacy_tx) => {
                legacy_tx.eip2718_encoded_length(&self.signature)
            }
            OpTypedTransaction::Eip2930(access_list_tx) => {
                access_list_tx.eip2718_encoded_length(&self.signature)
            }
            OpTypedTransaction::Eip1559(dynamic_fee_tx) => {
                dynamic_fee_tx.eip2718_encoded_length(&self.signature)
            }
            OpTypedTransaction::Eip7702(set_code_tx) => {
                set_code_tx.eip2718_encoded_length(&self.signature)
            }
            OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(),
        }
    }
    /// Writes the EIP-2718 encoding. Must stay consistent with `encode_2718_len` above.
    fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) {
        let Self { transaction, signature, .. } = self;
        match &transaction {
            OpTypedTransaction::Legacy(legacy_tx) => {
                // do nothing w/ with_header
                legacy_tx.eip2718_encode(signature, out)
            }
            OpTypedTransaction::Eip2930(access_list_tx) => {
                access_list_tx.eip2718_encode(signature, out)
            }
            OpTypedTransaction::Eip1559(dynamic_fee_tx) => {
                dynamic_fee_tx.eip2718_encode(signature, out)
            }
            OpTypedTransaction::Eip7702(set_code_tx) => set_code_tx.eip2718_encode(signature, out),
            // Deposit transactions carry no signature in their encoding.
            OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.encode_2718(out),
        }
    }
}
impl Decodable2718 for OpTransactionSigned {
    /// Decodes a *typed* transaction payload for type byte `ty`.
    ///
    /// Legacy has no type byte, so a legacy flag here is an error. For signed variants the
    /// hash computed during RLP decode is seeded into the lazy hash cache to avoid
    /// recomputing it later.
    fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
        match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? {
            op_alloy_consensus::OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)),
            op_alloy_consensus::OpTxType::Eip2930 => {
                let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts();
                let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip2930(tx), signature);
                signed_tx.hash.get_or_init(|| hash);
                Ok(signed_tx)
            }
            op_alloy_consensus::OpTxType::Eip1559 => {
                let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts();
                let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip1559(tx), signature);
                signed_tx.hash.get_or_init(|| hash);
                Ok(signed_tx)
            }
            op_alloy_consensus::OpTxType::Eip7702 => {
                let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts();
                let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip7702(tx), signature);
                signed_tx.hash.get_or_init(|| hash);
                Ok(signed_tx)
            }
            // Deposits are unsigned; use the stand-in signature and leave the hash lazy.
            op_alloy_consensus::OpTxType::Deposit => Ok(Self::new_unhashed(
                OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?),
                TxDeposit::signature(),
            )),
        }
    }
    /// Fallback for untyped payloads: decodes a legacy transaction with its signature.
    fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> {
        let (transaction, signature) = TxLegacy::rlp_decode_with_signature(buf)?;
        let signed_tx = Self::new_unhashed(OpTypedTransaction::Legacy(transaction), signature);
        Ok(signed_tx)
    }
}
// Every accessor below delegates to the inner `OpTypedTransaction` through `Deref`.
impl Transaction for OpTransactionSigned {
    fn chain_id(&self) -> Option<u64> {
        self.deref().chain_id()
    }
    fn nonce(&self) -> u64 {
        self.deref().nonce()
    }
    fn gas_limit(&self) -> u64 {
        self.deref().gas_limit()
    }
    fn gas_price(&self) -> Option<u128> {
        self.deref().gas_price()
    }
    fn max_fee_per_gas(&self) -> u128 {
        self.deref().max_fee_per_gas()
    }
    fn max_priority_fee_per_gas(&self) -> Option<u128> {
        self.deref().max_priority_fee_per_gas()
    }
    fn max_fee_per_blob_gas(&self) -> Option<u128> {
        self.deref().max_fee_per_blob_gas()
    }
    fn priority_fee_or_price(&self) -> u128 {
        self.deref().priority_fee_or_price()
    }
    fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
        self.deref().effective_gas_price(base_fee)
    }
    fn effective_tip_per_gas(&self, base_fee: u64) -> Option<u128> {
        self.deref().effective_tip_per_gas(base_fee)
    }
    fn is_dynamic_fee(&self) -> bool {
        self.deref().is_dynamic_fee()
    }
    fn kind(&self) -> TxKind {
        self.deref().kind()
    }
    fn is_create(&self) -> bool {
        self.deref().is_create()
    }
    fn value(&self) -> Uint<256, 4> {
        self.deref().value()
    }
    fn input(&self) -> &Bytes {
        self.deref().input()
    }
    fn access_list(&self) -> Option<&AccessList> {
        self.deref().access_list()
    }
    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
        self.deref().blob_versioned_hashes()
    }
    fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
        self.deref().authorization_list()
    }
}
impl Typed2718 for OpTransactionSigned {
    /// EIP-2718 type byte, delegated to the inner typed transaction.
    fn ty(&self) -> u8 {
        self.deref().ty()
    }
}
/// Equality also compares the transaction hash (forcing its lazy computation on both sides).
impl PartialEq for OpTransactionSigned {
    fn eq(&self, other: &Self) -> bool {
        self.signature == other.signature &&
            self.transaction == other.transaction &&
            self.tx_hash() == other.tx_hash()
    }
}
/// Hashes only signature + transaction. This is consistent with `PartialEq` because the
/// cached tx hash is itself derived from those two fields (see `recalculate_hash`).
impl Hash for OpTransactionSigned {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.signature.hash(state);
        self.transaction.hash(state);
    }
}
#[cfg(feature = "reth-codec")]
impl reth_codecs::Compact for OpTransactionSigned {
    /// Writes the compact database encoding and returns the transaction-type identifier.
    ///
    /// Note: the cached hash is NOT persisted; it is recomputed lazily after decode.
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let start = buf.as_mut().len();
        // Placeholder for bitflags.
        // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit]
        buf.put_u8(0);
        let sig_bit = self.signature.to_compact(buf) as u8;
        // Transactions with a 32+ byte input payload are zstd-compressed.
        let zstd_bit = self.transaction.input().len() >= 32;
        let tx_bits = if zstd_bit {
            let mut tmp = Vec::with_capacity(256);
            if cfg!(feature = "std") {
                // std builds reuse a thread-local compressor.
                reth_zstd_compressors::TRANSACTION_COMPRESSOR.with(|compressor| {
                    let mut compressor = compressor.borrow_mut();
                    let tx_bits = self.transaction.to_compact(&mut tmp);
                    buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress"));
                    tx_bits as u8
                })
            } else {
                // no-std builds create a fresh compressor per call.
                let mut compressor = reth_zstd_compressors::create_tx_compressor();
                let tx_bits = self.transaction.to_compact(&mut tmp);
                buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress"));
                tx_bits as u8
            }
        } else {
            self.transaction.to_compact(buf) as u8
        };
        // Replace bitflags with the actual values
        buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3);
        buf.as_mut().len() - start
    }
    /// Inverse of `to_compact`: reads the bitflag byte, then signature, then the
    /// (possibly zstd-compressed) transaction body.
    fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) {
        use bytes::Buf;
        // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1]
        let bitflags = buf.get_u8() as usize;
        let sig_bit = bitflags & 1;
        let (signature, buf) = Signature::from_compact(buf, sig_bit);
        let zstd_bit = bitflags >> 3;
        let (transaction, buf) = if zstd_bit != 0 {
            if cfg!(feature = "std") {
                reth_zstd_compressors::TRANSACTION_DECOMPRESSOR.with(|decompressor| {
                    let mut decompressor = decompressor.borrow_mut();
                    // TODO: enforce that zstd is only present at a "top" level type
                    let transaction_type = (bitflags & 0b110) >> 1;
                    let (transaction, _) = OpTypedTransaction::from_compact(
                        decompressor.decompress(buf),
                        transaction_type,
                    );
                    (transaction, buf)
                })
            } else {
                let mut decompressor = reth_zstd_compressors::create_tx_decompressor();
                let transaction_type = (bitflags & 0b110) >> 1;
                let (transaction, _) = OpTypedTransaction::from_compact(
                    decompressor.decompress(buf),
                    transaction_type,
                );
                (transaction, buf)
            }
        } else {
            let transaction_type = bitflags >> 1;
            OpTypedTransaction::from_compact(buf, transaction_type)
        };
        // The hash cache starts empty and is recomputed on first `tx_hash()` access.
        (Self { signature, transaction, hash: Default::default() }, buf)
    }
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> arbitrary::Arbitrary<'a> for OpTransactionSigned {
    /// Generates an arbitrary signed transaction.
    ///
    /// A freshly generated keypair signs the transaction's signing hash; deposit
    /// transactions are unsigned, so the fixed `TxDeposit::signature()` is used instead.
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // `transaction` is never mutated below, so the previous `let mut` (which triggered
        // an `unused_mut` warning) has been dropped.
        let transaction = OpTypedTransaction::arbitrary(u)?;
        let secp = secp256k1::Secp256k1::new();
        let key_pair = secp256k1::Keypair::new(&secp, &mut rand_08::thread_rng());
        let signature = reth_primitives_traits::crypto::secp256k1::sign_message(
            B256::from_slice(&key_pair.secret_bytes()[..]),
            signature_hash(&transaction),
        )
        .unwrap();
        let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature };
        Ok(Self::new_unhashed(transaction, signature))
    }
}
/// Calculates the signing hash for the transaction.
///
/// Deposit transactions are never signed, so they have no meaningful signing hash and
/// `B256::ZERO` is returned for them.
fn signature_hash(tx: &OpTypedTransaction) -> B256 {
    match tx {
        OpTypedTransaction::Legacy(tx) => tx.signature_hash(),
        OpTypedTransaction::Eip2930(tx) => tx.signature_hash(),
        OpTypedTransaction::Eip1559(tx) => tx.signature_hash(),
        OpTypedTransaction::Eip7702(tx) => tx.signature_hash(),
        OpTypedTransaction::Deposit(_) => B256::ZERO,
    }
}
#[cfg(test)]
mod tests {
    // Round-trip tests verifying that the reth `Compact` encoding of `OpTransactionSigned`
    // agrees exactly with the `Compact` encoding of `OpTxEnvelope`, with and without the
    // zstd compression path (triggered by inputs of 32+ bytes).
    //
    // Fix: the misspelled local `actual_but` has been renamed to `actual_buf`.
    use super::*;
    use proptest::proptest;
    use proptest_arbitrary_interop::arb;
    use reth_codecs::Compact;
    proptest! {
        #[test]
        fn test_roundtrip_compact_encode_envelope(reth_tx in arb::<OpTransactionSigned>()) {
            let mut expected_buf = Vec::<u8>::new();
            let expected_len = reth_tx.to_compact(&mut expected_buf);
            let mut actual_buf = Vec::<u8>::new();
            let alloy_tx = OpTxEnvelope::from(reth_tx);
            let actual_len = alloy_tx.to_compact(&mut actual_buf);
            assert_eq!(actual_buf, expected_buf);
            assert_eq!(actual_len, expected_len);
        }
        #[test]
        fn test_roundtrip_compact_decode_envelope_zstd(mut reth_tx in arb::<OpTransactionSigned>()) {
            // zstd only kicks in if the input is large enough
            *reth_tx.input_mut() = vec![0;33].into();
            let mut buf = Vec::<u8>::new();
            let len = reth_tx.to_compact(&mut buf);
            let (actual_tx, _) = OpTxEnvelope::from_compact(&buf, len);
            let expected_tx = OpTxEnvelope::from(reth_tx);
            assert_eq!(actual_tx, expected_tx);
        }
        #[test]
        fn test_roundtrip_compact_encode_envelope_zstd(mut reth_tx in arb::<OpTransactionSigned>()) {
            // zstd only kicks in if the input is large enough
            *reth_tx.input_mut() = vec![0;33].into();
            let mut expected_buf = Vec::<u8>::new();
            let expected_len = reth_tx.to_compact(&mut expected_buf);
            let mut actual_buf = Vec::<u8>::new();
            let alloy_tx = OpTxEnvelope::from(reth_tx);
            let actual_len = alloy_tx.to_compact(&mut actual_buf);
            assert_eq!(actual_buf, expected_buf);
            assert_eq!(actual_len, expected_len);
        }
        #[test]
        fn test_roundtrip_compact_decode_envelope(reth_tx in arb::<OpTransactionSigned>()) {
            let mut buf = Vec::<u8>::new();
            let len = reth_tx.to_compact(&mut buf);
            let (actual_tx, _) = OpTxEnvelope::from_compact(&buf, len);
            let expected_tx = OpTxEnvelope::from(reth_tx);
            assert_eq!(actual_tx, expected_tx);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/primitives/src/transaction/tx_type.rs | crates/optimism/primitives/src/transaction/tx_type.rs | //! Optimism transaction type.
#[cfg(test)]
mod tests {
    use alloy_consensus::constants::EIP7702_TX_TYPE_ID;
    use op_alloy_consensus::{OpTxType, DEPOSIT_TX_TYPE_ID};
    use reth_codecs::{txtype::*, Compact};
    use rstest::rstest;
    // Legacy/2930/1559 encode as small compact identifiers with an empty buffer; 7702 and
    // deposit use the extended-identifier flag plus an explicit type byte in the buffer.
    #[rstest]
    #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])]
    #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])]
    #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])]
    #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])]
    #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])]
    fn test_txtype_to_compact(
        #[case] tx_type: OpTxType,
        #[case] expected_identifier: usize,
        #[case] expected_buf: Vec<u8>,
    ) {
        let mut buf = vec![];
        let identifier = tx_type.to_compact(&mut buf);
        assert_eq!(
            identifier, expected_identifier,
            "Unexpected identifier for OpTxType {tx_type:?}",
        );
        assert_eq!(buf, expected_buf, "Unexpected buffer for OpTxType {tx_type:?}",);
    }
    // Inverse direction of the table above: decoding must consume the whole buffer.
    #[rstest]
    #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])]
    #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])]
    #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])]
    #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])]
    #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])]
    fn test_txtype_from_compact(
        #[case] expected_type: OpTxType,
        #[case] identifier: usize,
        #[case] buf: Vec<u8>,
    ) {
        let (actual_type, remaining_buf) = OpTxType::from_compact(&buf, identifier);
        assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}");
        assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/bin/src/lib.rs | crates/optimism/bin/src/lib.rs | //! Rust Optimism (op-reth) binary executable.
//!
//! ## Feature Flags
//!
//! - `jemalloc`: Uses [jemallocator](https://github.com/tikv/jemallocator) as the global allocator.
//! This is **not recommended on Windows**. See [here](https://rust-lang.github.io/rfcs/1974-global-allocators.html#jemalloc)
//! for more info.
//! - `jemalloc-prof`: Enables [jemallocator's](https://github.com/tikv/jemallocator) heap profiling
//! and leak detection functionality. See [jemalloc's opt.prof](https://jemalloc.net/jemalloc.3.html#opt.prof)
//! documentation for usage details. This is **not recommended on Windows**. See [here](https://rust-lang.github.io/rfcs/1974-global-allocators.html#jemalloc)
//! for more info.
//! - `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented
//! in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more
//! details and supported targets
//! - `min-error-logs`: Disables all logs below `error` level.
//! - `min-warn-logs`: Disables all logs below `warn` level.
//! - `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer
//! calls to the logging component are made.
//! - `min-debug-logs`: Disables all logs below `debug` level.
//! - `min-trace-logs`: Disables all logs below `trace` level.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
// Convenience namespace: each module below simply re-exports one `reth-optimism-*` crate
// so downstream users only need to depend on this binary crate.
/// Re-exported from `reth_optimism_cli`.
pub mod cli {
    pub use reth_optimism_cli::*;
}
/// Re-exported from `reth_optimism_chainspec`.
pub mod chainspec {
    pub use reth_optimism_chainspec::*;
}
/// Re-exported from `reth_optimism_consensus`.
pub mod consensus {
    pub use reth_optimism_consensus::*;
}
/// Re-exported from `reth_optimism_evm`.
pub mod evm {
    pub use reth_optimism_evm::*;
}
/// Re-exported from `reth_optimism_forks`.
pub mod forks {
    pub use reth_optimism_forks::*;
}
/// Re-exported from `reth_optimism_node`.
pub mod node {
    pub use reth_optimism_node::*;
}
/// Re-exported from `reth_optimism_payload_builder`.
pub mod payload {
    pub use reth_optimism_payload_builder::*;
}
/// Re-exported from `reth_optimism_primitives`.
pub mod primitives {
    pub use reth_optimism_primitives::*;
}
/// Re-exported from `reth_optimism_rpc`.
pub mod rpc {
    pub use reth_optimism_rpc::*;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/bin/src/main.rs | crates/optimism/bin/src/main.rs | #![allow(missing_docs, rustdoc::missing_crate_level_docs)]
use clap::Parser;
use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli};
use reth_optimism_node::{args::RollupArgs, OpNode};
use tracing::info;
// Use reth's configured allocator (jemalloc when the feature is enabled) process-wide.
#[global_allocator]
static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator();
/// Entry point: installs the SIGSEGV handler, parses the CLI, launches the OP node and
/// waits for it to exit. Exits with status 1 on any error.
fn main() {
    reth_cli_util::sigsegv_handler::install();
    // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
    if std::env::var_os("RUST_BACKTRACE").is_none() {
        std::env::set_var("RUST_BACKTRACE", "1");
    }
    if let Err(err) =
        Cli::<OpChainSpecParser, RollupArgs>::parse().run(async move |builder, rollup_args| {
            info!(target: "reth::cli", "Launching node");
            let handle =
                builder.node(OpNode::new(rollup_args)).launch_with_debug_capabilities().await?;
            handle.node_exit_future.await
        })
    {
        eprintln!("Error: {err:?}");
        std::process::exit(1);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/sequence.rs | crates/optimism/flashblocks/src/sequence.rs | use crate::{ExecutionPayloadBaseV1, FlashBlock};
use alloy_eips::eip2718::WithEncoded;
use core::mem;
use eyre::{bail, OptionExt};
use reth_primitives_traits::{Recovered, SignedTransaction};
use std::collections::BTreeMap;
use tokio::sync::broadcast;
use tracing::{debug, trace, warn};
/// The size of the broadcast channel for completed flashblock sequences.
///
/// Once more than this many sequences are queued, lagging receivers lose the oldest ones
/// (broadcast-channel semantics — see the note in `new`).
const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128;
/// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices.
#[derive(Debug)]
pub(crate) struct FlashBlockPendingSequence<T> {
    /// tracks the individual flashblocks in order
    ///
    /// With a blocktime of 2s and flashblock tick-rate of 200ms plus one extra flashblock per new
    /// pending block, we expect 11 flashblocks per slot.
    inner: BTreeMap<u64, PreparedFlashBlock<T>>,
    /// Broadcasts flashblocks to subscribers.
    block_broadcaster: broadcast::Sender<FlashBlockCompleteSequence>,
}
impl<T> FlashBlockPendingSequence<T>
where
    T: SignedTransaction,
{
    /// Creates an empty sequence with a fresh broadcast channel for completed sequences.
    pub(crate) fn new() -> Self {
        // Note: if the channel is full, send will not block but rather overwrite the oldest
        // messages. Order is preserved.
        let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE);
        Self { inner: BTreeMap::new(), block_broadcaster: tx }
    }
    /// Gets a subscriber to the flashblock sequences produced.
    pub(crate) fn subscribe_block_sequence(
        &self,
    ) -> broadcast::Receiver<FlashBlockCompleteSequence> {
        self.block_broadcaster.subscribe()
    }
    // Clears the state and broadcasts the blocks produced to subscribers.
    // An invalid sequence (see `FlashBlockCompleteSequence::new`) is dropped with a debug log.
    fn clear_and_broadcast_blocks(&mut self) {
        let flashblocks = mem::take(&mut self.inner);
        // If there are any subscribers, send the flashblocks to them.
        if self.block_broadcaster.receiver_count() > 0 {
            let flashblocks = match FlashBlockCompleteSequence::new(
                flashblocks.into_iter().map(|block| block.1.into()).collect(),
            ) {
                Ok(flashblocks) => flashblocks,
                Err(err) => {
                    debug!(target: "flashblocks", error = ?err, "Failed to create full flashblock complete sequence");
                    return;
                }
            };
            // Note: this should only ever fail if there are no receivers. This can happen if
            // there is a race condition between the clause right above and this
            // one. We can simply warn the user and continue.
            if let Err(err) = self.block_broadcaster.send(flashblocks) {
                warn!(target: "flashblocks", error = ?err, "Failed to send flashblocks to subscribers");
            }
        }
    }
    /// Inserts a new block into the sequence.
    ///
    /// A [`FlashBlock`] with index 0 resets the set.
    pub(crate) fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> {
        if flashblock.index == 0 {
            trace!(number=%flashblock.block_number(), "Tracking new flashblock sequence");
            // Flash block at index zero resets the whole state.
            self.clear_and_broadcast_blocks();
            self.inner.insert(flashblock.index, PreparedFlashBlock::new(flashblock)?);
            return Ok(())
        }
        // only insert if we previously received the same block, assume we received index 0
        if self.block_number() == Some(flashblock.metadata.block_number) {
            trace!(number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock");
            self.inner.insert(flashblock.index, PreparedFlashBlock::new(flashblock)?);
        } else {
            trace!(number=%flashblock.block_number(), index = %flashblock.index, current=?self.block_number() ,"Ignoring untracked flashblock following");
        }
        Ok(())
    }
    /// Iterator over sequence of executable transactions.
    ///
    /// A flashblocks is not ready if there's missing previous flashblocks, i.e. there's a gap in
    /// the sequence
    ///
    /// Note: flashblocks start at `index 0`.
    pub(crate) fn ready_transactions(
        &self,
    ) -> impl Iterator<Item = WithEncoded<Recovered<T>>> + '_ {
        self.inner
            .values()
            .enumerate()
            // `take_while` stops at the first gap between the expected position and the
            // stored flashblock index.
            .take_while(|(idx, block)| {
                // flashblock index 0 is the first flashblock
                block.block().index == *idx as u64
            })
            .flat_map(|(_, block)| block.txs.clone())
    }
    /// Returns the first block number
    pub(crate) fn block_number(&self) -> Option<u64> {
        Some(self.inner.values().next()?.block().metadata.block_number)
    }
    /// Returns the payload base of the first tracked flashblock.
    pub(crate) fn payload_base(&self) -> Option<ExecutionPayloadBaseV1> {
        self.inner.values().next()?.block().base.clone()
    }
    /// Returns the number of tracked flashblocks.
    pub(crate) fn count(&self) -> usize {
        self.inner.len()
    }
}
/// A complete sequence of flashblocks, often corresponding to a full block.
///
/// Invariants checked on construction (see [`FlashBlockCompleteSequence::new`]): the
/// sequence is non-empty, its first flashblock carries the base payload, indices are
/// consecutive from zero, and all entries share the same block number and payload id.
#[derive(Debug, Clone)]
pub struct FlashBlockCompleteSequence(Vec<FlashBlock>);
impl FlashBlockCompleteSequence {
    /// Creates a validated, complete sequence from `blocks`.
    ///
    /// Guarantees after construction:
    /// * the vector is non-empty
    /// * the first flashblock carries the base payload
    /// * indices are consecutive from 0, and every entry shares the first entry's
    ///   payload id and block number
    pub fn new(blocks: Vec<FlashBlock>) -> eyre::Result<Self> {
        let first = blocks.first().ok_or_eyre("No flashblocks in sequence")?;
        first.base.as_ref().ok_or_eyre("Flashblock at index 0 has no base")?;
        for (idx, block) in blocks.iter().enumerate() {
            let consistent = block.index as usize == idx &&
                block.payload_id == first.payload_id &&
                block.metadata.block_number == first.metadata.block_number;
            if !consistent {
                bail!("Flashblock inconsistencies detected in sequence");
            }
        }
        Ok(Self(blocks))
    }
    /// Block number shared by every flashblock in the sequence.
    pub fn block_number(&self) -> u64 {
        self.0.first().unwrap().metadata.block_number
    }
    /// Base payload carried by the first flashblock (guaranteed present by `new`).
    pub fn payload_base(&self) -> &ExecutionPayloadBaseV1 {
        self.0.first().unwrap().base.as_ref().unwrap()
    }
    /// Number of flashblocks contained in the sequence.
    pub const fn count(&self) -> usize {
        self.0.len()
    }
}
impl<T> TryFrom<FlashBlockPendingSequence<T>> for FlashBlockCompleteSequence {
type Error = eyre::Error;
fn try_from(sequence: FlashBlockPendingSequence<T>) -> Result<Self, Self::Error> {
Self::new(
sequence.inner.into_values().map(|block| block.block().clone()).collect::<Vec<_>>(),
)
}
}
/// A [`FlashBlock`] bundled with its transactions already decoded and signer-recovered,
/// so they can be executed without re-parsing.
#[derive(Debug)]
struct PreparedFlashBlock<T> {
    /// The prepared transactions, ready for execution
    txs: Vec<WithEncoded<Recovered<T>>>,
    /// The tracked flashblock
    block: FlashBlock,
}
impl<T> PreparedFlashBlock<T> {
    /// Returns a reference to the underlying flashblock.
    const fn block(&self) -> &FlashBlock {
        &self.block
    }
}
impl<T> From<PreparedFlashBlock<T>> for FlashBlock {
    /// Discards the prepared transactions and returns the raw flashblock by value.
    fn from(val: PreparedFlashBlock<T>) -> Self {
        val.block
    }
}
impl<T> PreparedFlashBlock<T>
where
    T: SignedTransaction,
{
    /// Decodes and signer-recovers every transaction in `block`, yielding a flashblock
    /// that is ready for execution.
    ///
    /// Fails on the first transaction whose EIP-2718 decoding or signer recovery errors.
    fn new(block: FlashBlock) -> eyre::Result<Self> {
        let txs = block
            .diff
            .transactions
            .iter()
            .cloned()
            .map(|encoded| -> eyre::Result<_> {
                let tx = T::decode_2718_exact(encoded.as_ref())?;
                let signer = tx.try_recover()?;
                Ok(WithEncoded::new(encoded, tx.with_signer(signer)))
            })
            .collect::<eyre::Result<Vec<_>>>()?;
        Ok(Self { txs, block })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ExecutionPayloadFlashblockDeltaV1;
    use alloy_consensus::{
        transaction::SignerRecoverable, EthereumTxEnvelope, EthereumTypedTransaction, TxEip1559,
    };
    use alloy_eips::Encodable2718;
    use alloy_primitives::{hex, Signature, TxKind, U256};
    /// Inserting indices 0 and 2 (with 1 missing) must yield only the transactions of
    /// flashblock 0 from `ready_transactions` — the gap cuts the sequence off.
    #[test]
    fn test_sequence_stops_before_gap() {
        let mut sequence = FlashBlockPendingSequence::new();
        let tx = EthereumTxEnvelope::new_unhashed(
            EthereumTypedTransaction::<TxEip1559>::Eip1559(TxEip1559 {
                chain_id: 4,
                nonce: 26u64,
                max_priority_fee_per_gas: 1500000000,
                max_fee_per_gas: 1500000013,
                gas_limit: 21_000u64,
                to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()),
                value: U256::from(3000000000000000000u64),
                input: Default::default(),
                access_list: Default::default(),
            }),
            Signature::new(
                U256::from_be_bytes(hex!(
                    "59e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafd"
                )),
                U256::from_be_bytes(hex!(
                    "016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469"
                )),
                true,
            ),
        );
        let tx = Recovered::new_unchecked(tx.clone(), tx.recover_signer_unchecked().unwrap());
        sequence
            .insert(FlashBlock {
                payload_id: Default::default(),
                index: 0,
                base: None,
                diff: ExecutionPayloadFlashblockDeltaV1 {
                    transactions: vec![tx.encoded_2718().into()],
                    ..Default::default()
                },
                metadata: Default::default(),
            })
            .unwrap();
        // Index 1 is deliberately missing: everything from index 2 on must be ignored.
        sequence
            .insert(FlashBlock {
                payload_id: Default::default(),
                index: 2,
                base: None,
                diff: Default::default(),
                metadata: Default::default(),
            })
            .unwrap();
        let actual_txs: Vec<_> = sequence.ready_transactions().collect();
        let expected_txs = vec![WithEncoded::new(tx.encoded_2718().into(), tx)];
        assert_eq!(actual_txs, expected_txs);
    }
    /// A new index-0 flashblock resets the sequence and broadcasts the previously tracked
    /// flashblocks to subscribers as one complete sequence.
    #[test]
    fn test_sequence_sends_flashblocks_to_subscribers() {
        let mut sequence = FlashBlockPendingSequence::<EthereumTxEnvelope<TxEip1559>>::new();
        let mut subscriber = sequence.subscribe_block_sequence();
        for idx in 0..10 {
            sequence
                .insert(FlashBlock {
                    payload_id: Default::default(),
                    index: idx,
                    base: Some(ExecutionPayloadBaseV1::default()),
                    diff: Default::default(),
                    metadata: Default::default(),
                })
                .unwrap();
        }
        assert_eq!(sequence.count(), 10);
        // Then we don't receive anything until we insert a new flashblock
        let no_flashblock = subscriber.try_recv();
        assert!(no_flashblock.is_err());
        // Let's insert a new flashblock with index 0
        sequence
            .insert(FlashBlock {
                payload_id: Default::default(),
                index: 0,
                base: Some(ExecutionPayloadBaseV1::default()),
                diff: Default::default(),
                metadata: Default::default(),
            })
            .unwrap();
        let flashblocks = subscriber.try_recv().unwrap();
        assert_eq!(flashblocks.count(), 10);
        for (idx, block) in flashblocks.0.iter().enumerate() {
            assert_eq!(block.index, idx as u64);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/lib.rs | crates/optimism/flashblocks/src/lib.rs | //! A downstream integration of Flashblocks.
pub use payload::{
    ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, Metadata,
};
use reth_rpc_eth_types::PendingBlock;
pub use service::FlashBlockService;
pub use ws::{WsConnect, WsFlashBlockStream};
// Internal modules backing the public re-exports above.
mod payload;
mod sequence;
pub use sequence::FlashBlockCompleteSequence;
mod service;
mod worker;
mod ws;
/// Receiver of the most recent [`PendingBlock`] built out of [`FlashBlock`]s.
///
/// [`FlashBlock`]: crate::FlashBlock
pub type PendingBlockRx<N> = tokio::sync::watch::Receiver<Option<PendingBlock<N>>>;
/// Receiver of the sequences of [`FlashBlock`]s built.
///
/// [`FlashBlock`]: crate::FlashBlock
pub type FlashBlockCompleteSequenceRx =
    tokio::sync::broadcast::Receiver<FlashBlockCompleteSequence>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/service.rs | crates/optimism/flashblocks/src/service.rs | use crate::{
sequence::FlashBlockPendingSequence,
worker::{BuildArgs, FlashBlockBuilder},
ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequence,
};
use alloy_eips::eip2718::WithEncoded;
use alloy_primitives::B256;
use futures_util::{FutureExt, Stream, StreamExt};
use reth_chain_state::{CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions};
use reth_evm::ConfigureEvm;
use reth_primitives_traits::{
AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered,
};
use reth_revm::cached::CachedReads;
use reth_rpc_eth_types::PendingBlock;
use reth_storage_api::{BlockReaderIdExt, StateProviderFactory};
use reth_tasks::TaskExecutor;
use std::{
pin::Pin,
task::{ready, Context, Poll},
time::Instant,
};
use tokio::{
pin,
sync::{broadcast, oneshot},
};
use tracing::{debug, trace, warn};
/// The `FlashBlockService` maintains an in-memory [`PendingBlock`] built out of a sequence of
/// [`FlashBlock`]s.
#[derive(Debug)]
pub struct FlashBlockService<
    N: NodePrimitives,
    S,
    EvmConfig: ConfigureEvm<Primitives = N, NextBlockEnvCtx: Unpin>,
    Provider,
> {
    /// Incoming stream of flashblocks (see the `Stream<Item = eyre::Result<FlashBlock>>` bound).
    rx: S,
    /// The most recently built pending block, if any.
    current: Option<PendingBlock<N>>,
    /// Ordered sequence of flashblocks received for the block currently being built.
    blocks: FlashBlockPendingSequence<N::SignedTx>,
    /// Set when a newly inserted flashblock warrants rebuilding the pending block;
    /// cleared after a successful build.
    rebuild: bool,
    /// Builder that executes the accumulated flashblock transactions into a pending block.
    builder: FlashBlockBuilder<EvmConfig, Provider>,
    /// Canonical-state notifications, used to invalidate `current` on a new chain tip.
    canon_receiver: CanonStateNotifications<N>,
    /// Executor used to spawn work off this service — TODO confirm usage (spawn sites are
    /// outside this chunk).
    spawner: TaskExecutor,
    /// In-flight build job, if any; polled to completion before new work is started.
    job: Option<BuildJob<N>>,
    /// Cached state reads for the current block.
    /// Current `PendingBlock` is built out of a sequence of `FlashBlocks`, and executed again when
    /// fb received on top of the same block. Avoid redundant I/O across multiple executions
    /// within the same block.
    cached_state: Option<(B256, CachedReads)>,
}
impl<N, S, EvmConfig, Provider> FlashBlockService<N, S, EvmConfig, Provider>
where
    N: NodePrimitives,
    S: Stream<Item = eyre::Result<FlashBlock>> + Unpin + 'static,
    EvmConfig: ConfigureEvm<Primitives = N, NextBlockEnvCtx: From<ExecutionPayloadBaseV1> + Unpin>
        + Clone
        + 'static,
    Provider: StateProviderFactory
        + CanonStateSubscriptions<Primitives = N>
        + BlockReaderIdExt<
            Header = HeaderTy<N>,
            Block = BlockTy<N>,
            Transaction = N::SignedTx,
            Receipt = ReceiptTy<N>,
        > + Unpin
        + Clone
        + 'static,
    /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream.
    pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self {
        Self {
            rx,
            current: None,
            blocks: FlashBlockPendingSequence::new(),
            canon_receiver: provider.subscribe_to_canonical_state(),
            builder: FlashBlockBuilder::new(evm_config, provider),
            rebuild: false,
            spawner,
            job: None,
            cached_state: None,
        }
    }
    /// Returns a subscriber to the flashblock sequence.
    pub fn subscribe_block_sequence(&self) -> broadcast::Receiver<FlashBlockCompleteSequence> {
        self.blocks.subscribe_block_sequence()
    }
    /// Drives the services and sends new blocks to the receiver
    ///
    /// Note: this should be spawned
    pub async fn run(mut self, tx: tokio::sync::watch::Sender<Option<PendingBlock<N>>>) {
        // Errors from the stream or the watch channel are logged and otherwise ignored.
        while let Some(block) = self.next().await {
            if let Ok(block) = block.inspect_err(|e| tracing::error!("{e}")) {
                let _ = tx.send(block).inspect_err(|e| tracing::error!("{e}"));
            }
        }
        warn!("Flashblock service has stopped");
    }
    /// Returns the [`BuildArgs`] made purely out of [`FlashBlock`]s that were received earlier.
    ///
    /// Returns `None` if the flashblock have no `base` or the base is not a child block of latest.
    fn build_args(
        &mut self,
    ) -> Option<BuildArgs<impl IntoIterator<Item = WithEncoded<Recovered<N::SignedTx>>>>> {
        let Some(base) = self.blocks.payload_base() else {
            trace!(
                flashblock_number = ?self.blocks.block_number(),
                count = %self.blocks.count(),
                "Missing flashblock payload base"
            );
            return None
        };
        // attempt an initial consecutive check
        if let Some(latest) = self.builder.provider().latest_header().ok().flatten() {
            if latest.hash() != base.parent_hash {
                trace!(flashblock_parent=?base.parent_hash, flashblock_number=base.block_number, local_latest=?latest.num_hash(), "Skipping non consecutive build attempt");
                return None;
            }
        }
        Some(BuildArgs {
            base,
            // Only gap-free prefix of flashblocks is executable.
            transactions: self.blocks.ready_transactions().collect::<Vec<_>>(),
            // Consumed here; repopulated when the build job completes successfully.
            cached_state: self.cached_state.take(),
        })
    }
    /// Takes out `current` [`PendingBlock`] if `state` is not preceding it.
    fn on_new_tip(&mut self, state: CanonStateNotification<N>) -> Option<PendingBlock<N>> {
        let latest = state.tip_checked()?.hash();
        self.current.take_if(|current| current.parent_hash() != latest)
    }
}
// Polling state machine that ingests flashblocks, reacts to canonical-chain updates,
// and spawns background build jobs for new pending blocks.
impl<N, S, EvmConfig, Provider> Stream for FlashBlockService<N, S, EvmConfig, Provider>
where
N: NodePrimitives,
S: Stream<Item = eyre::Result<FlashBlock>> + Unpin + 'static,
EvmConfig: ConfigureEvm<Primitives = N, NextBlockEnvCtx: From<ExecutionPayloadBaseV1> + Unpin>
+ Clone
+ 'static,
Provider: StateProviderFactory
+ CanonStateSubscriptions<Primitives = N>
+ BlockReaderIdExt<
Header = HeaderTy<N>,
Block = BlockTy<N>,
Transaction = N::SignedTx,
Receipt = ReceiptTy<N>,
> + Unpin
+ Clone
+ 'static,
{
// `Ok(Some(block))` => a new pending block was built; `Ok(None)` => the current
// pending block was cleared on a new canonical tip; `Err` => the inbound
// flashblock stream yielded an error.
type Item = eyre::Result<Option<PendingBlock<N>>>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
// drive pending build job to completion
// NOTE(review): `ready!` returns `Poll::Pending` while a job is in flight, so
// no new flashblocks are consumed until the current build resolves.
let result = match this.job.as_mut() {
Some((now, rx)) => {
let result = ready!(rx.poll_unpin(cx));
// A dropped sender (`Err`) is treated the same as "no result".
result.ok().map(|res| (*now, res))
}
None => None,
};
// reset job
this.job.take();
if let Some((now, result)) = result {
match result {
Ok(Some((new_pending, cached_reads))) => {
// built a new pending block
this.current = Some(new_pending.clone());
// cache reads
// Keyed by parent hash so they are only reused for builds on the same parent.
this.cached_state = Some((new_pending.parent_hash(), cached_reads));
this.rebuild = false;
trace!(
parent_hash = %new_pending.block().parent_hash(),
block_number = new_pending.block().number(),
flash_blocks = this.blocks.count(),
elapsed = ?now.elapsed(),
"Built new block with flashblocks"
);
return Poll::Ready(Some(Ok(Some(new_pending))));
}
Ok(None) => {
// nothing to do because tracked flashblock doesn't attach to latest
}
Err(err) => {
// we can ignore this error
debug!(%err, "failed to execute flashblock");
}
}
}
// consume new flashblocks while they're ready
// Any successfully inserted flashblock flags a rebuild.
while let Poll::Ready(Some(result)) = this.rx.poll_next_unpin(cx) {
match result {
Ok(flashblock) => match this.blocks.insert(flashblock) {
Ok(_) => this.rebuild = true,
Err(err) => debug!(%err, "Failed to prepare flashblock"),
},
Err(err) => return Poll::Ready(Some(Err(err))),
}
}
// update on new head block
// The recv future is recreated on every poll; pinned locally just for this check.
if let Poll::Ready(Ok(state)) = {
let fut = this.canon_receiver.recv();
pin!(fut);
fut.poll_unpin(cx)
} {
if let Some(current) = this.on_new_tip(state) {
trace!(
parent_hash = %current.block().parent_hash(),
block_number = current.block().number(),
"Clearing current flashblock on new canonical block"
);
return Poll::Ready(Some(Ok(None)))
}
}
// Nothing new arrived and a pending block already exists: wait for more input.
if !this.rebuild && this.current.is_some() {
return Poll::Pending
}
// try to build a block on top of latest
if let Some(args) = this.build_args() {
let now = Instant::now();
let (tx, rx) = oneshot::channel();
let builder = this.builder.clone();
// Execution is CPU-bound, so it runs on the blocking task pool.
this.spawner.spawn_blocking(async move {
let _ = tx.send(builder.execute(args));
});
this.job.replace((now, rx));
// continue and poll the spawned job
continue
}
return Poll::Pending
}
}
}
/// Handle to an in-flight background build: the instant the job was started (for
/// latency logging) and the channel delivering the build result.
type BuildJob<N> =
(Instant, oneshot::Receiver<eyre::Result<Option<(PendingBlock<N>, CachedReads)>>>);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/worker.rs | crates/optimism/flashblocks/src/worker.rs | use crate::ExecutionPayloadBaseV1;
use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag};
use alloy_primitives::B256;
use reth_chain_state::{CanonStateSubscriptions, ExecutedBlock};
use reth_errors::RethError;
use reth_evm::{
execute::{BlockBuilder, BlockBuilderOutcome},
ConfigureEvm,
};
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{
AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered,
};
use reth_revm::{cached::CachedReads, database::StateProviderDatabase, db::State};
use reth_rpc_eth_types::{EthApiError, PendingBlock};
use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, StateProviderFactory};
use std::{
sync::Arc,
time::{Duration, Instant},
};
use tracing::trace;
/// The `FlashBlockBuilder` builds [`PendingBlock`] out of a sequence of transactions.
#[derive(Debug)]
pub(crate) struct FlashBlockBuilder<EvmConfig, Provider> {
/// EVM configuration used to set up execution of the next block.
evm_config: EvmConfig,
/// State and header provider the pending block is built against.
provider: Provider,
}
impl<EvmConfig, Provider> FlashBlockBuilder<EvmConfig, Provider> {
/// Creates a new builder from the given EVM configuration and provider.
pub(crate) const fn new(evm_config: EvmConfig, provider: Provider) -> Self {
Self { evm_config, provider }
}
/// Returns a reference to the underlying provider.
pub(crate) const fn provider(&self) -> &Provider {
&self.provider
}
}
/// Inputs for a single flashblock build attempt.
pub(crate) struct BuildArgs<I> {
/// Header-level attributes the pending block is built from.
pub base: ExecutionPayloadBaseV1,
/// Transactions to execute on top of `base`, in order.
pub transactions: I,
/// Cached state reads, tagged with the parent block hash they were captured
/// against; only reused when that hash matches the current latest block.
pub cached_state: Option<(B256, CachedReads)>,
}
impl<N, EvmConfig, Provider> FlashBlockBuilder<EvmConfig, Provider>
where
N: NodePrimitives,
EvmConfig: ConfigureEvm<Primitives = N, NextBlockEnvCtx: From<ExecutionPayloadBaseV1> + Unpin>,
Provider: StateProviderFactory
+ CanonStateSubscriptions<Primitives = N>
+ BlockReaderIdExt<
Header = HeaderTy<N>,
Block = BlockTy<N>,
Transaction = N::SignedTx,
Receipt = ReceiptTy<N>,
> + Unpin,
{
/// Returns the [`PendingBlock`] made purely out of transactions and [`ExecutionPayloadBaseV1`]
/// in `args`.
///
/// Returns `None` if the flashblock doesn't attach to the latest header.
///
/// On success also returns the [`CachedReads`] populated during execution so the caller
/// can feed them back in for the next build on the same parent.
pub(crate) fn execute<I: IntoIterator<Item = WithEncoded<Recovered<N::SignedTx>>>>(
&self,
mut args: BuildArgs<I>,
) -> eyre::Result<Option<(PendingBlock<N>, CachedReads)>> {
trace!("Attempting new pending block from flashblocks");
let latest = self
.provider
.latest_header()?
.ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?;
let latest_hash = latest.hash();
if args.base.parent_hash != latest_hash {
trace!(flashblock_parent = ?args.base.parent_hash, local_latest=?latest.num_hash(),"Skipping non consecutive flashblock");
// doesn't attach to the latest block
return Ok(None)
}
let state_provider = self.provider.history_by_block_hash(latest.hash())?;
// Reuse cached reads only when they were captured against the same parent block;
// otherwise start with an empty cache.
let mut request_cache = args
.cached_state
.take()
.filter(|(hash, _)| hash == &latest_hash)
.map(|(_, state)| state)
.unwrap_or_default();
let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider));
let mut state = State::builder().with_database(cached_db).with_bundle_update().build();
let mut builder = self
.evm_config
.builder_for_next_block(&mut state, &latest, args.base.into())
.map_err(RethError::other)?;
builder.apply_pre_execution_changes()?;
for tx in args.transactions {
let _gas_used = builder.execute_transaction(tx)?;
}
// NoopProvider: no state-root/trie computation is required for a pending block.
let BlockBuilderOutcome { execution_result, block, hashed_state, .. } =
builder.finish(NoopProvider::default())?;
let execution_outcome = ExecutionOutcome::new(
state.take_bundle(),
vec![execution_result.receipts],
block.number(),
vec![execution_result.requests],
);
Ok(Some((
PendingBlock::with_executed_block(
// NOTE(review): presumably the expiry timestamp of the pending block (one
// second from now) — confirm against `PendingBlock::with_executed_block`.
Instant::now() + Duration::from_secs(1),
ExecutedBlock {
recovered_block: block.into(),
execution_output: Arc::new(execution_outcome),
hashed_state: Arc::new(hashed_state),
},
),
request_cache,
)))
}
}
// Manual `Clone` so the impl exists exactly when both type parameters are cloneable.
impl<EvmConfig: Clone, Provider: Clone> Clone for FlashBlockBuilder<EvmConfig, Provider> {
    fn clone(&self) -> Self {
        let evm_config = self.evm_config.clone();
        let provider = self.provider.clone();
        Self { evm_config, provider }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/payload.rs | crates/optimism/flashblocks/src/payload.rs | use alloy_eips::eip4895::Withdrawal;
use alloy_primitives::{Address, Bloom, Bytes, B256, U256};
use alloy_rpc_types_engine::PayloadId;
use reth_optimism_evm::OpNextBlockEnvAttributes;
use reth_optimism_primitives::OpReceipt;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// Represents a Flashblock, a real-time block-like structure emitted by the Base L2 chain.
///
/// A Flashblock provides a snapshot of a block’s effects before finalization,
/// allowing faster insight into state transitions, balance changes, and logs.
/// It includes a diff of the block’s execution and associated metadata.
///
/// Instances are decoded from the websocket wire format by `FlashBlock::decode`.
///
/// See: [Base Flashblocks Documentation](https://docs.base.org/chain/flashblocks)
#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct FlashBlock {
/// The unique payload ID as assigned by the execution engine for this block.
pub payload_id: PayloadId,
/// A sequential index that identifies the order of this Flashblock.
pub index: u64,
/// A subset of block header fields.
/// NOTE(review): appears to be absent on some flashblocks (callers handle `None`) —
/// confirm whether only the first flashblock of a block carries it.
pub base: Option<ExecutionPayloadBaseV1>,
/// The execution diff representing state transitions and transactions.
pub diff: ExecutionPayloadFlashblockDeltaV1,
/// Additional metadata about the block such as receipts and balances.
pub metadata: Metadata,
}
impl FlashBlock {
    /// Returns the block number of this flashblock.
    pub const fn block_number(&self) -> u64 {
        self.metadata.block_number
    }

    /// Returns the first parent hash of this flashblock.
    ///
    /// `None` when the flashblock carries no `base` payload.
    pub fn parent_hash(&self) -> Option<B256> {
        self.base.as_ref().map(|base| base.parent_hash)
    }
}
/// Provides metadata about the block that may be useful for indexing or analysis.
#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct Metadata {
/// The number of the block in the L2 chain.
pub block_number: u64,
/// A map of addresses to their updated balances after the block execution.
/// This represents balance changes due to transactions, rewards, or system transfers.
pub new_account_balances: BTreeMap<Address, U256>,
/// Execution receipts for all transactions in the block.
/// Contains logs, gas usage, and other EVM-level metadata.
/// Keyed by transaction hash (NOTE(review): inferred from the `B256` key — confirm).
pub receipts: BTreeMap<B256, OpReceipt>,
}
/// Represents the base configuration of an execution payload that remains constant
/// throughout block construction. This includes fundamental block properties like
/// parent hash, block number, and other header fields that are determined at
/// block creation and cannot be modified.
///
/// Numeric fields are serialized as hex quantities (`alloy_serde::quantity`).
#[derive(Clone, Debug, Eq, PartialEq, Default, Deserialize, Serialize)]
pub struct ExecutionPayloadBaseV1 {
/// Ecotone parent beacon block root
pub parent_beacon_block_root: B256,
/// The parent hash of the block.
pub parent_hash: B256,
/// The fee recipient of the block.
pub fee_recipient: Address,
/// The previous randao of the block.
pub prev_randao: B256,
/// The block number.
#[serde(with = "alloy_serde::quantity")]
pub block_number: u64,
/// The gas limit of the block.
#[serde(with = "alloy_serde::quantity")]
pub gas_limit: u64,
/// The timestamp of the block.
#[serde(with = "alloy_serde::quantity")]
pub timestamp: u64,
/// The extra data of the block.
pub extra_data: Bytes,
/// The base fee per gas of the block.
pub base_fee_per_gas: U256,
}
/// Represents the modified portions of an execution payload within a flashblock.
/// This structure contains only the fields that can be updated during block construction,
/// such as state root, receipts, logs, and new transactions. Other immutable block fields
/// like parent hash and block number are excluded since they remain constant throughout
/// the block's construction.
#[derive(Clone, Debug, Eq, PartialEq, Default, Deserialize, Serialize)]
pub struct ExecutionPayloadFlashblockDeltaV1 {
/// The state root of the block.
pub state_root: B256,
/// The receipts root of the block.
pub receipts_root: B256,
/// The logs bloom of the block.
pub logs_bloom: Bloom,
/// The gas used of the block.
#[serde(with = "alloy_serde::quantity")]
pub gas_used: u64,
/// The block hash of the block.
pub block_hash: B256,
/// The transactions of the block.
/// NOTE(review): presumably EIP-2718-encoded transaction bytes — confirm against the
/// Base flashblocks spec.
pub transactions: Vec<Bytes>,
/// Array of [`Withdrawal`] enabled with V2
pub withdrawals: Vec<Withdrawal>,
/// The withdrawals root of the block.
pub withdrawals_root: B256,
}
impl From<ExecutionPayloadBaseV1> for OpNextBlockEnvAttributes {
fn from(value: ExecutionPayloadBaseV1) -> Self {
Self {
timestamp: value.timestamp,
suggested_fee_recipient: value.fee_recipient,
prev_randao: value.prev_randao,
gas_limit: value.gas_limit,
parent_beacon_block_root: Some(value.parent_beacon_block_root),
extra_data: value.extra_data,
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/ws/stream.rs | crates/optimism/flashblocks/src/ws/stream.rs | use crate::FlashBlock;
use futures_util::{
stream::{SplitSink, SplitStream},
FutureExt, Sink, Stream, StreamExt,
};
use std::{
fmt::{Debug, Formatter},
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::net::TcpStream;
use tokio_tungstenite::{
connect_async,
tungstenite::{protocol::CloseFrame, Bytes, Error, Message},
MaybeTlsStream, WebSocketStream,
};
use tracing::debug;
use url::Url;
/// An asynchronous stream of [`FlashBlock`] from a websocket connection.
///
/// The stream attempts to connect to a websocket URL and then decode each received item.
///
/// If the connection fails, the error is returned and connection retried. The number of retries is
/// unbounded.
pub struct WsFlashBlockStream<Stream, Sink, Connector> {
/// Endpoint the stream (re)connects to.
ws_url: Url,
/// Current connection lifecycle state.
state: State,
/// Factory used to (re)establish connections.
connector: Connector,
/// In-flight connection attempt; only polled while in `State::Connect`.
connect: ConnectFuture<Sink, Stream>,
/// Read half of the active connection, if any.
stream: Option<Stream>,
/// Write half of the active connection, used for pong/close responses.
sink: Option<Sink>,
}
impl WsFlashBlockStream<WsStream, WsSink, WsConnector> {
    /// Creates a new websocket stream over `ws_url`.
    ///
    /// Uses the default [`WsConnector`] to establish connections.
    pub fn new(ws_url: Url) -> Self {
        // Delegate to `with_connector` so the initial-state setup lives in one place.
        Self::with_connector(ws_url, WsConnector)
    }
}
impl<Stream, S, C> WsFlashBlockStream<Stream, S, C> {
    /// Creates a new websocket stream over `ws_url` using the given `connector`.
    pub fn with_connector(ws_url: Url, connector: C) -> Self {
        Self {
            ws_url,
            connector,
            state: State::default(),
            // Placeholder future that fails immediately; it is replaced by a real
            // connection attempt on the first poll.
            connect: Box::pin(async move { Err(Error::ConnectionClosed)? }),
            stream: None,
            sink: None,
        }
    }
}
impl<Str, S, C> Stream for WsFlashBlockStream<Str, S, C>
where
Str: Stream<Item = Result<Message, Error>> + Unpin,
S: Sink<Message> + Send + Sync + Unpin,
C: WsConnect<Stream = Str, Sink = S> + Clone + Send + Sync + 'static + Unpin,
{
type Item = eyre::Result<FlashBlock>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
// State machine: Initial -> Connect -> Stream. Any terminal condition (connect
// failure or end of stream) resets to Initial, giving unbounded reconnects.
'start: loop {
if this.state == State::Initial {
this.connect();
}
if this.state == State::Connect {
match ready!(this.connect.poll_unpin(cx)) {
Ok((sink, stream)) => this.stream(sink, stream),
Err(err) => {
// Surface the connect error; the next poll retries from Initial.
this.state = State::Initial;
return Poll::Ready(Some(Err(err)));
}
}
}
while let State::Stream(msg) = &mut this.state {
// Flush any queued response (pong or close echo) before reading further.
if msg.is_some() {
let mut sink = Pin::new(this.sink.as_mut().unwrap());
// NOTE(review): sink errors are discarded here; a failed send silently
// drops the queued response.
let _ = ready!(sink.as_mut().poll_ready(cx));
if let Some(pong) = msg.take() {
let _ = sink.as_mut().start_send(pong);
}
let _ = ready!(sink.as_mut().poll_flush(cx));
}
let Some(msg) = ready!(this
.stream
.as_mut()
.expect("Stream state should be unreachable without stream")
.poll_next_unpin(cx))
else {
// Remote ended the stream: reconnect from scratch.
this.state = State::Initial;
continue 'start;
};
match msg {
Ok(Message::Binary(bytes)) => {
return Poll::Ready(Some(FlashBlock::decode(bytes)))
}
Ok(Message::Text(bytes)) => {
return Poll::Ready(Some(FlashBlock::decode(bytes.into())))
}
// Pings and close frames queue a response sent on the next loop iteration.
Ok(Message::Ping(bytes)) => this.ping(bytes),
Ok(Message::Close(frame)) => this.close(frame),
Ok(msg) => debug!("Received unexpected message: {:?}", msg),
Err(err) => return Poll::Ready(Some(Err(err.into()))),
}
}
}
}
}
impl<Stream, S, C> WsFlashBlockStream<Stream, S, C>
where
    C: WsConnect<Stream = Stream, Sink = S> + Clone + Send + Sync + 'static,
{
    /// Starts a fresh connection attempt and transitions into [`State::Connect`].
    fn connect(&mut self) {
        let ws_url = self.ws_url.clone();
        let mut connector = self.connector.clone();
        // `ConnectFuture` is a `Pin<Box<dyn Future ...>>`, which is `Unpin`, so a plain
        // reassignment replaces (and drops) the previous future — no `Pin::set` needed.
        self.connect = Box::pin(async move { connector.connect(ws_url).await });
        self.state = State::Connect;
    }

    /// Stores the freshly connected halves and transitions into [`State::Stream`].
    fn stream(&mut self, sink: S, stream: Stream) {
        self.sink.replace(sink);
        self.stream.replace(stream);
        self.state = State::Stream(None);
    }

    /// Queues a pong response for the received ping payload.
    fn ping(&mut self, pong: Bytes) {
        if let State::Stream(current) = &mut self.state {
            current.replace(Message::Pong(pong));
        }
    }

    /// Queues an echo of the close frame before the connection is torn down.
    fn close(&mut self, frame: Option<CloseFrame>) {
        if let State::Stream(current) = &mut self.state {
            current.replace(Message::Close(frame));
        }
    }
}
impl<Stream: Debug, S: Debug, C: Debug> Debug for WsFlashBlockStream<Stream, S, C> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Fixed: use the actual type name (was "FlashBlockStream") and include the
        // `sink` field. The boxed connect future has no useful `Debug` output, so a
        // placeholder is printed instead.
        f.debug_struct("WsFlashBlockStream")
            .field("ws_url", &self.ws_url)
            .field("state", &self.state)
            .field("connector", &self.connector)
            .field("connect", &"Pin<Box<dyn Future<..>>>")
            .field("stream", &self.stream)
            .field("sink", &self.sink)
            .finish()
    }
}
/// Connection lifecycle of [`WsFlashBlockStream`].
#[derive(Default, Debug, Eq, PartialEq)]
enum State {
/// No connection and no attempt in flight; a new connect will be scheduled.
#[default]
Initial,
/// A connection attempt future is being polled.
Connect,
/// Connected; the optional [`Message`] is a queued response (pong or close echo)
/// to flush before reading the next incoming message.
Stream(Option<Message>),
}
/// Websocket transport over a possibly TLS-wrapped TCP stream.
type Ws = WebSocketStream<MaybeTlsStream<TcpStream>>;
/// Read half of the websocket.
type WsStream = SplitStream<Ws>;
/// Write half of the websocket.
type WsSink = SplitSink<Ws, Message>;
/// Boxed future resolving to the `(sink, stream)` halves of a new connection.
type ConnectFuture<Sink, Stream> =
Pin<Box<dyn Future<Output = eyre::Result<(Sink, Stream)>> + Send + Sync + 'static>>;
/// The `WsConnect` trait allows for connecting to a websocket.
///
/// Implementors of the `WsConnect` trait are called 'connectors'.
///
/// Connectors are defined by one method, [`connect()`]. A call to [`connect()`] attempts to
/// establish a secure websocket connection and return an asynchronous stream of [`Message`]s
/// wrapped in a [`Result`].
///
/// [`connect()`]: Self::connect
pub trait WsConnect {
/// An associated `Stream` of [`Message`]s wrapped in a [`Result`] that this connection returns.
type Stream;
/// An associated `Sink` of [`Message`]s that this connection sends.
type Sink;
/// Asynchronously connects to a websocket hosted on `ws_url`.
///
/// Takes `&mut self` so connectors may keep internal state across attempts.
///
/// See the [`WsConnect`] documentation for details.
fn connect(
&mut self,
ws_url: Url,
) -> impl Future<Output = eyre::Result<(Self::Sink, Self::Stream)>> + Send + Sync;
}
/// Establishes a secure websocket subscription.
///
/// See the [`WsConnect`] documentation for details.
#[derive(Debug, Clone)]
pub struct WsConnector;

impl WsConnect for WsConnector {
    type Stream = WsStream;
    type Sink = WsSink;

    async fn connect(&mut self, ws_url: Url) -> eyre::Result<(WsSink, WsStream)> {
        // The HTTP upgrade response is not needed, only the socket itself.
        let (ws, _response) = connect_async(ws_url.as_str()).await?;
        let (sink, stream) = ws.split();
        Ok((sink, stream))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ExecutionPayloadBaseV1;
use alloy_primitives::bytes::Bytes;
use brotli::enc::BrotliEncoderParams;
use std::{future, iter};
use tokio_tungstenite::tungstenite::{
protocol::frame::{coding::CloseCode, Frame},
Error,
};
/// A `FakeConnector` creates [`FakeStream`].
///
/// It simulates the websocket stream instead of connecting to a real websocket.
#[derive(Clone)]
struct FakeConnector(FakeStream);
/// A `FakeConnectorWithSink` creates [`FakeStream`] and [`FakeSink`].
///
/// It simulates the websocket stream instead of connecting to a real websocket. It also accepts
/// messages into an in-memory buffer.
#[derive(Clone)]
struct FakeConnectorWithSink(FakeStream);
/// Simulates a websocket stream while using a preprogrammed set of messages instead.
#[derive(Default)]
struct FakeStream(Vec<Result<Message, Error>>);
impl FakeStream {
// Messages are stored reversed so `pop()` yields them in the original order.
fn new(mut messages: Vec<Result<Message, Error>>) -> Self {
messages.reverse();
Self(messages)
}
}
impl Clone for FakeStream {
// Manual impl because `tungstenite::Error` is not `Clone`; only the error
// variants actually used by tests are supported.
fn clone(&self) -> Self {
Self(
self.0
.iter()
.map(|v| match v {
Ok(msg) => Ok(msg.clone()),
Err(err) => Err(match err {
Error::AttackAttempt => Error::AttackAttempt,
err => unimplemented!("Cannot clone this error: {err}"),
}),
})
.collect(),
)
}
}
impl Stream for FakeStream {
type Item = Result<Message, Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
Poll::Ready(this.0.pop())
}
}
/// Sink that panics on use; for tests that never write to the connection.
#[derive(Clone)]
struct NoopSink;
impl<T> Sink<T> for NoopSink {
type Error = ();
fn poll_ready(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
unimplemented!()
}
fn start_send(self: Pin<&mut Self>, _item: T) -> Result<(), Self::Error> {
unimplemented!()
}
fn poll_flush(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
unimplemented!()
}
fn poll_close(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
unimplemented!()
}
}
/// Receives [`Message`]s and stores them. A call to `start_send` first buffers the message
/// to simulate flushing behavior.
#[derive(Clone, Default)]
struct FakeSink(Option<Message>, Vec<Message>);
impl Sink<Message> for FakeSink {
type Error = ();
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.poll_flush(cx)
}
fn start_send(self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> {
self.get_mut().0.replace(item);
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
if let Some(item) = this.0.take() {
this.1.push(item);
}
Poll::Ready(Ok(()))
}
fn poll_close(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
}
impl WsConnect for FakeConnector {
type Stream = FakeStream;
type Sink = NoopSink;
fn connect(
&mut self,
_ws_url: Url,
) -> impl Future<Output = eyre::Result<(Self::Sink, Self::Stream)>> + Send + Sync {
future::ready(Ok((NoopSink, self.0.clone())))
}
}
impl<T: IntoIterator<Item = Result<Message, Error>>> From<T> for FakeConnector {
fn from(value: T) -> Self {
Self(FakeStream::new(value.into_iter().collect()))
}
}
impl WsConnect for FakeConnectorWithSink {
type Stream = FakeStream;
type Sink = FakeSink;
fn connect(
&mut self,
_ws_url: Url,
) -> impl Future<Output = eyre::Result<(Self::Sink, Self::Stream)>> + Send + Sync {
future::ready(Ok((FakeSink::default(), self.0.clone())))
}
}
impl<T: IntoIterator<Item = Result<Message, Error>>> From<T> for FakeConnectorWithSink {
fn from(value: T) -> Self {
Self(FakeStream::new(value.into_iter().collect()))
}
}
/// Repeatedly fails to connect with the given error message.
#[derive(Clone)]
struct FailingConnector(String);
impl WsConnect for FailingConnector {
type Stream = FakeStream;
type Sink = NoopSink;
fn connect(
&mut self,
_ws_url: Url,
) -> impl Future<Output = eyre::Result<(Self::Sink, Self::Stream)>> + Send + Sync {
future::ready(Err(eyre::eyre!("{}", &self.0)))
}
}
// Builds a message-encoder closure for a given wrapper (Binary/Text).
fn to_json_message<B: TryFrom<Bytes, Error: Debug>, F: Fn(B) -> Message>(
wrapper_f: F,
) -> impl Fn(&FlashBlock) -> Result<Message, Error> + use<F, B> {
move |block| to_json_message_using(block, &wrapper_f)
}
fn to_json_binary_message(block: &FlashBlock) -> Result<Message, Error> {
to_json_message_using(block, Message::Binary)
}
fn to_json_message_using<B: TryFrom<Bytes, Error: Debug>, F: Fn(B) -> Message>(
block: &FlashBlock,
wrapper_f: F,
) -> Result<Message, Error> {
Ok(wrapper_f(B::try_from(Bytes::from(serde_json::to_vec(block).unwrap())).unwrap()))
}
// Encodes a flashblock as brotli-compressed JSON, the alternate wire format.
fn to_brotli_message(block: &FlashBlock) -> Result<Message, Error> {
let json = serde_json::to_vec(block).unwrap();
let mut compressed = Vec::new();
brotli::BrotliCompress(
&mut json.as_slice(),
&mut compressed,
&BrotliEncoderParams::default(),
)?;
Ok(Message::Binary(Bytes::from(compressed)))
}
/// Minimal valid flashblock fixture.
fn flashblock() -> FlashBlock {
FlashBlock {
payload_id: Default::default(),
index: 0,
base: Some(ExecutionPayloadBaseV1 {
parent_beacon_block_root: Default::default(),
parent_hash: Default::default(),
fee_recipient: Default::default(),
prev_randao: Default::default(),
block_number: 0,
gas_limit: 0,
timestamp: 0,
extra_data: Default::default(),
base_fee_per_gas: Default::default(),
}),
diff: Default::default(),
metadata: Default::default(),
}
}
#[test_case::test_case(to_json_message(Message::Binary); "json binary")]
#[test_case::test_case(to_json_message(Message::Text); "json UTF-8")]
#[test_case::test_case(to_brotli_message; "brotli")]
#[tokio::test]
async fn test_stream_decodes_messages_successfully(
to_message: impl Fn(&FlashBlock) -> Result<Message, Error>,
) {
let flashblocks = [flashblock()];
let connector = FakeConnector::from(flashblocks.iter().map(to_message));
let ws_url = "http://localhost".parse().unwrap();
let stream = WsFlashBlockStream::with_connector(ws_url, connector);
let actual_messages: Vec<_> = stream.take(1).map(Result::unwrap).collect().await;
let expected_messages = flashblocks.to_vec();
assert_eq!(actual_messages, expected_messages);
}
#[test_case::test_case(Message::Pong(Bytes::from(b"test".as_slice())); "pong")]
#[test_case::test_case(Message::Frame(Frame::pong(b"test".as_slice())); "frame")]
#[tokio::test]
async fn test_stream_ignores_unexpected_message(message: Message) {
let flashblock = flashblock();
let connector = FakeConnector::from([Ok(message), to_json_binary_message(&flashblock)]);
let ws_url = "http://localhost".parse().unwrap();
let mut stream = WsFlashBlockStream::with_connector(ws_url, connector);
let expected_message = flashblock;
let actual_message =
stream.next().await.expect("Binary message should not be ignored").unwrap();
assert_eq!(actual_message, expected_message)
}
#[tokio::test]
async fn test_stream_passes_errors_through() {
let connector = FakeConnector::from([Err(Error::AttackAttempt)]);
let ws_url = "http://localhost".parse().unwrap();
let stream = WsFlashBlockStream::with_connector(ws_url, connector);
let actual_messages: Vec<_> =
stream.take(1).map(Result::unwrap_err).map(|e| format!("{e}")).collect().await;
let expected_messages = vec!["Attack attempt detected".to_owned()];
assert_eq!(actual_messages, expected_messages);
}
#[tokio::test]
async fn test_connect_error_causes_retries() {
let tries = 3;
let error_msg = "test".to_owned();
let connector = FailingConnector(error_msg.clone());
let ws_url = "http://localhost".parse().unwrap();
let stream = WsFlashBlockStream::with_connector(ws_url, connector);
let actual_errors: Vec<_> =
stream.take(tries).map(Result::unwrap_err).map(|e| format!("{e}")).collect().await;
let expected_errors: Vec<_> = iter::repeat_n(error_msg, tries).collect();
assert_eq!(actual_errors, expected_errors);
}
#[test_case::test_case(
Message::Close(Some(CloseFrame { code: CloseCode::Normal, reason: "test".into() })),
Message::Close(Some(CloseFrame { code: CloseCode::Normal, reason: "test".into() }));
"close"
)]
#[test_case::test_case(
Message::Ping(Bytes::from_static(&[1u8, 2, 3])),
Message::Pong(Bytes::from_static(&[1u8, 2, 3]));
"ping"
)]
#[tokio::test]
async fn test_stream_responds_to_messages(msg: Message, expected_response: Message) {
let flashblock = flashblock();
let messages = [Ok(msg), to_json_binary_message(&flashblock)];
let connector = FakeConnectorWithSink::from(messages);
let ws_url = "http://localhost".parse().unwrap();
let mut stream = WsFlashBlockStream::with_connector(ws_url, connector);
let _ = stream.next().await;
let expected_response = vec![expected_response];
let FakeSink(actual_buffer, actual_response) = stream.sink.unwrap();
assert!(actual_buffer.is_none(), "buffer not flushed: {actual_buffer:#?}");
assert_eq!(actual_response, expected_response);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/ws/decoding.rs | crates/optimism/flashblocks/src/ws/decoding.rs | use crate::{ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, Metadata};
use alloy_primitives::bytes::Bytes;
use alloy_rpc_types_engine::PayloadId;
use serde::{Deserialize, Serialize};
use std::{fmt::Debug, io};
/// Internal helper for decoding
///
/// Mirrors [`FlashBlock`] but keeps `metadata` as an untyped [`serde_json::Value`]
/// so it can be parsed (and error-reported) in a separate step.
#[derive(Clone, Debug, PartialEq, Default, Deserialize, Serialize)]
struct FlashblocksPayloadV1 {
/// The payload id of the flashblock
pub payload_id: PayloadId,
/// The index of the flashblock in the block
pub index: u64,
/// The base execution payload configuration
#[serde(skip_serializing_if = "Option::is_none")]
pub base: Option<ExecutionPayloadBaseV1>,
/// The delta/diff containing modified portions of the execution payload
pub diff: ExecutionPayloadFlashblockDeltaV1,
/// Additional metadata associated with the flashblock
pub metadata: serde_json::Value,
}
impl FlashBlock {
/// Decodes `bytes` into [`FlashBlock`].
///
/// This function is specific to the Base Optimism websocket encoding.
///
/// It is assumed that the `bytes` are encoded in JSON and optionally compressed using brotli.
/// Whether the `bytes` is compressed or not is determined by looking at the first
/// non ascii-whitespace character.
pub(crate) fn decode(bytes: Bytes) -> eyre::Result<Self> {
let bytes = try_parse_message(bytes)?;
let payload: FlashblocksPayloadV1 = serde_json::from_slice(&bytes)
.map_err(|e| eyre::eyre!("failed to parse message: {e}"))?;
let metadata: Metadata = serde_json::from_value(payload.metadata)
.map_err(|e| eyre::eyre!("failed to parse message metadata: {e}"))?;
Ok(Self {
payload_id: payload.payload_id,
index: payload.index,
base: payload.base,
diff: payload.diff,
metadata,
})
}
}
/// Maps `bytes` into a potentially different [`Bytes`].
///
/// If the bytes start with a "{" character, prepended by any number of ASCII-whitespaces,
/// then it assumes that it is JSON-encoded and returns it as-is.
///
/// Otherwise, the `bytes` are passed through a brotli decompressor and returned.
fn try_parse_message(bytes: Bytes) -> eyre::Result<Bytes> {
if bytes.trim_ascii_start().starts_with(b"{") {
return Ok(bytes);
}
let mut decompressor = brotli::Decompressor::new(bytes.as_ref(), 4096);
let mut decompressed = Vec::new();
io::copy(&mut decompressor, &mut decompressed)?;
Ok(decompressed.into())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/src/ws/mod.rs | crates/optimism/flashblocks/src/ws/mod.rs | pub use stream::{WsConnect, WsFlashBlockStream};
mod decoding;
mod stream;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/tests/it/stream.rs | crates/optimism/flashblocks/tests/it/stream.rs | use futures_util::stream::StreamExt;
use reth_optimism_flashblocks::WsFlashBlockStream;
// NOTE(review): depends on the public Base Sepolia websocket endpoint being reachable,
// so this integration test fails without network access.
#[tokio::test]
async fn test_streaming_flashblocks_from_remote_source_is_successful() {
let items = 3;
let ws_url = "wss://sepolia.flashblocks.base.org/ws".parse().unwrap();
let stream = WsFlashBlockStream::new(ws_url);
// Take a fixed number of items; each must decode without error.
let blocks: Vec<_> = stream.take(items).collect().await;
for block in blocks {
assert!(block.is_ok());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/flashblocks/tests/it/main.rs | crates/optimism/flashblocks/tests/it/main.rs | //! Integration tests.
//!
//! All the individual modules are rooted here to produce a single binary.
mod stream;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/consensus/src/proof.rs | crates/optimism/consensus/src/proof.rs | //! Helper function for Receipt root calculation for Optimism hardforks.
use alloc::vec::Vec;
use alloy_consensus::ReceiptWithBloom;
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::B256;
use alloy_trie::root::ordered_trie_root_with_encoder;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::DepositReceipt;
/// Calculates the receipt root for a header.
///
/// `timestamp` is the block timestamp used to determine which hardfork rules apply.
pub(crate) fn calculate_receipt_root_optimism<R: DepositReceipt>(
receipts: &[ReceiptWithBloom<&R>],
chain_spec: impl OpHardforks,
timestamp: u64,
) -> B256 {
// There is a minor bug in op-geth and op-erigon where in the Regolith hardfork,
// the receipt root calculation does not include the deposit nonce in the receipt
// encoding. In the Regolith Hardfork, we must strip the deposit nonce from the
// receipts before calculating the receipt root. This was corrected in the Canyon
// hardfork.
if chain_spec.is_regolith_active_at_timestamp(timestamp) &&
!chain_spec.is_canyon_active_at_timestamp(timestamp)
{
// Clone the receipts so the nonce can be stripped without mutating the input.
let receipts = receipts
.iter()
.map(|receipt| {
let mut receipt = receipt.clone().map_receipt(|r| r.clone());
if let Some(receipt) = receipt.receipt.as_deposit_receipt_mut() {
receipt.deposit_nonce = None;
}
receipt
})
.collect::<Vec<_>>();
return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| r.encode_2718(buf))
}
ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_2718(buf))
}
/// Calculates the receipt root for a header for the reference type of an OP receipt.
///
/// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized.
pub fn calculate_receipt_root_no_memo_optimism<R: DepositReceipt>(
receipts: &[R],
chain_spec: impl OpHardforks,
timestamp: u64,
) -> B256 {
// There is a minor bug in op-geth and op-erigon where in the Regolith hardfork,
// the receipt root calculation does not include the deposit nonce in the receipt
// encoding. In the Regolith Hardfork, we must strip the deposit nonce from the
// receipts before calculating the receipt root. This was corrected in the Canyon
// hardfork.
if chain_spec.is_regolith_active_at_timestamp(timestamp) &&
!chain_spec.is_canyon_active_at_timestamp(timestamp)
{
let receipts = receipts
.iter()
.map(|r| {
let mut r = (*r).clone();
if let Some(receipt) = r.as_deposit_receipt_mut() {
receipt.deposit_nonce = None;
}
r
})
.collect::<Vec<_>>();
return ordered_trie_root_with_encoder(&receipts, |r, buf| {
r.with_bloom_ref().encode_2718(buf);
})
}
ordered_trie_root_with_encoder(receipts, |r, buf| {
r.with_bloom_ref().encode_2718(buf);
})
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::{Receipt, ReceiptWithBloom, TxReceipt};
use alloy_primitives::{b256, bloom, hex, Address, Bytes, Log, LogData};
use op_alloy_consensus::OpDepositReceipt;
use reth_optimism_chainspec::BASE_SEPOLIA;
use reth_optimism_primitives::OpReceipt;
/// Tests that the receipt root is computed correctly for the regolith block.
/// This was implemented due to a minor bug in op-geth and op-erigon where in
/// the Regolith hardfork, the receipt root calculation does not include the
/// deposit nonce in the receipt encoding.
/// To fix this an op-reth patch was applied to the receipt root calculation
/// to strip the deposit nonce from each receipt before calculating the root.
#[test]
fn check_optimism_receipt_root() {
let cases = [
// Deposit nonces didn't exist in Bedrock; No need to strip. For the purposes of this
// test, we do have them, so we should get the same root as Canyon.
(
"bedrock",
1679079599,
b256!("0xe255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"),
),
// Deposit nonces introduced in Regolith. They weren't included in the receipt RLP,
// so we need to strip them - the receipt root will differ.
(
"regolith",
1679079600,
b256!("0xe255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"),
),
// Receipt root hashing bug fixed in Canyon. Back to including the deposit nonce
// in the receipt RLP when computing the receipt root.
(
"canyon",
1699981200,
b256!("0x6eefbb5efb95235476654a8bfbf8cb64a4f5f0b0c80b700b0c5964550beee6d7"),
),
];
for case in cases {
let receipts = [
// 0xb0d6ee650637911394396d81172bd1c637d568ed1fbddab0daddfca399c58b53
OpReceipt::Deposit(OpDepositReceipt {
inner: Receipt {
status: true.into(),
cumulative_gas_used: 46913,
logs: vec![],
},
deposit_nonce: Some(4012991u64),
deposit_receipt_version: None,
}),
// 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a
OpReceipt::Eip1559(Receipt {
status: true.into(),
cumulative_gas_used: 118083,
logs: vec![
Log {
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
data: LogData::new_unchecked(
vec![
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001"))
)
},
Log {
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
data: LogData::new_unchecked(
vec![
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001"))
)
},
Log {
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
data: LogData::new_unchecked(
vec![
b256!("0x0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"),
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
], Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003")))
},
]}),
// 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266
OpReceipt::Eip1559(Receipt {
status: true.into(),
cumulative_gas_used: 189253,
logs: vec![
Log {
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
data: LogData::new_unchecked(vec![
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")))
},
Log {
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
data: LogData::new_unchecked(vec![
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")))
},
Log {
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
data: LogData::new_unchecked(vec![
b256!("0x0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"),
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
],
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003")))
},
],
}),
// 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f
OpReceipt::Eip1559(Receipt {
status: true.into(),
cumulative_gas_used: 346969,
logs: vec![
Log {
address: hex!("4200000000000000000000000000000000000006").into(),
data: LogData::new_unchecked( vec![
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
b256!("0x000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"),
b256!("0x0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"),
],
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d8000")))
},
Log {
address: hex!("cf8e7e6b26f407dee615fc4db18bf829e7aa8c09").into(),
data: LogData::new_unchecked( vec![
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
b256!("0x0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"),
b256!("0x0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"),
],
Bytes::from_static(&hex!("000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2")))
},
Log {
address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(),
data: LogData::new_unchecked( vec![
b256!("0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"),
],
Bytes::from_static(&hex!("000000000000000000000000000000000000000000000009bd50642785c15736000000000000000000000000000000000000000000011bb7ac324f724a29bbbf")))
},
Log {
address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(),
data: LogData::new_unchecked( vec![
b256!("0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"),
b256!("0x00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"),
b256!("0x0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"),
],
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2")))
},
Log {
address: hex!("6d0f8d488b669aa9ba2d0f0b7b75a88bf5051cd3").into(),
data: LogData::new_unchecked( vec![
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
b256!("0x0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"),
b256!("0x000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000014bc73062aea8093")))
},
Log {
address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(),
data: LogData::new_unchecked( vec![
b256!("0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000002f122cfadc1ca82a35000000000000000000000000000000000000000000000665879dc0609945d6d1")))
},
Log {
address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(),
data: LogData::new_unchecked( vec![
b256!("0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"),
b256!("0x00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"),
b256!("0x000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"),
],
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e200000000000000000000000000000000000000000000000014bc73062aea80930000000000000000000000000000000000000000000000000000000000000000")))
},
],
}),
// 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351
OpReceipt::Eip1559(Receipt {
status: true.into(),
cumulative_gas_used: 623249,
logs: vec![
Log {
address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(),
data: LogData::new_unchecked( vec![
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
b256!("0x000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"),
b256!("0x000000000000000000000000000000000000000000000000000000000011a1d3"),
],
Default::default())
},
Log {
address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(),
data: LogData::new_unchecked( vec![
b256!("0x9d89e36eadf856db0ad9ffb5a569e07f95634dddd9501141ecf04820484ad0dc"),
b256!("0x000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"),
b256!("0x000000000000000000000000000000000000000000000000000000000011a1d3"),
],
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000")))
},
Log {
address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(),
data: LogData::new_unchecked( vec![
b256!("0x110d160a1bedeea919a88fbc4b2a9fb61b7e664084391b6ca2740db66fef80fe"),
b256!("0x00000000000000000000000084d47f6eea8f8d87910448325519d1bb45c2972a"),
b256!("0x000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"),
b256!("0x000000000000000000000000000000000000000000000000000000000011a1d3"),
],
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007717500762343034303661353035646234633961386163316433306335633332303265370000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000")))
},
],
}),
];
let root = calculate_receipt_root_optimism(
&receipts.iter().map(TxReceipt::with_bloom_ref).collect::<Vec<_>>(),
BASE_SEPOLIA.as_ref(),
case.1,
);
assert_eq!(root, case.2);
}
}
#[test]
fn check_receipt_root_optimism() {
let logs = vec![Log {
address: Address::ZERO,
data: LogData::new_unchecked(vec![], Default::default()),
}];
let logs_bloom = bloom!(
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"
);
let inner =
OpReceipt::Eip2930(Receipt { status: true.into(), cumulative_gas_used: 102068, logs });
let receipt = ReceiptWithBloom { receipt: &inner, logs_bloom };
let receipt = vec![receipt];
let root = calculate_receipt_root_optimism(&receipt, BASE_SEPOLIA.as_ref(), 0);
assert_eq!(
root,
b256!("0xfe70ae4a136d98944951b2123859698d59ad251a381abc9960fa81cae3d0d4a0")
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/consensus/src/lib.rs | crates/optimism/consensus/src/lib.rs | //! Optimism Consensus implementation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
extern crate alloc;
use alloc::{format, sync::Arc};
use alloy_consensus::{BlockHeader as _, EMPTY_OMMER_ROOT_HASH};
use alloy_primitives::B64;
use core::fmt::Debug;
use reth_chainspec::EthChainSpec;
use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator};
use reth_consensus_common::validation::{
validate_against_parent_4844, validate_against_parent_eip1559_base_fee,
validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas,
validate_header_base_fee, validate_header_extra_data, validate_header_gas,
};
use reth_execution_types::BlockExecutionResult;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::DepositReceipt;
use reth_primitives_traits::{
Block, BlockBody, BlockHeader, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock,
SealedHeader,
};
mod proof;
pub use proof::calculate_receipt_root_no_memo_optimism;
pub mod validation;
pub use validation::{canyon, isthmus, validate_block_post_execution};
pub mod error;
pub use error::OpConsensusError;
/// Optimism consensus implementation.
///
/// Provides basic checks as outlined in the execution specs.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OpBeaconConsensus<ChainSpec> {
/// Configuration
chain_spec: Arc<ChainSpec>,
}
impl<ChainSpec> OpBeaconConsensus<ChainSpec> {
/// Create a new instance of [`OpBeaconConsensus`]
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec }
}
}
impl<N, ChainSpec> FullConsensus<N> for OpBeaconConsensus<ChainSpec>
where
N: NodePrimitives<Receipt: DepositReceipt>,
ChainSpec: EthChainSpec<Header = N::BlockHeader> + OpHardforks + Debug + Send + Sync,
{
fn validate_block_post_execution(
&self,
block: &RecoveredBlock<N::Block>,
result: &BlockExecutionResult<N::Receipt>,
) -> Result<(), ConsensusError> {
validate_block_post_execution(block.header(), &self.chain_spec, &result.receipts)
}
}
impl<B, ChainSpec> Consensus<B> for OpBeaconConsensus<ChainSpec>
where
B: Block,
ChainSpec: EthChainSpec<Header = B::Header> + OpHardforks + Debug + Send + Sync,
{
type Error = ConsensusError;
fn validate_body_against_header(
&self,
body: &B::Body,
header: &SealedHeader<B::Header>,
) -> Result<(), ConsensusError> {
validation::validate_body_against_header_op(&self.chain_spec, body, header.header())
}
fn validate_block_pre_execution(&self, block: &SealedBlock<B>) -> Result<(), ConsensusError> {
// Check ommers hash
let ommers_hash = block.body().calculate_ommers_root();
if Some(block.ommers_hash()) != ommers_hash {
return Err(ConsensusError::BodyOmmersHashDiff(
GotExpected {
got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH),
expected: block.ommers_hash(),
}
.into(),
))
}
// Check transaction root
if let Err(error) = block.ensure_transaction_root_valid() {
return Err(ConsensusError::BodyTransactionRootDiff(error.into()))
}
// Check empty shanghai-withdrawals
if self.chain_spec.is_canyon_active_at_timestamp(block.timestamp()) {
canyon::ensure_empty_shanghai_withdrawals(block.body()).map_err(|err| {
ConsensusError::Other(format!("failed to verify block {}: {err}", block.number()))
})?
} else {
return Ok(())
}
if self.chain_spec.is_ecotone_active_at_timestamp(block.timestamp()) {
validate_cancun_gas(block)?;
}
// Check withdrawals root field in header
if self.chain_spec.is_isthmus_active_at_timestamp(block.timestamp()) {
// storage root of withdrawals pre-deploy is verified post-execution
isthmus::ensure_withdrawals_storage_root_is_some(block.header()).map_err(|err| {
ConsensusError::Other(format!("failed to verify block {}: {err}", block.number()))
})?
} else {
// canyon is active, else would have returned already
canyon::ensure_empty_withdrawals_root(block.header())?
}
Ok(())
}
}
impl<H, ChainSpec> HeaderValidator<H> for OpBeaconConsensus<ChainSpec>
where
H: BlockHeader,
ChainSpec: EthChainSpec<Header = H> + OpHardforks + Debug + Send + Sync,
{
fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError> {
let header = header.header();
// with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached.
debug_assert!(
self.chain_spec.is_bedrock_active_at_block(header.number()),
"manually import OVM blocks"
);
if header.nonce() != Some(B64::ZERO) {
return Err(ConsensusError::TheMergeNonceIsNotZero)
}
if header.ommers_hash() != EMPTY_OMMER_ROOT_HASH {
return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty)
}
// Post-merge, the consensus layer is expected to perform checks such that the block
// timestamp is a function of the slot. This is different from pre-merge, where blocks
// are only allowed to be in the future (compared to the system's clock) by a certain
// threshold.
//
// Block validation with respect to the parent should ensure that the block timestamp
// is greater than its parent timestamp.
// validate header extra data for all networks post merge
validate_header_extra_data(header)?;
validate_header_gas(header)?;
validate_header_base_fee(header, &self.chain_spec)
}
fn validate_header_against_parent(
&self,
header: &SealedHeader<H>,
parent: &SealedHeader<H>,
) -> Result<(), ConsensusError> {
validate_against_parent_hash_number(header.header(), parent)?;
if self.chain_spec.is_bedrock_active_at_block(header.number()) {
validate_against_parent_timestamp(header.header(), parent.header())?;
}
validate_against_parent_eip1559_base_fee(
header.header(),
parent.header(),
&self.chain_spec,
)?;
// ensure that the blob gas fields for this block
if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) {
validate_against_parent_4844(header.header(), parent.header(), blob_params)?;
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/consensus/src/error.rs | crates/optimism/consensus/src/error.rs | //! Optimism consensus errors
use alloy_primitives::B256;
use reth_consensus::ConsensusError;
use reth_storage_errors::provider::ProviderError;
/// Optimism consensus error.
#[derive(Debug, Clone, thiserror::Error)]
pub enum OpConsensusError {
/// Block body has non-empty withdrawals list (l1 withdrawals).
#[error("non-empty block body withdrawals list")]
WithdrawalsNonEmpty,
/// Failed to compute L2 withdrawals storage root.
#[error("compute L2 withdrawals root failed: {_0}")]
L2WithdrawalsRootCalculationFail(#[from] ProviderError),
/// L2 withdrawals root missing in block header.
#[error("L2 withdrawals root missing from block header")]
L2WithdrawalsRootMissing,
/// L2 withdrawals root in block header, doesn't match local storage root of predeploy.
#[error("L2 withdrawals root mismatch, header: {header}, exec_res: {exec_res}")]
L2WithdrawalsRootMismatch {
/// Storage root of pre-deploy in block.
header: B256,
/// Storage root of pre-deploy loaded from local state.
exec_res: B256,
},
/// L1 [`ConsensusError`], that also occurs on L2.
#[error(transparent)]
Eth(#[from] ConsensusError),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/consensus/src/validation/isthmus.rs | crates/optimism/consensus/src/validation/isthmus.rs | //! Block verification w.r.t. consensus rules new in Isthmus hardfork.
use crate::OpConsensusError;
use alloy_consensus::BlockHeader;
use alloy_primitives::{address, Address, B256};
use alloy_trie::EMPTY_ROOT_HASH;
use core::fmt::Debug;
use reth_storage_api::{errors::ProviderResult, StorageRootProvider};
use reth_trie_common::HashedStorage;
use revm::database::BundleState;
use tracing::warn;
/// The L2 contract `L2ToL1MessagePasser`, stores commitments to withdrawal transactions.
pub const ADDRESS_L2_TO_L1_MESSAGE_PASSER: Address =
address!("0x4200000000000000000000000000000000000016");
/// Verifies that `withdrawals_root` (i.e. `l2tol1-msg-passer` storage root since Isthmus) field is
/// set in block header.
pub fn ensure_withdrawals_storage_root_is_some<H: BlockHeader>(
header: H,
) -> Result<(), OpConsensusError> {
header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;
Ok(())
}
/// Computes the storage root of predeploy `L2ToL1MessagePasser.sol`.
///
/// Uses state updates from block execution. See also [`withdrawals_root_prehashed`].
pub fn withdrawals_root<DB: StorageRootProvider>(
state_updates: &BundleState,
state: DB,
) -> ProviderResult<B256> {
// if l2 withdrawals transactions were executed there will be storage updates for
// `L2ToL1MessagePasser.sol` predeploy
withdrawals_root_prehashed(
state_updates
.state()
.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER)
.map(|acc| {
HashedStorage::from_plain_storage(
acc.status,
acc.storage.iter().map(|(slot, value)| (slot, &value.present_value)),
)
})
.unwrap_or_default(),
state,
)
}
/// Computes the storage root of predeploy `L2ToL1MessagePasser.sol`.
///
/// Uses pre-hashed storage updates of `L2ToL1MessagePasser.sol` predeploy, resulting from
/// execution of L2 withdrawals transactions. If none, takes empty [`HashedStorage::default`].
pub fn withdrawals_root_prehashed<DB: StorageRootProvider>(
hashed_storage_updates: HashedStorage,
state: DB,
) -> ProviderResult<B256> {
state.storage_root(ADDRESS_L2_TO_L1_MESSAGE_PASSER, hashed_storage_updates)
}
/// Verifies block header field `withdrawals_root` against storage root of
/// `L2ToL1MessagePasser.sol` predeploy post block execution.
///
/// Takes state updates resulting from execution of block.
///
/// See <https://specs.optimism.io/protocol/isthmus/exec-engine.html#l2tol1messagepasser-storage-root-in-header>.
pub fn verify_withdrawals_root<DB, H>(
state_updates: &BundleState,
state: DB,
header: H,
) -> Result<(), OpConsensusError>
where
DB: StorageRootProvider,
H: BlockHeader + Debug,
{
let header_storage_root =
header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;
let storage_root = withdrawals_root(state_updates, state)
.map_err(OpConsensusError::L2WithdrawalsRootCalculationFail)?;
if storage_root == EMPTY_ROOT_HASH {
// if there was no MessagePasser contract storage, something is wrong
// (it should at least store an implementation address and owner address)
warn!("isthmus: no storage root for L2ToL1MessagePasser contract");
}
if header_storage_root != storage_root {
return Err(OpConsensusError::L2WithdrawalsRootMismatch {
header: header_storage_root,
exec_res: storage_root,
})
}
Ok(())
}
/// Verifies block header field `withdrawals_root` against storage root of
/// `L2ToL1MessagePasser.sol` predeploy post block execution.
///
/// Takes pre-hashed storage updates of `L2ToL1MessagePasser.sol` predeploy, resulting from
/// execution of block, if any. Otherwise takes empty [`HashedStorage::default`].
///
/// See <https://specs.optimism.io/protocol/isthmus/exec-engine.html#l2tol1messagepasser-storage-root-in-header>.
pub fn verify_withdrawals_root_prehashed<DB, H>(
hashed_storage_updates: HashedStorage,
state: DB,
header: H,
) -> Result<(), OpConsensusError>
where
DB: StorageRootProvider,
H: BlockHeader + core::fmt::Debug,
{
let header_storage_root =
header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;
let storage_root = withdrawals_root_prehashed(hashed_storage_updates, state)
.map_err(OpConsensusError::L2WithdrawalsRootCalculationFail)?;
if header_storage_root != storage_root {
return Err(OpConsensusError::L2WithdrawalsRootMismatch {
header: header_storage_root,
exec_res: storage_root,
})
}
Ok(())
}
// #[cfg(test)]
// mod test {
// use super::*;
// use alloc::sync::Arc;
// use alloy_chains::Chain;
// use alloy_consensus::Header;
// use alloy_primitives::{keccak256, B256, U256};
// use core::str::FromStr;
// use reth_db_common::init::init_genesis;
// use reth_optimism_chainspec::OpChainSpecBuilder;
// use reth_optimism_node::OpNode;
// use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
// use reth_provider::{
// providers::BlockchainProvider, test_utils::create_test_provider_factory_with_node_types,
// StateWriter,
// };
// use reth_revm::db::BundleState;
// use reth_storage_api::StateProviderFactory;
// use reth_trie::{test_utils::storage_root_prehashed, HashedStorage};
// use reth_trie_common::HashedPostState;
// #[test]
// fn l2tol1_message_passer_no_withdrawals() {
// let hashed_address = keccak256(ADDRESS_L2_TO_L1_MESSAGE_PASSER);
// // create account storage
// let init_storage = HashedStorage::from_iter(
// false,
// [
// "50000000000000000000000000000004253371b55351a08cb3267d4d265530b6",
// "512428ed685fff57294d1a9cbb147b18ae5db9cf6ae4b312fa1946ba0561882e",
// "51e6784c736ef8548f856909870b38e49ef7a4e3e77e5e945e0d5e6fcaa3037f",
// ]
// .into_iter()
// .map(|str| (B256::from_str(str).unwrap(), U256::from(1))),
// );
// let mut state = HashedPostState::default();
// state.storages.insert(hashed_address, init_storage.clone());
// // init test db
// // note: must be empty (default) chain spec to ensure storage is empty after init
// genesis, // otherwise can't use `storage_root_prehashed` to determine storage root later
// let provider_factory = create_test_provider_factory_with_node_types::<OpNode>(Arc::new(
//
// OpChainSpecBuilder::default().chain(Chain::dev()).genesis(Default::default()).build(),
// ));
// let _ = init_genesis(&provider_factory).unwrap();
// // write account storage to database
// let provider_rw = provider_factory.provider_rw().unwrap();
// provider_rw.write_hashed_state(&state.clone().into_sorted()).unwrap();
// provider_rw.commit().unwrap();
// // create block header with withdrawals root set to storage root of l2tol1-msg-passer
// let header = Header {
// withdrawals_root: Some(storage_root_prehashed(init_storage.storage)),
// ..Default::default()
// };
// // create state provider factory
// let state_provider_factory = BlockchainProvider::new(provider_factory).unwrap();
// // validate block against existing state by passing empty state updates
// verify_withdrawals_root(
// &BundleState::default(),
// state_provider_factory.latest().expect("load state"),
// &header,
// )
// .unwrap();
// }
// }
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/consensus/src/validation/canyon.rs | crates/optimism/consensus/src/validation/canyon.rs | //! Canyon consensus rule checks.
use alloy_consensus::BlockHeader;
use alloy_trie::EMPTY_ROOT_HASH;
use reth_consensus::ConsensusError;
use reth_primitives_traits::{BlockBody, GotExpected};
use crate::OpConsensusError;
/// Verifies that withdrawals root in block header (Shanghai) is always [`EMPTY_ROOT_HASH`] in
/// Canyon.
#[inline]
pub fn ensure_empty_withdrawals_root<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> {
// Shanghai rule
let header_withdrawals_root =
&header.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?;
// Canyon rules
if *header_withdrawals_root != EMPTY_ROOT_HASH {
return Err(ConsensusError::BodyWithdrawalsRootDiff(
GotExpected { got: *header_withdrawals_root, expected: EMPTY_ROOT_HASH }.into(),
));
}
Ok(())
}
/// Verifies that withdrawals in block body (Shanghai) is always empty in Canyon.
/// <https://specs.optimism.io/protocol/rollup-node-p2p.html#block-validation>
#[inline]
pub fn ensure_empty_shanghai_withdrawals<T: BlockBody>(body: &T) -> Result<(), OpConsensusError> {
// Shanghai rule
let withdrawals = body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?;
// Canyon rule
if !withdrawals.as_ref().is_empty() {
return Err(OpConsensusError::WithdrawalsNonEmpty)
}
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/consensus/src/validation/mod.rs | crates/optimism/consensus/src/validation/mod.rs | //! Verification of blocks w.r.t. Optimism hardforks.
pub mod canyon;
pub mod isthmus;
// Re-export the decode_holocene_base_fee function for compatibility
pub use reth_optimism_chainspec::decode_holocene_base_fee;
use crate::proof::calculate_receipt_root_optimism;
use alloc::vec::Vec;
use alloy_consensus::{BlockHeader, TxReceipt, EMPTY_OMMER_ROOT_HASH};
use alloy_eips::Encodable2718;
use alloy_primitives::{Bloom, Bytes, B256};
use alloy_trie::EMPTY_ROOT_HASH;
use reth_consensus::ConsensusError;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::DepositReceipt;
use reth_primitives_traits::{receipt::gas_spent_by_transactions, BlockBody, GotExpected};
/// Ensures the block response data matches the header.
///
/// This ensures the body response items match the header's hashes:
/// - ommer hash
/// - transaction root
/// - withdrawals root: the body's withdrawals root must only match the header's before isthmus
pub fn validate_body_against_header_op<B, H>(
chain_spec: impl OpHardforks,
body: &B,
header: &H,
) -> Result<(), ConsensusError>
where
B: BlockBody,
H: reth_primitives_traits::BlockHeader,
{
let ommers_hash = body.calculate_ommers_root();
if Some(header.ommers_hash()) != ommers_hash {
return Err(ConsensusError::BodyOmmersHashDiff(
GotExpected {
got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH),
expected: header.ommers_hash(),
}
.into(),
))
}
let tx_root = body.calculate_tx_root();
if header.transactions_root() != tx_root {
return Err(ConsensusError::BodyTransactionRootDiff(
GotExpected { got: tx_root, expected: header.transactions_root() }.into(),
))
}
match (header.withdrawals_root(), body.calculate_withdrawals_root()) {
(Some(header_withdrawals_root), Some(withdrawals_root)) => {
// after isthmus, the withdrawals root field is repurposed and no longer mirrors the
// withdrawals root computed from the body
if chain_spec.is_isthmus_active_at_timestamp(header.timestamp()) {
// After isthmus we only ensure that the body has empty withdrawals
if withdrawals_root != EMPTY_ROOT_HASH {
return Err(ConsensusError::BodyWithdrawalsRootDiff(
GotExpected { got: withdrawals_root, expected: EMPTY_ROOT_HASH }.into(),
))
}
} else {
// before isthmus we ensure that the header root matches the body
if withdrawals_root != header_withdrawals_root {
return Err(ConsensusError::BodyWithdrawalsRootDiff(
GotExpected { got: withdrawals_root, expected: header_withdrawals_root }
.into(),
))
}
}
}
(None, None) => {
// this is ok because we assume the fork is not active in this case
}
_ => return Err(ConsensusError::WithdrawalsRootUnexpected),
}
Ok(())
}
/// Validate a block with regard to execution results:
///
/// - Compares the receipts root in the block header to the block body
/// - Compares the gas used in the block header to the actual gas usage after execution
pub fn validate_block_post_execution<R: DepositReceipt>(
header: impl BlockHeader,
chain_spec: impl OpHardforks,
receipts: &[R],
) -> Result<(), ConsensusError> {
// Before Byzantium, receipts contained state root that would mean that expensive
// operation as hashing that is required for state root got calculated in every
// transaction This was replaced with is_success flag.
// See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
if chain_spec.is_byzantium_active_at_block(header.number()) {
if let Err(error) = verify_receipts_optimism(
header.receipts_root(),
header.logs_bloom(),
receipts,
chain_spec,
header.timestamp(),
) {
let receipts = receipts
.iter()
.map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
.collect::<Vec<_>>();
tracing::debug!(%error, ?receipts, "receipts verification failed");
return Err(error)
}
}
// Check if gas used matches the value set in header.
let cumulative_gas_used =
receipts.last().map(|receipt| receipt.cumulative_gas_used()).unwrap_or(0);
if header.gas_used() != cumulative_gas_used {
return Err(ConsensusError::BlockGasUsed {
gas: GotExpected { got: cumulative_gas_used, expected: header.gas_used() },
gas_spent_by_tx: gas_spent_by_transactions(receipts),
})
}
Ok(())
}
/// Verify the calculated receipts root against the expected receipts root.
fn verify_receipts_optimism<R: DepositReceipt>(
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
receipts: &[R],
chain_spec: impl OpHardforks,
timestamp: u64,
) -> Result<(), ConsensusError> {
// Calculate receipts root.
let receipts_with_bloom = receipts.iter().map(TxReceipt::with_bloom_ref).collect::<Vec<_>>();
let receipts_root =
calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp);
// Calculate header logs bloom.
let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom_ref());
compare_receipts_root_and_logs_bloom(
receipts_root,
logs_bloom,
expected_receipts_root,
expected_logs_bloom,
)?;
Ok(())
}
/// Compare the calculated receipts root with the expected receipts root, also compare
/// the calculated logs bloom with the expected logs bloom.
fn compare_receipts_root_and_logs_bloom(
calculated_receipts_root: B256,
calculated_logs_bloom: Bloom,
expected_receipts_root: B256,
expected_logs_bloom: Bloom,
) -> Result<(), ConsensusError> {
if calculated_receipts_root != expected_receipts_root {
return Err(ConsensusError::BodyReceiptRootDiff(
GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(),
))
}
if calculated_logs_bloom != expected_logs_bloom {
return Err(ConsensusError::BodyBloomLogDiff(
GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(),
))
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::Header;
use alloy_primitives::{b256, hex, Bytes, U256};
use op_alloy_consensus::OpTxEnvelope;
use reth_chainspec::{BaseFeeParams, ChainSpec, EthChainSpec, ForkCondition, Hardfork};
use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA};
use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS};
use std::sync::Arc;
fn holocene_chainspec() -> Arc<OpChainSpec> {
let mut hardforks = BASE_SEPOLIA_HARDFORKS.clone();
hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000));
Arc::new(OpChainSpec {
inner: ChainSpec {
chain: BASE_SEPOLIA.inner.chain,
genesis: BASE_SEPOLIA.inner.genesis.clone(),
genesis_header: BASE_SEPOLIA.inner.genesis_header.clone(),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
hardforks,
base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(),
prune_delete_limit: 10000,
..Default::default()
},
})
}
fn isthmus_chainspec() -> OpChainSpec {
let mut chainspec = BASE_SEPOLIA.as_ref().clone();
chainspec
.inner
.hardforks
.insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1800000000));
chainspec
}
#[test]
fn test_get_base_fee_pre_holocene() {
let op_chain_spec = BASE_SEPOLIA.clone();
let parent = Header {
base_fee_per_gas: Some(1),
gas_used: 15763614,
gas_limit: 144000000,
..Default::default()
};
let base_fee =
reth_optimism_chainspec::OpChainSpec::next_block_base_fee(&op_chain_spec, &parent, 0);
assert_eq!(
base_fee.unwrap(),
op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default()
);
}
#[test]
fn test_get_base_fee_holocene_extra_data_not_set() {
let op_chain_spec = holocene_chainspec();
let parent = Header {
base_fee_per_gas: Some(1),
gas_used: 15763614,
gas_limit: 144000000,
timestamp: 1800000003,
extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]),
..Default::default()
};
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
&op_chain_spec,
&parent,
1800000005,
);
assert_eq!(
base_fee.unwrap(),
op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default()
);
}
#[test]
fn test_get_base_fee_holocene_extra_data_set() {
let parent = Header {
base_fee_per_gas: Some(1),
gas_used: 15763614,
gas_limit: 144000000,
extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]),
timestamp: 1800000003,
..Default::default()
};
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
&holocene_chainspec(),
&parent,
1800000005,
);
assert_eq!(
base_fee.unwrap(),
parent
.next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008))
.unwrap_or_default()
);
}
// <https://sepolia.basescan.org/block/19773628>
#[test]
fn test_get_base_fee_holocene_extra_data_set_base_sepolia() {
let parent = Header {
base_fee_per_gas: Some(507),
gas_used: 4847634,
gas_limit: 60000000,
extra_data: hex!("00000000fa0000000a").into(),
timestamp: 1735315544,
..Default::default()
};
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
&*BASE_SEPOLIA,
&parent,
1735315546,
)
.unwrap();
assert_eq!(base_fee, 507);
}
#[test]
fn body_against_header_isthmus() {
let chainspec = isthmus_chainspec();
let header = Header {
base_fee_per_gas: Some(507),
gas_used: 4847634,
gas_limit: 60000000,
extra_data: hex!("00000000fa0000000a").into(),
timestamp: 1800000000,
withdrawals_root: Some(b256!(
"0x611e1d75cbb77fa782d79485a8384e853bc92e56883c313a51e3f9feef9a9a71"
)),
..Default::default()
};
let mut body = alloy_consensus::BlockBody::<OpTxEnvelope> {
transactions: vec![],
ommers: vec![],
withdrawals: Some(Default::default()),
};
validate_body_against_header_op(&chainspec, &body, &header).unwrap();
body.withdrawals.take();
validate_body_against_header_op(&chainspec, &body, &header).unwrap_err();
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/app.rs | crates/optimism/cli/src/app.rs | use crate::{Cli, Commands};
use eyre::{eyre, Result};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::launcher::Launcher;
use reth_cli_runner::CliRunner;
use reth_node_metrics::recorder::install_prometheus_recorder;
use reth_optimism_chainspec::OpChainSpec;
use reth_optimism_consensus::OpBeaconConsensus;
use reth_optimism_node::{OpExecutorProvider, OpNode};
use reth_tracing::{FileWorkerGuard, Layers};
use std::{fmt, sync::Arc};
use tracing::info;
/// A wrapper around a parsed CLI that handles command execution.
#[derive(Debug)]
pub struct CliApp<Spec: ChainSpecParser, Ext: clap::Args + fmt::Debug> {
cli: Cli<Spec, Ext>,
runner: Option<CliRunner>,
layers: Option<Layers>,
guard: Option<FileWorkerGuard>,
}
impl<C, Ext> CliApp<C, Ext>
where
C: ChainSpecParser<ChainSpec = OpChainSpec>,
Ext: clap::Args + fmt::Debug,
{
pub(crate) fn new(cli: Cli<C, Ext>) -> Self {
Self { cli, runner: None, layers: Some(Layers::new()), guard: None }
}
/// Sets the runner for the CLI commander.
///
/// This replaces any existing runner with the provided one.
pub fn set_runner(&mut self, runner: CliRunner) {
self.runner = Some(runner);
}
/// Access to tracing layers.
///
/// Returns a mutable reference to the tracing layers, or error
/// if tracing initialized and layers have detached already.
pub fn access_tracing_layers(&mut self) -> Result<&mut Layers> {
self.layers.as_mut().ok_or_else(|| eyre!("Tracing already initialized"))
}
/// Execute the configured cli command.
///
/// This accepts a closure that is used to launch the node via the
/// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
pub fn run(mut self, launcher: impl Launcher<C, Ext>) -> Result<()> {
let runner = match self.runner.take() {
Some(runner) => runner,
None => CliRunner::try_default_runtime()?,
};
// add network name to logs dir
// Add network name if available to the logs dir
if let Some(chain_spec) = self.cli.command.chain_spec() {
self.cli.logs.log_file_directory =
self.cli.logs.log_file_directory.join(chain_spec.chain.to_string());
}
self.init_tracing()?;
// Install the prometheus recorder to be sure to record all metrics
let _ = install_prometheus_recorder();
let components = |spec: Arc<OpChainSpec>| {
(OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec))
};
match self.cli.command {
Commands::Node(command) => {
runner.run_command_until_exit(|ctx| command.execute(ctx, launcher))
}
Commands::Init(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
}
Commands::InitState(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
}
Commands::ImportOp(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
}
Commands::ImportReceiptsOp(command) => {
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
}
Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::<OpNode>()),
Commands::Stage(command) => {
runner.run_command_until_exit(|ctx| command.execute::<OpNode, _>(ctx, components))
}
Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::<OpNode>()),
Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
Commands::Recover(command) => {
runner.run_command_until_exit(|ctx| command.execute::<OpNode>(ctx))
}
Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::<OpNode>()),
#[cfg(feature = "dev")]
Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()),
Commands::ReExecute(command) => {
runner.run_until_ctrl_c(command.execute::<OpNode>(components))
}
}
}
/// Initializes tracing with the configured options.
///
/// If file logging is enabled, this function stores guard to the struct.
pub fn init_tracing(&mut self) -> Result<()> {
if self.guard.is_none() {
let layers = self.layers.take().unwrap_or_default();
self.guard = self.cli.logs.init_tracing_with_layers(layers)?;
info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory);
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/lib.rs | crates/optimism/cli/src/lib.rs | //! OP-Reth CLI implementation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// A configurable App on top of the cli parser.
pub mod app;
/// Optimism chain specification parser.
pub mod chainspec;
/// Optimism CLI commands.
pub mod commands;
/// Module with a codec for reading and encoding receipts in files.
///
/// Enables decoding and encoding `OpGethReceipt` type. See <https://github.com/testinprod-io/op-geth/pull/1>.
///
/// Currently configured to use codec [`OpGethReceipt`](receipt_file_codec::OpGethReceipt) based on
/// export of below Bedrock data using <https://github.com/testinprod-io/op-geth/pull/1>. Codec can
/// be replaced with regular encoding of receipts for export.
///
/// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit
/// reth's needs for importing. However, this would require patching the diff in <https://github.com/testinprod-io/op-geth/pull/1> to export the `Receipt` and not `OpGethReceipt` type (originally
/// made for op-erigon's import needs).
pub mod receipt_file_codec;
/// OVM block, same as EVM block at bedrock, except for signature of deposit transaction
/// not having a signature back then.
/// Enables decoding and encoding `Block` types within file contexts.
pub mod ovm_file_codec;
pub use app::CliApp;
pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand};
use reth_optimism_chainspec::OpChainSpec;
use std::{ffi::OsString, fmt, sync::Arc};
use chainspec::OpChainSpecParser;
use clap::{command, Parser};
use commands::Commands;
use futures_util::Future;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::launcher::FnLauncher;
use reth_cli_runner::CliRunner;
use reth_db::DatabaseEnv;
use reth_node_builder::{NodeBuilder, WithLaunchContext};
use reth_node_core::{args::LogArgs, version::version_metadata};
use reth_optimism_node::args::RollupArgs;
// This allows us to manually enable node metrics features, required for proper jemalloc metric
// reporting
use reth_node_metrics as _;
/// The main op-reth cli interface.
///
/// This is the entrypoint to the executable.
#[derive(Debug, Parser)]
#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
pub struct Cli<Spec: ChainSpecParser = OpChainSpecParser, Ext: clap::Args + fmt::Debug = RollupArgs>
{
/// The command to run
#[command(subcommand)]
pub command: Commands<Spec, Ext>,
/// The logging configuration for the CLI.
#[command(flatten)]
pub logs: LogArgs,
}
impl Cli {
/// Parsers only the default CLI arguments
pub fn parse_args() -> Self {
Self::parse()
}
/// Parsers only the default CLI arguments from the given iterator
pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
Self::try_parse_from(itr)
}
}
impl<C, Ext> Cli<C, Ext>
where
C: ChainSpecParser<ChainSpec = OpChainSpec>,
Ext: clap::Args + fmt::Debug,
{
/// Configures the CLI and returns a [`CliApp`] instance.
///
/// This method is used to prepare the CLI for execution by wrapping it in a
/// [`CliApp`] that can be further configured before running.
pub fn configure(self) -> CliApp<C, Ext> {
CliApp::new(self)
}
/// Execute the configured cli command.
///
/// This accepts a closure that is used to launch the node via the
/// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
pub fn run<L, Fut>(self, launcher: L) -> eyre::Result<()>
where
L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
Fut: Future<Output = eyre::Result<()>>,
{
self.with_runner(CliRunner::try_default_runtime()?, launcher)
}
/// Execute the configured cli command with the provided [`CliRunner`].
pub fn with_runner<L, Fut>(self, runner: CliRunner, launcher: L) -> eyre::Result<()>
where
L: FnOnce(WithLaunchContext<NodeBuilder<Arc<DatabaseEnv>, C::ChainSpec>>, Ext) -> Fut,
Fut: Future<Output = eyre::Result<()>>,
{
let mut this = self.configure();
this.set_runner(runner);
this.run(FnLauncher::new::<C, Ext>(async move |builder, chain_spec| {
launcher(builder, chain_spec).await
}))
}
}
#[cfg(test)]
mod test {
use crate::{chainspec::OpChainSpecParser, commands::Commands, Cli};
use clap::Parser;
use reth_cli_commands::{node::NoArgs, NodeCommand};
use reth_optimism_chainspec::{BASE_MAINNET, OP_DEV};
use reth_optimism_node::args::RollupArgs;
#[test]
fn parse_dev() {
let cmd = NodeCommand::<OpChainSpecParser, NoArgs>::parse_from(["op-reth", "--dev"]);
let chain = OP_DEV.clone();
assert_eq!(cmd.chain.chain, chain.chain);
assert_eq!(cmd.chain.genesis_hash(), chain.genesis_hash());
assert_eq!(
cmd.chain.paris_block_and_final_difficulty,
chain.paris_block_and_final_difficulty
);
assert_eq!(cmd.chain.hardforks, chain.hardforks);
assert!(cmd.rpc.http);
assert!(cmd.network.discovery.disable_discovery);
assert!(cmd.dev.dev);
}
#[test]
fn parse_node() {
let cmd = Cli::<OpChainSpecParser, RollupArgs>::parse_from([
"op-reth",
"node",
"--chain",
"base",
"--datadir",
"/mnt/datadirs/base",
"--instance",
"2",
"--http",
"--http.addr",
"0.0.0.0",
"--ws",
"--ws.addr",
"0.0.0.0",
"--http.api",
"admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots",
"--rollup.sequencer-http",
"https://mainnet-sequencer.base.org",
"--rpc-max-tracing-requests",
"1000000",
"--rpc.gascap",
"18446744073709551615",
"--rpc.max-connections",
"429496729",
"--rpc.max-logs-per-response",
"0",
"--rpc.max-subscriptions-per-connection",
"10000",
"--metrics",
"9003",
"--log.file.max-size",
"100",
]);
match cmd.command {
Commands::Node(command) => {
assert_eq!(command.chain.as_ref(), BASE_MAINNET.as_ref());
}
_ => panic!("unexpected command"),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/ovm_file_codec.rs | crates/optimism/cli/src/ovm_file_codec.rs | use alloy_consensus::{
transaction::{from_eip155_value, RlpEcdsaDecodableTx, RlpEcdsaEncodableTx},
Header, TxEip1559, TxEip2930, TxEip7702, TxLegacy,
};
use alloy_eips::{
eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718},
eip4895::Withdrawals,
Typed2718,
};
use alloy_primitives::{
bytes::{Buf, BytesMut},
keccak256, Signature, TxHash, B256, U256,
};
use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable};
use derive_more::{AsRef, Deref};
use op_alloy_consensus::{OpTxType, OpTypedTransaction, TxDeposit};
use reth_downloaders::file_client::FileClientError;
use serde::{Deserialize, Serialize};
use tokio_util::codec::Decoder;
#[expect(dead_code)]
/// Specific codec for reading raw block bodies from a file
/// with optimism-specific signature handling
pub(crate) struct OvmBlockFileCodec;
impl Decoder for OvmBlockFileCodec {
type Item = OvmBlock;
type Error = FileClientError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if src.is_empty() {
return Ok(None);
}
let buf_slice = &mut src.as_ref();
let body =
OvmBlock::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?;
src.advance(src.len() - buf_slice.len());
Ok(Some(body))
}
}
/// OVM block, same as EVM block but with different transaction signature handling
/// Pre-bedrock system transactions on Optimism were sent from the zero address
/// with an empty signature,
#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)]
pub struct OvmBlock {
/// Block header
pub header: Header,
/// Block body
pub body: OvmBlockBody,
}
impl OvmBlock {
/// Decodes a `Block` from the given byte slice.
pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let header = Header::decode(buf)?;
let body = OvmBlockBody::decode(buf)?;
Ok(Self { header, body })
}
}
/// The body of a block for OVM
#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)]
#[rlp(trailing)]
pub struct OvmBlockBody {
/// Transactions in the block
pub transactions: Vec<OvmTransactionSigned>,
/// Uncle headers for the given block
pub ommers: Vec<Header>,
/// Withdrawals in the block.
pub withdrawals: Option<Withdrawals>,
}
/// Signed transaction pre bedrock.
#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)]
pub struct OvmTransactionSigned {
/// Transaction hash
pub hash: TxHash,
/// The transaction signature values
pub signature: Signature,
/// Raw transaction info
#[deref]
#[as_ref]
pub transaction: OpTypedTransaction,
}
impl AsRef<Self> for OvmTransactionSigned {
fn as_ref(&self) -> &Self {
self
}
}
impl OvmTransactionSigned {
/// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with
/// tx type.
pub fn recalculate_hash(&self) -> B256 {
keccak256(self.encoded_2718())
}
/// Create a new signed transaction from a transaction and its signature.
///
/// This will also calculate the transaction hash using its encoding.
pub fn from_transaction_and_signature(
transaction: OpTypedTransaction,
signature: Signature,
) -> Self {
let mut initial_tx = Self { transaction, hash: Default::default(), signature };
initial_tx.hash = initial_tx.recalculate_hash();
initial_tx
}
/// Decodes legacy transaction from the data buffer into a tuple.
///
/// This expects `rlp(legacy_tx)`
///
/// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact
/// format expected.
pub(crate) fn decode_rlp_legacy_transaction_tuple(
data: &mut &[u8],
) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> {
let original_encoding = *data;
let header = alloy_rlp::Header::decode(data)?;
let remaining_len = data.len();
let transaction_payload_len = header.payload_length;
if transaction_payload_len > remaining_len {
return Err(RlpError::InputTooShort);
}
let mut transaction = TxLegacy {
nonce: Decodable::decode(data)?,
gas_price: Decodable::decode(data)?,
gas_limit: Decodable::decode(data)?,
to: Decodable::decode(data)?,
value: Decodable::decode(data)?,
input: Decodable::decode(data)?,
chain_id: None,
};
let v = Decodable::decode(data)?;
let r: U256 = Decodable::decode(data)?;
let s: U256 = Decodable::decode(data)?;
let tx_length = header.payload_length + header.length();
let hash = keccak256(&original_encoding[..tx_length]);
// Handle both pre-bedrock and regular cases
let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() {
// Pre-bedrock system transactions case
(Signature::new(r, s, false), None)
} else {
// Regular transaction case
let (parity, chain_id) = from_eip155_value(v)
.ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?;
(Signature::new(r, s, parity), chain_id)
};
// Set chain ID and verify length
transaction.chain_id = chain_id;
let decoded = remaining_len - data.len();
if decoded != transaction_payload_len {
return Err(RlpError::UnexpectedLength);
}
Ok((transaction, hash, signature))
}
/// Decodes legacy transaction from the data buffer.
///
/// This should be used _only_ be used in general transaction decoding methods, which have
/// already ensured that the input is a legacy transaction with the following format:
/// `rlp(legacy_tx)`
///
/// Legacy transactions are encoded as lists, so the input should start with a RLP list header.
///
/// This expects `rlp(legacy_tx)`
// TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`,
// so decoding methods do not need to manually advance the buffer
pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result<Self> {
let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?;
let signed = Self { transaction: OpTypedTransaction::Legacy(transaction), hash, signature };
Ok(signed)
}
}
impl Decodable for OvmTransactionSigned {
/// This `Decodable` implementation only supports decoding rlp encoded transactions as it's used
/// by p2p.
///
/// The p2p encoding format always includes an RLP header, although the type RLP header depends
/// on whether or not the transaction is a legacy transaction.
///
/// If the transaction is a legacy transaction, it is just encoded as a RLP list:
/// `rlp(tx-data)`.
///
/// If the transaction is a typed transaction, it is encoded as a RLP string:
/// `rlp(tx-type || rlp(tx-data))`
///
/// This can be used for decoding all signed transactions in p2p `BlockBodies` responses.
///
/// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since
/// the EIP-4844 variant of [`OvmTransactionSigned`] does not include the blob sidecar.
///
/// For a method suitable for decoding pooled transactions, see \[`PooledTransaction`\].
///
/// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed
/// transaction is encoded in this format, and does not start with a RLP header:
/// `tx-type || rlp(tx-data)`.
///
/// This is because [`Header::decode`] does not advance the buffer, and returns a length-1
/// string header if the first byte is less than `0xf7`.
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
Self::network_decode(buf).map_err(Into::into)
}
}
impl Typed2718 for OvmTransactionSigned {
fn ty(&self) -> u8 {
self.transaction.tx_type() as u8
}
}
impl Encodable2718 for OvmTransactionSigned {
fn type_flag(&self) -> Option<u8> {
match self.transaction.tx_type() {
OpTxType::Legacy => None,
tx_type => Some(tx_type as u8),
}
}
fn encode_2718_len(&self) -> usize {
match &self.transaction {
OpTypedTransaction::Legacy(legacy_tx) => {
legacy_tx.eip2718_encoded_length(&self.signature)
}
OpTypedTransaction::Eip2930(access_list_tx) => {
access_list_tx.eip2718_encoded_length(&self.signature)
}
OpTypedTransaction::Eip1559(dynamic_fee_tx) => {
dynamic_fee_tx.eip2718_encoded_length(&self.signature)
}
OpTypedTransaction::Eip7702(set_code_tx) => {
set_code_tx.eip2718_encoded_length(&self.signature)
}
OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(),
}
}
fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) {
self.transaction.eip2718_encode(&self.signature, out)
}
}
impl Decodable2718 for OvmTransactionSigned {
fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? {
OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)),
OpTxType::Eip2930 => {
let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts();
Ok(Self { transaction: OpTypedTransaction::Eip2930(tx), signature, hash })
}
OpTxType::Eip1559 => {
let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts();
Ok(Self { transaction: OpTypedTransaction::Eip1559(tx), signature, hash })
}
OpTxType::Eip7702 => {
let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts();
Ok(Self { transaction: OpTypedTransaction::Eip7702(tx), signature, hash })
}
OpTxType::Deposit => Ok(Self::from_transaction_and_signature(
OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?),
TxDeposit::signature(),
)),
}
}
fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> {
Ok(Self::decode_rlp_legacy_transaction(buf)?)
}
}
#[cfg(test)]
mod tests {
use crate::ovm_file_codec::OvmTransactionSigned;
use alloy_consensus::Typed2718;
use alloy_primitives::{address, b256, hex, TxKind, U256};
use op_alloy_consensus::OpTypedTransaction;
const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25];
use alloy_rlp::Decodable;
#[test]
fn test_decode_legacy_transactions() {
// Test Case 1: contract deposit - regular L2 transaction calling deposit() function
// tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c
let deposit_tx_bytes = hex!(
"f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def"
);
let deposit_decoded = OvmTransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap();
// Verify deposit transaction
let deposit_tx = match &deposit_decoded.transaction {
OpTypedTransaction::Legacy(ref tx) => tx,
_ => panic!("Expected legacy transaction for NFT deposit"),
};
assert_eq!(
deposit_tx.to,
TxKind::Call(address!("0xa75127121d28a9bf848f3b70e7eea26570aa7700"))
);
assert_eq!(deposit_tx.nonce, 240);
assert_eq!(deposit_tx.gas_price, 1001500);
assert_eq!(deposit_tx.gas_limit, 814661);
assert_eq!(deposit_tx.value, U256::ZERO);
assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR);
assert_eq!(deposit_tx.chain_id, Some(10));
assert_eq!(
deposit_decoded.signature.r(),
U256::from_str_radix(
"d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f",
16
)
.unwrap()
);
assert_eq!(
deposit_decoded.signature.s(),
U256::from_str_radix(
"2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def",
16
)
.unwrap()
);
// Test Case 2: pre-bedrock system transaction from block 105235052
// tx: https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e
let system_tx_bytes = hex!(
"f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4"
);
let system_decoded = OvmTransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap();
// Verify system transaction
assert!(system_decoded.is_legacy());
let system_tx = match &system_decoded.transaction {
OpTypedTransaction::Legacy(ref tx) => tx,
_ => panic!("Expected Legacy transaction"),
};
assert_eq!(system_tx.nonce, 887187);
assert_eq!(system_tx.gas_price, 1200000);
assert_eq!(system_tx.gas_limit, 173950);
assert_eq!(
system_tx.to,
TxKind::Call(address!("0xa0cc33dd6f4819d473226257792afe230ec3c67f"))
);
assert_eq!(system_tx.value, U256::ZERO);
assert_eq!(system_tx.chain_id, Some(10));
assert_eq!(
system_decoded.signature.r(),
U256::from_str_radix(
"e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe",
16
)
.unwrap()
);
assert_eq!(
system_decoded.signature.s(),
U256::from_str_radix(
"13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4",
16
)
.unwrap()
);
assert_eq!(
system_decoded.hash,
b256!("0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e")
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/receipt_file_codec.rs | crates/optimism/cli/src/receipt_file_codec.rs | //! Codec for reading raw receipts from a file.
use alloy_consensus::Receipt;
use alloy_primitives::{
bytes::{Buf, BytesMut},
Address, Bloom, Bytes, Log, B256,
};
use alloy_rlp::{Decodable, RlpDecodable};
use op_alloy_consensus::{OpDepositReceipt, OpTxType};
use reth_optimism_primitives::OpReceipt;
use tokio_util::codec::Decoder;
use reth_downloaders::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber};
/// Codec for reading raw receipts from a file.
///
/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the
/// framed reader has capacity for the entire receipts file. Otherwise, the decoder will return
/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP receipts can only be
/// decoded if the internal buffer is large enough to contain the entire receipt.
///
/// Without ensuring the framed reader has capacity for the entire file, a receipt is likely to
/// fall across two read buffers, the decoder will not be able to decode the receipt, which will
/// cause it to fail.
///
/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set
/// the capacity of the framed reader to the size of the file.
#[derive(Debug)]
pub struct OpGethReceiptFileCodec<R = Receipt>(core::marker::PhantomData<R>);
impl<R> Default for OpGethReceiptFileCodec<R> {
fn default() -> Self {
Self(Default::default())
}
}
impl<R> Decoder for OpGethReceiptFileCodec<R>
where
R: TryFrom<OpGethReceipt, Error: Into<FileClientError>>,
{
type Item = Option<ReceiptWithBlockNumber<R>>;
type Error = FileClientError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if src.is_empty() {
return Ok(None)
}
let buf_slice = &mut src.as_ref();
let receipt = OpGethReceiptContainer::decode(buf_slice)
.map_err(|err| Self::Error::Rlp(err, src.to_vec()))?
.0;
src.advance(src.len() - buf_slice.len());
Ok(Some(
receipt
.map(|receipt| {
let number = receipt.block_number;
receipt
.try_into()
.map_err(Into::into)
.map(|receipt| ReceiptWithBlockNumber { receipt, number })
})
.transpose()?,
))
}
}
/// See <https://github.com/testinprod-io/op-geth/pull/1>
#[derive(Debug, PartialEq, Eq, RlpDecodable)]
pub struct OpGethReceipt {
tx_type: u8,
post_state: Bytes,
status: u64,
cumulative_gas_used: u64,
bloom: Bloom,
/// <https://github.com/testinprod-io/op-geth/blob/29062eb0fac595eeeddd3a182a25326405c66e05/core/types/log.go#L67-L72>
logs: Vec<Log>,
tx_hash: B256,
contract_address: Address,
gas_used: u64,
block_hash: B256,
block_number: u64,
transaction_index: u32,
l1_gas_price: u64,
l1_gas_used: u64,
l1_fee: u64,
fee_scalar: String,
}
#[derive(Debug, PartialEq, Eq, RlpDecodable)]
#[rlp(trailing)]
struct OpGethReceiptContainer(Option<OpGethReceipt>);
impl TryFrom<OpGethReceipt> for OpReceipt {
type Error = FileClientError;
fn try_from(exported_receipt: OpGethReceipt) -> Result<Self, Self::Error> {
let OpGethReceipt { tx_type, status, cumulative_gas_used, logs, .. } = exported_receipt;
let tx_type = OpTxType::try_from(tx_type.to_be_bytes()[0])
.map_err(|e| FileClientError::Rlp(e.into(), vec![tx_type]))?;
let receipt =
alloy_consensus::Receipt { status: (status != 0).into(), cumulative_gas_used, logs };
match tx_type {
OpTxType::Legacy => Ok(Self::Legacy(receipt)),
OpTxType::Eip2930 => Ok(Self::Eip2930(receipt)),
OpTxType::Eip1559 => Ok(Self::Eip1559(receipt)),
OpTxType::Eip7702 => Ok(Self::Eip7702(receipt)),
OpTxType::Deposit => Ok(Self::Deposit(OpDepositReceipt {
inner: receipt,
deposit_nonce: None,
deposit_receipt_version: None,
})),
}
}
}
#[cfg(test)]
pub(crate) mod test {
use alloy_consensus::{Receipt, TxReceipt};
use alloy_primitives::{address, b256, hex, LogData};
use super::*;
pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!(
"f9030ff9030c8080018303183db9010000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb6834800080a05e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a9400000000000000000000000000000000000000008303183da0bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e8754530180018212c2821c2383312e35"
);
pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_2: &[u8] = &hex!(
"f90271f9026e8080018301c60db9010000080000000200000000000000000008000000000000000000000100008000000000000000000000000000000000000000000000000000000000400000000000100000000000000000000000020000000000000000000000000000000000004000000000000000000000000000000000400000000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000100000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000008400000000000000000010000000000000000020000000020000000000000000000000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007eda7867e0c7d4800080a0af6ed8a6864d44989adc47c84f6fe0aeb1819817505c42cde6cbbcd5e14dd3179400000000000000000000000000000000000000008301c60da045fd6ce41bb8ebb2bccdaa92dd1619e287704cb07722039901a7eba63dea1d130280018212c2821c2383312e35"
);
pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = &hex!(
"f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35"
);
fn hack_receipt_1() -> OpGethReceipt {
let receipt = receipt_block_1();
OpGethReceipt {
tx_type: receipt.receipt.tx_type() as u8,
post_state: Bytes::default(),
status: receipt.receipt.status() as u64,
cumulative_gas_used: receipt.receipt.cumulative_gas_used(),
bloom: Bloom::from(hex!(
"00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000"
)),
logs: receipt.receipt.into_logs(),
tx_hash: b256!("0x5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a"), contract_address: Address::ZERO, gas_used: 202813,
block_hash: b256!("0xbee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453"),
block_number: receipt.number,
transaction_index: 0,
l1_gas_price: 1,
l1_gas_used: 4802,
l1_fee: 7203,
fee_scalar: String::from("1.5"),
}
}
pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber<OpReceipt> {
let log_1 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271"),
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"),
],
Bytes::from(hex!(
"00000000000000000000000000000000000000000000000000000000618d8837"
)),
)
.unwrap(),
};
let log_2 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"),
b256!("0x00000000000000000000000000000000000000000000000000000000d0e3ebf0"),
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"),
],
Bytes::default(),
)
.unwrap(),
};
let log_3 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
b256!("0x00000000000000000000000000000000000000000000007edc6ca0bb68348000"),
],
Bytes::default(),
)
.unwrap(),
};
let receipt = OpReceipt::Legacy(Receipt {
status: true.into(),
cumulative_gas_used: 202813,
logs: vec![log_1, log_2, log_3],
});
ReceiptWithBlockNumber { receipt, number: 1 }
}
pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber<OpReceipt> {
let log_1 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"),
b256!("0x00000000000000000000000000000000000000000000000000000000d0ea0e40"),
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
b256!("0x000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240"),
],
Bytes::default(),
)
.unwrap(),
};
let log_2 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
b256!("0x00000000000000000000000000000000000000000000007eda7867e0c7d48000"),
],
Bytes::default(),
)
.unwrap(),
};
let receipt = OpReceipt::Legacy(Receipt {
status: true.into(),
cumulative_gas_used: 116237,
logs: vec![log_1, log_2],
});
ReceiptWithBlockNumber { receipt, number: 2 }
}
pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber<OpReceipt> {
let log_1 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"),
b256!("0x00000000000000000000000000000000000000000000000000000000d101e54b"),
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
b256!("0x000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99"),
],
Bytes::default(),
)
.unwrap(),
};
let log_2 = Log {
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
data: LogData::new(
vec![
b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
b256!("0x00000000000000000000000000000000000000000000007ed8842f0627748000"),
],
Bytes::default(),
)
.unwrap(),
};
let receipt = OpReceipt::Legacy(Receipt {
status: true.into(),
cumulative_gas_used: 116237,
logs: vec![log_1, log_2],
});
ReceiptWithBlockNumber { receipt, number: 3 }
}
#[test]
fn decode_hack_receipt() {
let receipt = hack_receipt_1();
let decoded = OpGethReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..])
.unwrap()
.0
.unwrap();
assert_eq!(receipt, decoded);
}
#[test]
fn receipts_codec() {
// rig
let mut receipt_1_to_3 = HACK_RECEIPT_ENCODED_BLOCK_1.to_vec();
receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2);
receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3);
let encoded = &mut BytesMut::from(&receipt_1_to_3[..]);
let mut codec = OpGethReceiptFileCodec::default();
// test
let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
assert_eq!(receipt_block_1(), first_decoded_receipt);
let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
assert_eq!(receipt_block_2(), second_decoded_receipt);
let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
assert_eq!(receipt_block_3(), third_decoded_receipt);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/chainspec.rs | crates/optimism/cli/src/chainspec.rs | use reth_cli::chainspec::{parse_genesis, ChainSpecParser};
use reth_optimism_chainspec::{generated_chain_value_parser, OpChainSpec, SUPPORTED_CHAINS};
use std::sync::Arc;
/// Optimism chain specification parser.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct OpChainSpecParser;
impl ChainSpecParser for OpChainSpecParser {
type ChainSpec = OpChainSpec;
const SUPPORTED_CHAINS: &'static [&'static str] = SUPPORTED_CHAINS;
fn parse(s: &str) -> eyre::Result<Arc<Self::ChainSpec>> {
chain_value_parser(s)
}
}
/// Clap value parser for [`OpChainSpec`]s.
///
/// The value parser matches either a known chain, the path
/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct.
pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<OpChainSpec>, eyre::Error> {
if let Some(op_chain_spec) = generated_chain_value_parser(s) {
Ok(op_chain_spec)
} else {
Ok(Arc::new(parse_genesis(s)?.into()))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_known_chain_spec() {
for &chain in OpChainSpecParser::SUPPORTED_CHAINS {
assert!(
<OpChainSpecParser as ChainSpecParser>::parse(chain).is_ok(),
"Failed to parse {chain}"
);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/commands/init_state.rs | crates/optimism/cli/src/commands/init_state.rs | //! Command that initializes the node from a genesis file.
use alloy_consensus::Header;
use clap::Parser;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::common::{AccessRights, CliHeader, CliNodeTypes, Environment};
use reth_db_common::init::init_from_state_dump;
use reth_optimism_chainspec::OpChainSpec;
use reth_optimism_primitives::{
bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD},
OpPrimitives,
};
use reth_primitives_traits::SealedHeader;
use reth_provider::{
BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory,
StaticFileWriter,
};
use std::{io::BufReader, sync::Arc};
use tracing::info;
/// Initializes the database with the genesis block.
#[derive(Debug, Parser)]
pub struct InitStateCommandOp<C: ChainSpecParser> {
#[command(flatten)]
init_state: reth_cli_commands::init_state::InitStateCommand<C>,
/// **Optimism Mainnet Only**
///
/// Specifies whether to initialize the state without relying on OVM historical data.
///
/// When enabled, and before inserting the state, it creates a dummy chain up to the last OVM
/// block (#105235062) (14GB / 90 seconds). It then, appends the Bedrock block.
///
/// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be
/// ignored.
#[arg(long, default_value = "false")]
without_ovm: bool,
}
impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> InitStateCommandOp<C> {
/// Execute the `init` command
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
self,
) -> eyre::Result<()> {
info!(target: "reth::cli", "Reth init-state starting");
let Environment { config, provider_factory, .. } =
self.init_state.env.init::<N>(AccessRights::RW)?;
let static_file_provider = provider_factory.static_file_provider();
let provider_rw = provider_factory.database_provider_rw()?;
// OP-Mainnet may want to bootstrap a chain without OVM historical data
if provider_factory.chain_spec().is_optimism_mainnet() && self.without_ovm {
let last_block_number = provider_rw.last_block_number()?;
if last_block_number == 0 {
reth_cli_commands::init_state::without_evm::setup_without_evm(
&provider_rw,
SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH),
BEDROCK_HEADER_TTD,
|number| {
let mut header = Header::default();
header.set_number(number);
header
},
)?;
// SAFETY: it's safe to commit static files, since in the event of a crash, they
// will be unwound according to database checkpoints.
//
// Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and
// init_state_dump
static_file_provider.commit()?;
} else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number {
return Err(eyre::eyre!(
"Data directory should be empty when calling init-state with --without-ovm."
))
}
}
info!(target: "reth::cli", "Initiating state dump");
let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?);
let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?;
provider_rw.commit()?;
info!(target: "reth::cli", hash = ?hash, "Genesis block written");
Ok(())
}
}
impl<C: ChainSpecParser> InitStateCommandOp<C> {
/// Returns the underlying chain being used to run this command
pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
self.init_state.chain_spec()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/commands/test_vectors.rs | crates/optimism/cli/src/commands/test_vectors.rs | //! Command for generating test vectors.
use clap::{Parser, Subcommand};
use op_alloy_consensus::TxDeposit;
use proptest::test_runner::TestRunner;
use reth_chainspec::ChainSpec;
use reth_cli_commands::{
compact_types,
test_vectors::{
compact,
compact::{
generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS,
READ_VECTORS as ETH_READ_VECTORS,
},
tables,
},
};
use std::sync::Arc;
/// Generate test-vectors for different data types.
#[derive(Debug, Parser)]
pub struct Command {
#[command(subcommand)]
command: Subcommands,
}
#[derive(Subcommand, Debug)]
/// `reth test-vectors` subcommands
pub enum Subcommands {
/// Generates test vectors for specified tables. If no table is specified, generate for all.
Tables {
/// List of table names. Case-sensitive.
names: Vec<String>,
},
/// Generates test vectors for `Compact` types with `--write`. Reads and checks generated
/// vectors with `--read`.
#[group(multiple = false, required = true)]
Compact {
/// Write test vectors to a file.
#[arg(long)]
write: bool,
/// Read test vectors from a file.
#[arg(long)]
read: bool,
},
}
impl Command {
/// Execute the command
pub async fn execute(self) -> eyre::Result<()> {
match self.command {
Subcommands::Tables { names } => {
tables::generate_vectors(names)?;
}
Subcommands::Compact { write, .. } => {
compact_types!(
regular: [
TxDeposit
], identifier: []
);
if write {
compact::generate_vectors_with(ETH_GENERATE_VECTORS)?;
compact::generate_vectors_with(GENERATE_VECTORS)?;
} else {
compact::read_vectors_with(ETH_READ_VECTORS)?;
compact::read_vectors_with(READ_VECTORS)?;
}
}
}
Ok(())
}
/// Returns the underlying chain being used to run this command
pub const fn chain_spec(&self) -> Option<&Arc<ChainSpec>> {
None
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/commands/import_receipts.rs | crates/optimism/cli/src/commands/import_receipts.rs | //! Command that imports OP mainnet receipts from Bedrock datadir, exported via
//! <https://github.com/testinprod-io/op-geth/pull/1>.
use crate::receipt_file_codec::OpGethReceiptFileCodec;
use clap::Parser;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
use reth_db_api::tables;
use reth_downloaders::{
file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
receipt_file_client::ReceiptFileClient,
};
use reth_execution_types::ExecutionOutcome;
use reth_node_builder::ReceiptTy;
use reth_node_core::version::version_metadata;
use reth_optimism_chainspec::OpChainSpec;
use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives, OpReceipt};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory,
OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StageCheckpointWriter,
StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation,
};
use reth_stages::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use tracing::{debug, info, trace, warn};
/// Initializes the database with the genesis block.
#[derive(Debug, Parser)]
pub struct ImportReceiptsOpCommand<C: ChainSpecParser> {
#[command(flatten)]
env: EnvironmentArgs<C>,
/// Chunk byte length to read from file.
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
chunk_len: Option<u64>,
/// The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for
/// exporting OP chain segment below Bedrock block via testinprod/op-geth).
///
/// <https://github.com/testinprod-io/op-geth/pull/1>
#[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
path: PathBuf,
}
impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> ImportReceiptsOpCommand<C> {
/// Execute `import` command
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
self,
) -> eyre::Result<()> {
info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);
debug!(target: "reth::cli",
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
"Chunking receipts import"
);
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
import_receipts_from_file(
provider_factory,
self.path,
self.chunk_len,
|first_block, receipts| {
let mut total_filtered_out_dup_txns = 0;
for (index, receipts_for_block) in receipts.iter_mut().enumerate() {
if is_dup_tx(first_block + index as u64) {
receipts_for_block.clear();
total_filtered_out_dup_txns += 1;
}
}
total_filtered_out_dup_txns
},
)
.await
}
}
impl<C: ChainSpecParser> ImportReceiptsOpCommand<C> {
/// Returns the underlying chain being used to run this command
pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
Some(&self.env.chain)
}
}
/// Imports receipts to static files from file in chunks. See [`import_receipts_from_reader`].
pub async fn import_receipts_from_file<N, P, F>(
provider_factory: ProviderFactory<N>,
path: P,
chunk_len: Option<u64>,
filter: F,
) -> eyre::Result<()>
where
N: ProviderNodeTypes<ChainSpec = OpChainSpec, Primitives: NodePrimitives<Receipt = OpReceipt>>,
P: AsRef<Path>,
F: FnMut(u64, &mut Vec<Vec<OpReceipt>>) -> usize,
{
for stage in StageId::ALL {
let checkpoint = provider_factory.database_provider_ro()?.get_stage_checkpoint(stage)?;
trace!(target: "reth::cli",
?stage,
?checkpoint,
"Read stage checkpoints from db"
);
}
// open file
let reader = ChunkedFileReader::new(&path, chunk_len).await?;
// import receipts
let _ = import_receipts_from_reader(&provider_factory, reader, filter).await?;
info!(target: "reth::cli",
"Receipt file imported"
);
Ok(())
}
/// Imports receipts to static files. Takes a filter callback as parameter, that returns the total
/// number of filtered out receipts.
///
/// Caution! Filter callback must replace completely filtered out receipts for a block, with empty
/// vectors, rather than `vec!(None)`. This is since the code for writing to static files, expects
/// indices in the receipts list, to map to sequential block numbers.
pub async fn import_receipts_from_reader<N, F>(
provider_factory: &ProviderFactory<N>,
mut reader: ChunkedFileReader,
mut filter: F,
) -> eyre::Result<ImportReceiptsResult>
where
N: ProviderNodeTypes<Primitives: NodePrimitives<Receipt = OpReceipt>>,
F: FnMut(u64, &mut Vec<Vec<ReceiptTy<N>>>) -> usize,
{
let static_file_provider = provider_factory.static_file_provider();
// Ensure that receipts hasn't been initialized apart from `init_genesis`.
if let Some(num_receipts) =
static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts)
{
if num_receipts > 0 {
eyre::bail!("Expected no receipts in storage, but found {num_receipts}.");
}
}
match static_file_provider.get_highest_static_file_block(StaticFileSegment::Receipts) {
Some(receipts_block) => {
if receipts_block > 0 {
eyre::bail!("Expected highest receipt block to be 0, but found {receipts_block}.");
}
}
None => {
eyre::bail!(
"Receipts was not initialized. Please import blocks and transactions before calling this command."
);
}
}
let provider = provider_factory.database_provider_rw()?;
let mut total_decoded_receipts = 0;
let mut total_receipts = 0;
let mut total_filtered_out_dup_txns = 0;
let mut highest_block_receipts = 0;
let highest_block_transactions = static_file_provider
.get_highest_static_file_block(StaticFileSegment::Transactions)
.expect("transaction static files must exist before importing receipts");
while let Some(file_client) =
reader.next_receipts_chunk::<ReceiptFileClient<OpGethReceiptFileCodec<OpReceipt>>>().await?
{
if highest_block_receipts == highest_block_transactions {
warn!(target: "reth::cli", highest_block_receipts, highest_block_transactions, "Ignoring all other blocks in the file since we have reached the desired height");
break
}
// create a new file client from chunk read from file
let ReceiptFileClient {
mut receipts,
mut first_block,
total_receipts: total_receipts_chunk,
..
} = file_client;
// mark these as decoded
total_decoded_receipts += total_receipts_chunk;
total_filtered_out_dup_txns += filter(first_block, &mut receipts);
info!(target: "reth::cli",
first_receipts_block=?first_block,
total_receipts_chunk,
"Importing receipt file chunk"
);
// It is possible for the first receipt returned by the file client to be the genesis
// block. In this case, we just prepend empty receipts to the current list of receipts.
// When initially writing to static files, the provider expects the first block to be block
// one. So, if the first block returned by the file client is the genesis block, we remove
// those receipts.
if first_block == 0 {
// remove the first empty receipts
let genesis_receipts = receipts.remove(0);
debug_assert!(genesis_receipts.is_empty());
// this ensures the execution outcome and static file producer start at block 1
first_block = 1;
}
highest_block_receipts = first_block + receipts.len() as u64 - 1;
// RLP file may have too many blocks. We ignore the excess, but warn the user.
if highest_block_receipts > highest_block_transactions {
let excess = highest_block_receipts - highest_block_transactions;
highest_block_receipts -= excess;
// Remove the last `excess` blocks
receipts.truncate(receipts.len() - excess as usize);
warn!(target: "reth::cli", highest_block_receipts, "Too many decoded blocks, ignoring the last {excess}.");
}
// Update total_receipts after all filtering
total_receipts += receipts.iter().map(|v| v.len()).sum::<usize>();
// We're reusing receipt writing code internal to
// `UnifiedStorageWriter::append_receipts_from_blocks`, so we just use a default empty
// `BundleState`.
let execution_outcome =
ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default());
// finally, write the receipts
provider.write_state(
&execution_outcome,
OriginalValuesKnown::Yes,
StorageLocation::StaticFiles,
)?;
}
// Only commit if we have imported as many receipts as the number of transactions.
let total_imported_txns = static_file_provider
.count_entries::<tables::Transactions>()
.expect("transaction static files must exist before importing receipts");
if total_receipts != total_imported_txns {
eyre::bail!(
"Number of receipts ({total_receipts}) inconsistent with transactions {total_imported_txns}"
)
}
// Only commit if the receipt block height matches the one from transactions.
if highest_block_receipts != highest_block_transactions {
eyre::bail!(
"Receipt block height ({highest_block_receipts}) inconsistent with transactions' {highest_block_transactions}"
)
}
// Required or any access-write provider factory will attempt to unwind to 0.
provider
.save_stage_checkpoint(StageId::Execution, StageCheckpoint::new(highest_block_receipts))?;
UnifiedStorageWriter::commit(provider)?;
Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns })
}
/// Result of importing receipts in chunks.
#[derive(Debug)]
pub struct ImportReceiptsResult {
/// Total decoded receipts.
pub total_decoded_receipts: usize,
/// Total filtered out receipts.
pub total_filtered_out_dup_txns: usize,
}
#[cfg(test)]
mod test {
    use alloy_primitives::hex;
    use reth_db_common::init::init_genesis;
    use reth_optimism_chainspec::OP_MAINNET;
    use reth_optimism_node::OpNode;
    use reth_provider::test_utils::create_test_provider_factory_with_node_types;
    use reth_stages::test_utils::TestStageDB;
    use tempfile::tempfile;
    use tokio::{
        fs::File,
        io::{AsyncSeekExt, AsyncWriteExt, SeekFrom},
    };
    use crate::receipt_file_codec::test::{
        HACK_RECEIPT_ENCODED_BLOCK_1, HACK_RECEIPT_ENCODED_BLOCK_2, HACK_RECEIPT_ENCODED_BLOCK_3,
    };
    use super::*;
    /// No receipts for genesis block: `0xc0` is the RLP encoding of an empty list.
    const EMPTY_RECEIPTS_GENESIS_BLOCK: &[u8] = &hex!("c0");
    // NOTE(review): test is ignored; the reason is not recorded here — confirm
    // before re-enabling.
    #[ignore]
    #[tokio::test]
    async fn filter_out_genesis_block_receipts() {
        // Write an empty genesis receipt list followed by three encoded receipt
        // blocks into a temp file, then rewind so the reader starts at byte 0.
        let mut f: File = tempfile().unwrap().into();
        f.write_all(EMPTY_RECEIPTS_GENESIS_BLOCK).await.unwrap();
        f.write_all(HACK_RECEIPT_ENCODED_BLOCK_1).await.unwrap();
        f.write_all(HACK_RECEIPT_ENCODED_BLOCK_2).await.unwrap();
        f.write_all(HACK_RECEIPT_ENCODED_BLOCK_3).await.unwrap();
        f.flush().await.unwrap();
        f.seek(SeekFrom::Start(0)).await.unwrap();
        let reader =
            ChunkedFileReader::from_file(f, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE).await.unwrap();
        // Fresh OP-mainnet test database with genesis initialized.
        let db = TestStageDB::default();
        init_genesis(&db.factory).unwrap();
        let provider_factory =
            create_test_provider_factory_with_node_types::<OpNode>(OP_MAINNET.clone());
        // The closure `|_, _| 0` supplies the first-transaction-number lookup.
        let ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns } =
            import_receipts_from_reader(&provider_factory, reader, |_, _| 0).await.unwrap();
        // Only the three non-empty blocks count; the empty genesis entry yields
        // no receipts, and nothing is treated as a duplicate.
        assert_eq!(total_decoded_receipts, 3);
        assert_eq!(total_filtered_out_dup_txns, 0);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/commands/mod.rs | crates/optimism/cli/src/commands/mod.rs | use crate::chainspec::OpChainSpecParser;
use clap::Subcommand;
use import::ImportOpCommand;
use import_receipts::ImportReceiptsOpCommand;
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::{
config_cmd, db, dump_genesis, init_cmd,
node::{self, NoArgs},
p2p, prune, re_execute, recover, stage,
};
use std::{fmt, sync::Arc};
pub mod import;
pub mod import_receipts;
pub mod init_state;
#[cfg(feature = "dev")]
pub mod test_vectors;
// Top-level OP-stack CLI subcommand set. Generic over the chain-spec parser
// `Spec` (defaults to `OpChainSpecParser`) and extra node CLI args `Ext`
// (defaults to `NoArgs`) so downstream node builders can extend it.
//
// NOTE: the `///` doc comments below double as clap help text at runtime, so
// they are kept verbatim; review notes use `//` comments instead.
/// Commands to be executed
#[derive(Debug, Subcommand)]
pub enum Commands<Spec: ChainSpecParser = OpChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs>
{
    /// Start the node
    // Boxed: the node command is by far the largest variant.
    #[command(name = "node")]
    Node(Box<node::NodeCommand<Spec, Ext>>),
    /// Initialize the database from a genesis file.
    #[command(name = "init")]
    Init(init_cmd::InitCommand<Spec>),
    /// Initialize the database from a state dump file.
    #[command(name = "init-state")]
    InitState(init_state::InitStateCommandOp<Spec>),
    /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing.
    #[command(name = "import-op")]
    ImportOp(ImportOpCommand<Spec>),
    /// This imports RLP encoded receipts from a file.
    #[command(name = "import-receipts-op")]
    ImportReceiptsOp(ImportReceiptsOpCommand<Spec>),
    /// Dumps genesis block JSON configuration to stdout.
    DumpGenesis(dump_genesis::DumpGenesisCommand<Spec>),
    /// Database debugging utilities
    #[command(name = "db")]
    Db(db::Command<Spec>),
    /// Manipulate individual stages.
    // Boxed for the same size reason as `Node`.
    #[command(name = "stage")]
    Stage(Box<stage::Command<Spec>>),
    /// P2P Debugging utilities
    #[command(name = "p2p")]
    P2P(Box<p2p::Command<Spec>>),
    /// Write config to stdout
    #[command(name = "config")]
    Config(config_cmd::Command),
    /// Scripts for node recovery
    #[command(name = "recover")]
    Recover(recover::Command<Spec>),
    /// Prune according to the configuration without any limits
    #[command(name = "prune")]
    Prune(prune::PruneCommand<Spec>),
    /// Generate Test Vectors
    // Only compiled in with the `dev` feature.
    #[cfg(feature = "dev")]
    #[command(name = "test-vectors")]
    TestVectors(test_vectors::Command),
    /// Re-execute blocks in parallel to verify historical sync correctness.
    #[command(name = "re-execute")]
    ReExecute(re_execute::Command<Spec>),
}
impl<
        C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>,
        Ext: clap::Args + fmt::Debug,
    > Commands<C, Ext>
{
    /// Returns the chain spec selected on the command line, if the subcommand
    /// carries one.
    ///
    /// `Config` and `TestVectors` are chain-agnostic and return `None`; every
    /// other subcommand delegates to its own `chain_spec` accessor.
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        match self {
            // Chain-agnostic subcommands.
            Self::Config(_) => None,
            #[cfg(feature = "dev")]
            Self::TestVectors(_) => None,
            // Chain-aware subcommands.
            Self::Node(cmd) => cmd.chain_spec(),
            Self::Init(cmd) => cmd.chain_spec(),
            Self::InitState(cmd) => cmd.chain_spec(),
            Self::ImportOp(cmd) => cmd.chain_spec(),
            Self::ImportReceiptsOp(cmd) => cmd.chain_spec(),
            Self::DumpGenesis(cmd) => cmd.chain_spec(),
            Self::Db(cmd) => cmd.chain_spec(),
            Self::Stage(cmd) => cmd.chain_spec(),
            Self::P2P(cmd) => cmd.chain_spec(),
            Self::Recover(cmd) => cmd.chain_spec(),
            Self::Prune(cmd) => cmd.chain_spec(),
            Self::ReExecute(cmd) => cmd.chain_spec(),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/cli/src/commands/import.rs | crates/optimism/cli/src/commands/import.rs | //! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a
//! file.
use clap::Parser;
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_commands::{
common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs},
import::build_import_pipeline,
};
use reth_consensus::noop::NoopConsensus;
use reth_db_api::{tables, transaction::DbTx};
use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE};
use reth_node_builder::BlockTy;
use reth_node_core::version::version_metadata;
use reth_optimism_chainspec::OpChainSpec;
use reth_optimism_evm::OpExecutorProvider;
use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives};
use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, StageCheckpointReader};
use reth_prune::PruneModes;
use reth_stages::StageId;
use reth_static_file::StaticFileProducer;
use std::{path::PathBuf, sync::Arc};
use tracing::{debug, error, info};
// CLI arguments for `import-op`. The `///` doc comments become clap help text
// (note `verbatim_doc_comment`), so they are kept verbatim; review notes are
// `//` comments.
/// Syncs RLP encoded blocks from a file.
#[derive(Debug, Parser)]
pub struct ImportOpCommand<C: ChainSpecParser> {
    // Shared environment (datadir, db args, chain spec).
    #[command(flatten)]
    env: EnvironmentArgs<C>,
    /// Chunk byte length to read from file.
    // Falls back to DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE in `execute` when unset.
    #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
    chunk_len: Option<u64>,
    /// The path to a block file for import.
    ///
    /// The online stages (headers and bodies) are replaced by a file import, after which the
    /// remaining stages are executed.
    #[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
    path: PathBuf,
}
impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> ImportOpCommand<C> {
    /// Executes the `import-op` command: streams RLP-encoded blocks from
    /// `self.path` in chunks and runs the offline import pipeline on each
    /// chunk in turn.
    ///
    /// Stages requiring state are disabled because OVM (pre-Bedrock) state
    /// changes cannot be executed. After all chunks are processed, decoded
    /// totals are checked against the database; a mismatch is reported via an
    /// error log rather than a hard failure.
    ///
    /// # Errors
    ///
    /// Returns an error if the environment cannot be initialized, the file
    /// cannot be read, a chunk has no tip, or the pipeline run fails.
    pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
        self,
    ) -> eyre::Result<()> {
        info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);
        info!(target: "reth::cli",
            "Disabled stages requiring state, since cannot execute OVM state changes"
        );
        debug!(target: "reth::cli",
            chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
            "Chunking chain import"
        );
        // Read-write access: the import writes headers/bodies into the db.
        let Environment { provider_factory, config, .. } = self.env.init::<N>(AccessRights::RW)?;
        // we use noop here because we expect the inputs to be valid
        let consensus = Arc::new(NoopConsensus::default());
        // open file
        let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?;
        // Running totals across all chunks, compared against the db afterwards.
        let mut total_decoded_blocks = 0;
        let mut total_decoded_txns = 0;
        let mut total_filtered_out_dup_txns = 0;
        // Each chunk must attach to the current chain head; start from the
        // latest sealed header (genesis on a fresh db).
        let mut sealed_header = provider_factory
            .sealed_header(provider_factory.last_block_number()?)?
            .expect("should have genesis");
        while let Some(mut file_client) =
            reader.next_chunk::<BlockTy<N>>(consensus.clone(), Some(sealed_header)).await?
        {
            // create a new FileClient from chunk read from file
            info!(target: "reth::cli",
                "Importing chain file chunk"
            );
            let tip = file_client.tip().ok_or_else(|| eyre::eyre!("file client has no tip"))?;
            info!(target: "reth::cli", "Chain file chunk read");
            total_decoded_blocks += file_client.headers_len();
            total_decoded_txns += file_client.total_transactions();
            // Drop transactions that are known duplicates in pre-Bedrock OP
            // mainnet data, counting how many were removed.
            for (block_number, body) in file_client.bodies_iter_mut() {
                body.transactions.retain(|_| {
                    if is_dup_tx(block_number) {
                        total_filtered_out_dup_txns += 1;
                        return false
                    }
                    true
                })
            }
            // Build a pipeline whose header/body stages are fed by the file
            // client instead of the network.
            let (mut pipeline, events) = build_import_pipeline(
                &config,
                provider_factory.clone(),
                &consensus,
                Arc::new(file_client),
                StaticFileProducer::new(provider_factory.clone(), PruneModes::default()),
                true,
                OpExecutorProvider::optimism(provider_factory.chain_spec()),
            )?;
            // override the tip
            pipeline.set_tip(tip);
            debug!(target: "reth::cli", ?tip, "Tip manually set");
            let provider = provider_factory.provider()?;
            let latest_block_number =
                provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
            // Surface pipeline progress events in the background.
            tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events));
            // Run pipeline
            info!(target: "reth::cli", "Starting sync pipeline");
            // Allow ctrl-c to interrupt the pipeline run for this chunk.
            tokio::select! {
                res = pipeline.run() => res?,
                _ = tokio::signal::ctrl_c() => {},
            }
            // Advance the attachment point to whatever the pipeline imported.
            sealed_header = provider_factory
                .sealed_header(provider_factory.last_block_number()?)?
                .expect("should have genesis");
        }
        // Compare decoded totals against what actually landed in the db.
        let provider = provider_factory.provider()?;
        let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
        let total_imported_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
        if total_decoded_blocks != total_imported_blocks ||
            total_decoded_txns != total_imported_txns + total_filtered_out_dup_txns
        {
            error!(target: "reth::cli",
                total_decoded_blocks,
                total_imported_blocks,
                total_decoded_txns,
                total_filtered_out_dup_txns,
                total_imported_txns,
                "Chain was partially imported"
            );
        }
        info!(target: "reth::cli",
            total_imported_blocks,
            total_imported_txns,
            total_decoded_blocks,
            total_decoded_txns,
            total_filtered_out_dup_txns,
            "Chain file imported"
        );
        Ok(())
    }
}
impl<C: ChainSpecParser> ImportOpCommand<C> {
    /// Returns the chain spec this command was configured with.
    ///
    /// Always `Some`: the environment args carry a chain spec.
    pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        let spec = &self.env.chain;
        Some(spec)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/op.rs | crates/optimism/chainspec/src/op.rs | //! Chain specification for the Optimism Mainnet network.
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
use alloc::{sync::Arc, vec};
use alloy_chains::Chain;
use alloy_primitives::{b256, U256};
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork};
use reth_ethereum_forks::EthereumHardfork;
use reth_optimism_forks::{OpHardfork, OP_MAINNET_HARDFORKS};
use reth_primitives_traits::SealedHeader;
/// The Optimism Mainnet spec, lazily built on first access.
pub static OP_MAINNET: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
    // genesis contains empty alloc field because state at first bedrock block is imported
    // manually from trusted source
    let genesis = serde_json::from_str(include_str!("../res/genesis/optimism.json"))
        .expect("Can't deserialize Optimism Mainnet genesis json");
    let hardforks = OP_MAINNET_HARDFORKS.clone();
    OpChainSpec {
        inner: ChainSpec {
            chain: Chain::optimism_mainnet(),
            // Seal with the known mainnet genesis hash instead of recomputing
            // it (the alloc is empty, so a computed hash would not match).
            genesis_header: SealedHeader::new(
                make_op_genesis_header(&genesis, &hardforks),
                b256!("0x7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b"),
            ),
            genesis: genesis.into(),
            // OP networks are treated as merged at block 0.
            paris_block_and_final_difficulty: Some((0, U256::from(0))),
            hardforks,
            // Pre-Canyon blocks use the original OP EIP-1559 params; Canyon
            // switches to its own denominator.
            base_fee_params: BaseFeeParamsKind::Variable(
                vec![
                    (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()),
                    (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()),
                ]
                .into(),
            ),
            prune_delete_limit: 10000,
            ..Default::default()
        },
    }
    .into()
});
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/dev.rs | crates/optimism/chainspec/src/dev.rs | //! Chain specification in dev mode for custom chain.
use alloc::sync::Arc;
use alloy_chains::Chain;
use alloy_primitives::U256;
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec};
use reth_optimism_forks::DEV_HARDFORKS;
use reth_primitives_traits::SealedHeader;
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
/// OP dev testnet specification
///
/// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test
/// test test test test test test test junk".
pub static OP_DEV: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
let genesis = serde_json::from_str(include_str!("../res/genesis/dev.json"))
.expect("Can't deserialize Dev testnet genesis json");
let hardforks = DEV_HARDFORKS.clone();
let genesis_header = SealedHeader::seal_slow(make_op_genesis_header(&genesis, &hardforks));
OpChainSpec {
inner: ChainSpec {
chain: Chain::dev(),
genesis_header,
genesis: genesis.into(),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
hardforks,
base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
..Default::default()
},
}
.into()
});
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/base.rs | crates/optimism/chainspec/src/base.rs | //! Chain specification for the Base Mainnet network.
use alloc::{sync::Arc, vec};
use alloy_chains::Chain;
use alloy_primitives::{b256, U256};
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec};
use reth_ethereum_forks::{EthereumHardfork, Hardfork};
use reth_optimism_forks::{OpHardfork, BASE_MAINNET_HARDFORKS};
use reth_primitives_traits::SealedHeader;
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
/// The Base mainnet spec, lazily built on first access.
pub static BASE_MAINNET: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
    let genesis = serde_json::from_str(include_str!("../res/genesis/base.json"))
        .expect("Can't deserialize Base genesis json");
    let hardforks = BASE_MAINNET_HARDFORKS.clone();
    OpChainSpec {
        inner: ChainSpec {
            chain: Chain::base_mainnet(),
            // Seal with the known Base mainnet genesis hash rather than
            // recomputing it from the bundled genesis file.
            genesis_header: SealedHeader::new(
                make_op_genesis_header(&genesis, &hardforks),
                b256!("0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd"),
            ),
            genesis: genesis.into(),
            // OP networks are treated as merged at block 0.
            paris_block_and_final_difficulty: Some((0, U256::from(0))),
            hardforks,
            // Pre-Canyon blocks use the original OP EIP-1559 params; Canyon
            // switches to its own denominator.
            base_fee_params: BaseFeeParamsKind::Variable(
                vec![
                    (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()),
                    (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()),
                ]
                .into(),
            ),
            ..Default::default()
        },
    }
    .into()
});
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/lib.rs | crates/optimism/chainspec/src/lib.rs | //! OP-Reth chain specs.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(not(feature = "std"), no_std)]
// About the provided chain specs from `res/superchain-configs.tar`:
// The provided `OpChainSpec` structs are built from config files read from
// `superchain-configs.tar`. This `superchain-configs.tar` file contains the chain configs and
// genesis files for all chains. It is created by the `fetch_superchain_config.sh` script in
// the `res` directory. All configs are initially loaded from
// <https://github.com/ethereum-optimism/superchain-registry>. See the script for more details.
//
// The file is a tar archive containing the following files:
// - `genesis/<environment>/<chain_name>.json.zz`: The genesis file compressed with deflate. It
// contains the initial accounts, etc.
// - `configs/<environment>/<chain_name>.json`: The chain metadata file containing the chain id,
// hard forks, etc.
//
// For example, for `UNICHAIN_MAINNET`, the `genesis/mainnet/unichain.json.zz` and
// `configs/mainnet/base.json` is loaded and combined into the `OpChainSpec` struct.
// See `read_superchain_genesis` in `configs.rs` for more details.
//
// To update the chain specs, run the `fetch_superchain_config.sh` script in the `res` directory.
// This will fetch the latest chain configs from the superchain registry and create a new
// `superchain-configs.tar` file. See the script for more details.
extern crate alloc;
mod base;
mod base_sepolia;
mod basefee;
pub mod constants;
mod dev;
mod op;
mod op_sepolia;
#[cfg(feature = "superchain-configs")]
mod superchain;
#[cfg(feature = "superchain-configs")]
pub use superchain::*;
pub use base::BASE_MAINNET;
pub use base_sepolia::BASE_SEPOLIA;
pub use basefee::*;
pub use dev::OP_DEV;
pub use op::OP_MAINNET;
pub use op_sepolia::OP_SEPOLIA;
/// Re-export for convenience
pub use reth_optimism_forks::*;
use alloc::{boxed::Box, vec, vec::Vec};
use alloy_chains::Chain;
use alloy_consensus::{proofs::storage_root_unhashed, BlockHeader, Header};
use alloy_eips::eip7840::BlobParams;
use alloy_genesis::Genesis;
use alloy_hardforks::Hardfork;
use alloy_primitives::{B256, U256};
use derive_more::{Constructor, Deref, From, Into};
use reth_chainspec::{
BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract,
DisplayHardforks, EthChainSpec, EthereumHardforks, ForkFilter, ForkId, Hardforks, Head,
};
use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition};
use reth_network_peers::NodeRecord;
use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
use reth_primitives_traits::{sync::LazyLock, SealedHeader};
/// Chain spec builder for an OP stack chain.
///
/// Thin wrapper around the L1 [`ChainSpecBuilder`] that adds OP-hardfork-aware
/// convenience methods and reseals the genesis header on [`build`](Self::build).
#[derive(Debug, Default, From)]
pub struct OpChainSpecBuilder {
    /// [`ChainSpecBuilder`] accumulating chain id, genesis and hardforks.
    inner: ChainSpecBuilder,
}
impl OpChainSpecBuilder {
    /// Creates a builder pre-populated with the Base mainnet chain id, genesis
    /// and hardfork schedule.
    pub fn base_mainnet() -> Self {
        Self {
            inner: ChainSpecBuilder::default()
                .chain(BASE_MAINNET.chain)
                .genesis(BASE_MAINNET.genesis.clone())
                .with_forks(BASE_MAINNET.hardforks.clone()),
        }
    }
    /// Creates a builder pre-populated with the OP mainnet chain id, genesis
    /// and hardfork schedule.
    pub fn optimism_mainnet() -> Self {
        Self {
            inner: ChainSpecBuilder::default()
                .chain(OP_MAINNET.chain)
                .genesis(OP_MAINNET.genesis.clone())
                .with_forks(OP_MAINNET.hardforks.clone()),
        }
    }
}
impl OpChainSpecBuilder {
    /// Sets the chain id.
    pub fn chain(self, chain: Chain) -> Self {
        Self { inner: self.inner.chain(chain) }
    }
    /// Sets the genesis block.
    pub fn genesis(self, genesis: Genesis) -> Self {
        Self { inner: self.inner.genesis(genesis.into()) }
    }
    /// Adds the given fork with its activation condition.
    pub fn with_fork<H: Hardfork>(self, fork: H, condition: ForkCondition) -> Self {
        Self { inner: self.inner.with_fork(fork, condition) }
    }
    /// Adds the given set of forks with their activation conditions.
    pub fn with_forks(self, forks: ChainHardforks) -> Self {
        Self { inner: self.inner.with_forks(forks) }
    }
    /// Removes the given fork from the schedule.
    pub fn without_fork(self, fork: OpHardfork) -> Self {
        Self { inner: self.inner.without_fork(fork) }
    }
    /// Enables Bedrock at genesis (also activates Paris).
    pub fn bedrock_activated(self) -> Self {
        let inner =
            self.inner.paris_activated().with_fork(OpHardfork::Bedrock, ForkCondition::Block(0));
        Self { inner }
    }
    /// Enables Regolith at genesis (implies Bedrock).
    pub fn regolith_activated(self) -> Self {
        let inner = self
            .bedrock_activated()
            .inner
            .with_fork(OpHardfork::Regolith, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Canyon at genesis (implies Regolith).
    ///
    /// Canyon also activates the changes from L1's Shanghai hardfork.
    pub fn canyon_activated(self) -> Self {
        let inner = self
            .regolith_activated()
            .inner
            .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0))
            .with_fork(OpHardfork::Canyon, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Ecotone at genesis (implies Canyon), along with L1's Cancun.
    pub fn ecotone_activated(self) -> Self {
        let inner = self
            .canyon_activated()
            .inner
            .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0))
            .with_fork(OpHardfork::Ecotone, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Fjord at genesis (implies Ecotone).
    pub fn fjord_activated(self) -> Self {
        let inner =
            self.ecotone_activated().inner.with_fork(OpHardfork::Fjord, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Granite at genesis (implies Fjord).
    pub fn granite_activated(self) -> Self {
        let inner =
            self.fjord_activated().inner.with_fork(OpHardfork::Granite, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Holocene at genesis (implies Granite).
    pub fn holocene_activated(self) -> Self {
        let inner = self
            .granite_activated()
            .inner
            .with_fork(OpHardfork::Holocene, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Isthmus at genesis (implies Holocene).
    pub fn isthmus_activated(self) -> Self {
        let inner = self
            .holocene_activated()
            .inner
            .with_fork(OpHardfork::Isthmus, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Jovian at genesis (implies Isthmus).
    pub fn jovian_activated(self) -> Self {
        let inner =
            self.isthmus_activated().inner.with_fork(OpHardfork::Jovian, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Enables Interop at genesis (implies Jovian).
    pub fn interop_activated(self) -> Self {
        let inner =
            self.jovian_activated().inner.with_fork(OpHardfork::Interop, ForkCondition::Timestamp(0));
        Self { inner }
    }
    /// Builds the resulting [`OpChainSpec`], resealing the genesis header so
    /// OP-specific genesis rules are applied.
    ///
    /// # Panics
    ///
    /// This function panics if the chain ID and genesis is not set ([`Self::chain`] and
    /// [`Self::genesis`])
    pub fn build(self) -> OpChainSpec {
        let mut inner = self.inner.build();
        let genesis = inner.genesis.clone();
        inner.genesis_header =
            SealedHeader::seal_slow(make_op_genesis_header(&genesis, &inner.hardforks));
        OpChainSpec { inner }
    }
}
/// OP stack chain spec type.
///
/// Wraps the L1 [`ChainSpec`] (also reachable via `Deref`) and layers OP
/// hardfork semantics on top through the trait impls below.
#[derive(Debug, Clone, Deref, Into, Constructor, PartialEq, Eq)]
pub struct OpChainSpec {
    /// [`ChainSpec`].
    pub inner: ChainSpec,
}
impl OpChainSpec {
    /// Builds an [`OpChainSpec`] from the given [`Genesis`].
    ///
    /// Convenience alias for the [`From<Genesis>`] conversion.
    pub fn from_genesis(genesis: Genesis) -> Self {
        Self::from(genesis)
    }
}
// Mostly delegates to the inner L1 `ChainSpec`; the OP-specific overrides are
// `display_hardforks`, `is_optimism` and `next_block_base_fee`.
impl EthChainSpec for OpChainSpec {
    type Header = Header;
    fn chain(&self) -> Chain {
        self.inner.chain()
    }
    fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams {
        self.inner.base_fee_params_at_timestamp(timestamp)
    }
    fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<BlobParams> {
        self.inner.blob_params_at_timestamp(timestamp)
    }
    fn deposit_contract(&self) -> Option<&DepositContract> {
        self.inner.deposit_contract()
    }
    fn genesis_hash(&self) -> B256 {
        self.inner.genesis_hash()
    }
    fn prune_delete_limit(&self) -> usize {
        self.inner.prune_delete_limit()
    }
    fn display_hardforks(&self) -> Box<dyn core::fmt::Display> {
        // filter only op hardforks: drop anything whose name matches a known
        // Ethereum hardfork variant so only OP forks are displayed
        let op_forks = self.inner.hardforks.forks_iter().filter(|(fork, _)| {
            !EthereumHardfork::VARIANTS.iter().any(|h| h.name() == (*fork).name())
        });
        Box::new(DisplayHardforks::new(op_forks))
    }
    fn genesis_header(&self) -> &Self::Header {
        self.inner.genesis_header()
    }
    fn genesis(&self) -> &seismic_alloy_genesis::Genesis {
        self.inner.genesis()
    }
    fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
        self.inner.bootnodes()
    }
    // This type always describes an OP-stack chain.
    fn is_optimism(&self) -> bool {
        true
    }
    fn final_paris_total_difficulty(&self) -> Option<U256> {
        self.inner.final_paris_total_difficulty()
    }
    fn next_block_base_fee(&self, parent: &Header, target_timestamp: u64) -> Option<u64> {
        // Post-Holocene the base fee params are encoded in the parent header
        // (decoded by `decode_holocene_base_fee`); decode failures yield None.
        if self.is_holocene_active_at_timestamp(parent.timestamp()) {
            decode_holocene_base_fee(self, parent, target_timestamp).ok()
        } else {
            self.inner.next_block_base_fee(parent, target_timestamp)
        }
    }
}
// Pure delegation to the inner `ChainSpec`'s hardfork schedule.
impl Hardforks for OpChainSpec {
    fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
        self.inner.fork(fork)
    }
    fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
        self.inner.forks_iter()
    }
    fn fork_id(&self, head: &Head) -> ForkId {
        self.inner.fork_id(head)
    }
    fn latest_fork_id(&self) -> ForkId {
        self.inner.latest_fork_id()
    }
    fn fork_filter(&self, head: Head) -> ForkFilter {
        self.inner.fork_filter(head)
    }
}
// Ethereum hardfork activation is answered from the same unified schedule.
impl EthereumHardforks for OpChainSpec {
    fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
        self.fork(fork)
    }
}
// OP hardfork activation likewise resolves through the unified schedule.
impl OpHardforks for OpChainSpec {
    fn op_fork_activation(&self, fork: OpHardfork) -> ForkCondition {
        self.fork(fork)
    }
}
impl From<Genesis> for OpChainSpec {
    /// Builds an [`OpChainSpec`] from a [`Genesis`] config: collects block- and
    /// time-based hardforks from the genesis config, orders them to match the
    /// OP mainnet schedule, and seals the resulting genesis header.
    fn from(genesis: Genesis) -> Self {
        use reth_optimism_forks::OpHardfork;
        // OP-specific metadata lives in the genesis config's extra fields.
        let optimism_genesis_info = OpGenesisInfo::extract_from(&genesis);
        let genesis_info =
            optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default();
        // Block-based hardforks
        let hardfork_opts = [
            (EthereumHardfork::Frontier.boxed(), Some(0)),
            (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block),
            (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block),
            (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block),
            (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block),
            (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block),
            (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block),
            (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block),
            (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block),
            (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block),
            (EthereumHardfork::London.boxed(), genesis.config.london_block),
            (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block),
            (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block),
            (OpHardfork::Bedrock.boxed(), genesis_info.bedrock_block),
        ];
        // Forks without a configured activation block are simply omitted.
        let mut block_hardforks = hardfork_opts
            .into_iter()
            .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block))))
            .collect::<Vec<_>>();
        // We set the paris hardfork for OP networks to zero
        block_hardforks.push((
            EthereumHardfork::Paris.boxed(),
            ForkCondition::TTD {
                activation_block_number: 0,
                total_difficulty: U256::ZERO,
                fork_block: genesis.config.merge_netsplit_block,
            },
        ));
        // Time-based hardforks
        let time_hardfork_opts = [
            // L1
            // we need to map the L1 hardforks to the activation timestamps of the corresponding op
            // hardforks
            (EthereumHardfork::Shanghai.boxed(), genesis_info.canyon_time),
            (EthereumHardfork::Cancun.boxed(), genesis_info.ecotone_time),
            (EthereumHardfork::Prague.boxed(), genesis_info.isthmus_time),
            // OP
            (OpHardfork::Regolith.boxed(), genesis_info.regolith_time),
            (OpHardfork::Canyon.boxed(), genesis_info.canyon_time),
            (OpHardfork::Ecotone.boxed(), genesis_info.ecotone_time),
            (OpHardfork::Fjord.boxed(), genesis_info.fjord_time),
            (OpHardfork::Granite.boxed(), genesis_info.granite_time),
            (OpHardfork::Holocene.boxed(), genesis_info.holocene_time),
            (OpHardfork::Isthmus.boxed(), genesis_info.isthmus_time),
            (OpHardfork::Jovian.boxed(), genesis_info.jovian_time),
            (OpHardfork::Interop.boxed(), genesis_info.interop_time),
        ];
        let mut time_hardforks = time_hardfork_opts
            .into_iter()
            .filter_map(|(hardfork, opt)| {
                opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))
            })
            .collect::<Vec<_>>();
        block_hardforks.append(&mut time_hardforks);
        // Ordered Hardforks: sort collected forks to follow the OP mainnet
        // schedule's canonical ordering.
        let mainnet_hardforks = OP_MAINNET_HARDFORKS.clone();
        let mainnet_order = mainnet_hardforks.forks_iter();
        let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len());
        for (hardfork, _) in mainnet_order {
            if let Some(pos) = block_hardforks.iter().position(|(e, _)| **e == *hardfork) {
                ordered_hardforks.push(block_hardforks.remove(pos));
            }
        }
        // append the remaining unknown hardforks to ensure we don't filter any out
        ordered_hardforks.append(&mut block_hardforks);
        let hardforks = ChainHardforks::new(ordered_hardforks);
        // Reseal the genesis header so OP genesis rules are applied.
        let genesis_header =
            SealedHeader::seal_slow(make_op_genesis_header(&genesis.clone().into(), &hardforks));
        Self {
            inner: ChainSpec {
                chain: genesis.config.chain_id.into(),
                genesis_header,
                genesis: genesis.into(),
                hardforks,
                // We assume no OP network merges, and set the paris block and total difficulty to
                // zero
                paris_block_and_final_difficulty: Some((0, U256::ZERO)),
                base_fee_params: optimism_genesis_info.base_fee_params,
                ..Default::default()
            },
        }
    }
}
// Plain wrapping conversion: no OP-specific processing is performed here.
impl From<ChainSpec> for OpChainSpec {
    fn from(value: ChainSpec) -> Self {
        Self { inner: value }
    }
}
/// OP-specific genesis metadata parsed out of a [`Genesis`] config.
#[derive(Default, Debug)]
struct OpGenesisInfo {
    // Chain info (fork activation times etc.) from the config's extra fields.
    optimism_chain_info: op_alloy_rpc_types::OpChainInfo,
    // EIP-1559 params derived from the chain info, if present.
    base_fee_params: BaseFeeParamsKind,
}
impl OpGenesisInfo {
    /// Extracts OP chain info and EIP-1559 base fee parameters from the
    /// genesis config's extra fields. Missing or partial base fee info leaves
    /// `base_fee_params` at its default.
    fn extract_from(genesis: &Genesis) -> Self {
        let mut info = Self {
            optimism_chain_info: op_alloy_rpc_types::OpChainInfo::extract_from(
                &genesis.config.extra_fields,
            )
            .unwrap_or_default(),
            ..Default::default()
        };
        if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info {
            // Both elasticity and denominator must be present to build params.
            if let (Some(elasticity), Some(denominator)) = (
                optimism_base_fee_info.eip1559_elasticity,
                optimism_base_fee_info.eip1559_denominator,
            ) {
                // With a Canyon denominator, params vary by hardfork (London
                // params pre-Canyon, Canyon params after); otherwise constant.
                let base_fee_params = if let Some(canyon_denominator) =
                    optimism_base_fee_info.eip1559_denominator_canyon
                {
                    BaseFeeParamsKind::Variable(
                        vec![
                            (
                                EthereumHardfork::London.boxed(),
                                BaseFeeParams::new(denominator as u128, elasticity as u128),
                            ),
                            (
                                OpHardfork::Canyon.boxed(),
                                BaseFeeParams::new(canyon_denominator as u128, elasticity as u128),
                            ),
                        ]
                        .into(),
                    )
                } else {
                    BaseFeeParams::new(denominator as u128, elasticity as u128).into()
                };
                info.base_fee_params = base_fee_params;
            }
        }
        info
    }
}
/// Builds the genesis [`Header`] for an OP chain from its [`Genesis`] and
/// [`ChainHardforks`].
///
/// When Isthmus is active at the genesis timestamp, the header's withdrawals
/// root is overwritten with the storage root of the `L2ToL1MessagePasser.sol`
/// predeploy, matching the post-Isthmus header rules.
pub fn make_op_genesis_header(
    genesis: &seismic_alloy_genesis::Genesis,
    hardforks: &ChainHardforks,
) -> Header {
    let mut header = reth_chainspec::make_genesis_header(&genesis.clone().into(), hardforks);
    if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) {
        // Storage of the message passer predeploy, if it is part of the alloc.
        let passer_storage = genesis
            .alloc
            .get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER)
            .and_then(|predeploy| predeploy.storage.as_ref());
        if let Some(storage) = passer_storage {
            // Zero-valued slots are excluded from the root computation.
            let non_zero_slots =
                storage.iter().filter(|(_, v)| !v.is_zero()).map(|(k, v)| (*k, *v));
            header.withdrawals_root = Some(storage_root_unhashed(non_zero_slots));
        }
    }
    header
}
#[cfg(test)]
mod tests {
use alloc::string::String;
use alloy_genesis::{ChainConfig, Genesis};
use alloy_primitives::b256;
use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind};
use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head};
use reth_optimism_forks::{OpHardfork, OpHardforks};
use crate::*;
#[test]
fn test_storage_root_consistency() {
use alloy_primitives::{B256, U256};
use std::str::FromStr;
let k1 =
B256::from_str("0x0000000000000000000000000000000000000000000000000000000000000001")
.unwrap();
let v1 =
U256::from_str("0x0000000000000000000000000000000000000000000000000000000000000000")
.unwrap();
let k2 =
B256::from_str("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc")
.unwrap();
let v2 =
U256::from_str("0x000000000000000000000000c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30016")
.unwrap();
let k3 =
B256::from_str("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103")
.unwrap();
let v3 =
U256::from_str("0x0000000000000000000000004200000000000000000000000000000000000018")
.unwrap();
let origin_root =
B256::from_str("0x5d5ba3a8093ede3901ad7a569edfb7b9aecafa54730ba0bf069147cbcc00e345")
.unwrap();
let expected_root =
B256::from_str("0x8ed4baae3a927be3dea54996b4d5899f8c01e7594bf50b17dc1e741388ce3d12")
.unwrap();
let storage_origin = vec![(k1, v1), (k2, v2), (k3, v3)];
let storage_fix = vec![(k2, v2), (k3, v3)];
let root_origin = storage_root_unhashed(storage_origin);
let root_fix = storage_root_unhashed(storage_fix);
assert_ne!(root_origin, root_fix);
assert_eq!(root_origin, origin_root);
assert_eq!(root_fix, expected_root);
}
#[test]
fn base_mainnet_forkids() {
let mut base_mainnet = OpChainSpecBuilder::base_mainnet().build();
base_mainnet.inner.genesis_header.set_hash(BASE_MAINNET.genesis_hash());
test_fork_ids(
&BASE_MAINNET,
&[
(
Head { number: 0, ..Default::default() },
ForkId { hash: ForkHash([0x67, 0xda, 0x02, 0x60]), next: 1704992401 },
),
(
Head { number: 0, timestamp: 1704992400, ..Default::default() },
ForkId { hash: ForkHash([0x67, 0xda, 0x02, 0x60]), next: 1704992401 },
),
(
Head { number: 0, timestamp: 1704992401, ..Default::default() },
ForkId { hash: ForkHash([0x3c, 0x28, 0x3c, 0xb3]), next: 1710374401 },
),
(
Head { number: 0, timestamp: 1710374400, ..Default::default() },
ForkId { hash: ForkHash([0x3c, 0x28, 0x3c, 0xb3]), next: 1710374401 },
),
(
Head { number: 0, timestamp: 1710374401, ..Default::default() },
ForkId { hash: ForkHash([0x51, 0xcc, 0x98, 0xb3]), next: 1720627201 },
),
(
Head { number: 0, timestamp: 1720627200, ..Default::default() },
ForkId { hash: ForkHash([0x51, 0xcc, 0x98, 0xb3]), next: 1720627201 },
),
(
Head { number: 0, timestamp: 1720627201, ..Default::default() },
ForkId { hash: ForkHash([0xe4, 0x01, 0x0e, 0xb9]), next: 1726070401 },
),
(
Head { number: 0, timestamp: 1726070401, ..Default::default() },
ForkId { hash: ForkHash([0xbc, 0x38, 0xf9, 0xca]), next: 1736445601 },
),
(
Head { number: 0, timestamp: 1736445601, ..Default::default() },
ForkId { hash: ForkHash([0x3a, 0x2a, 0xf1, 0x83]), next: 1746806401 },
),
// Isthmus
(
Head { number: 0, timestamp: 1746806401, ..Default::default() },
ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, /* TODO: update timestamp when Jovian is planned */
),
// // Jovian
// (
// Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO:
// update timestamp when Jovian is planned */ ForkId { hash:
// ForkHash([0xef, 0x0e, 0x58, 0x33]), next: 0 }, ),
],
);
}
#[test]
fn op_sepolia_forkids() {
test_fork_ids(
&OP_SEPOLIA,
&[
(
Head { number: 0, ..Default::default() },
ForkId { hash: ForkHash([0x67, 0xa4, 0x03, 0x28]), next: 1699981200 },
),
(
Head { number: 0, timestamp: 1699981199, ..Default::default() },
ForkId { hash: ForkHash([0x67, 0xa4, 0x03, 0x28]), next: 1699981200 },
),
(
Head { number: 0, timestamp: 1699981200, ..Default::default() },
ForkId { hash: ForkHash([0xa4, 0x8d, 0x6a, 0x00]), next: 1708534800 },
),
(
Head { number: 0, timestamp: 1708534799, ..Default::default() },
ForkId { hash: ForkHash([0xa4, 0x8d, 0x6a, 0x00]), next: 1708534800 },
),
(
Head { number: 0, timestamp: 1708534800, ..Default::default() },
ForkId { hash: ForkHash([0xcc, 0x17, 0xc7, 0xeb]), next: 1716998400 },
),
(
Head { number: 0, timestamp: 1716998399, ..Default::default() },
ForkId { hash: ForkHash([0xcc, 0x17, 0xc7, 0xeb]), next: 1716998400 },
),
(
Head { number: 0, timestamp: 1716998400, ..Default::default() },
ForkId { hash: ForkHash([0x54, 0x0a, 0x8c, 0x5d]), next: 1723478400 },
),
(
Head { number: 0, timestamp: 1723478399, ..Default::default() },
ForkId { hash: ForkHash([0x54, 0x0a, 0x8c, 0x5d]), next: 1723478400 },
),
(
Head { number: 0, timestamp: 1723478400, ..Default::default() },
ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732633200 },
),
(
Head { number: 0, timestamp: 1732633200, ..Default::default() },
ForkId { hash: ForkHash([0x4a, 0x1c, 0x79, 0x2e]), next: 1744905600 },
),
// Isthmus
(
Head { number: 0, timestamp: 1744905600, ..Default::default() },
ForkId { hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), next: 0 }, /* TODO: update timestamp when Jovian is planned */
),
// // Jovian
// (
// Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO:
// update timestamp when Jovian is planned */ ForkId { hash:
// ForkHash([0x04, 0x2a, 0x5c, 0x14]), next: 0 }, ),
],
);
}
#[test]
fn op_mainnet_forkids() {
let mut op_mainnet = OpChainSpecBuilder::optimism_mainnet().build();
// for OP mainnet we have to do this because the genesis header can't be properly computed
// from the genesis.json file
op_mainnet.inner.genesis_header.set_hash(OP_MAINNET.genesis_hash());
test_fork_ids(
&op_mainnet,
&[
(
Head { number: 0, ..Default::default() },
ForkId { hash: ForkHash([0xca, 0xf5, 0x17, 0xed]), next: 3950000 },
),
// London
(
Head { number: 105235063, ..Default::default() },
ForkId { hash: ForkHash([0xe3, 0x39, 0x8d, 0x7c]), next: 1704992401 },
),
// Bedrock
(
Head { number: 105235063, ..Default::default() },
ForkId { hash: ForkHash([0xe3, 0x39, 0x8d, 0x7c]), next: 1704992401 },
),
// Shanghai
(
Head { number: 105235063, timestamp: 1704992401, ..Default::default() },
ForkId { hash: ForkHash([0xbd, 0xd4, 0xfd, 0xb2]), next: 1710374401 },
),
// OP activation timestamps
// https://specs.optimism.io/protocol/superchain-upgrades.html#activation-timestamps
// Canyon
(
Head { number: 105235063, timestamp: 1704992401, ..Default::default() },
ForkId { hash: ForkHash([0xbd, 0xd4, 0xfd, 0xb2]), next: 1710374401 },
),
// Ecotone
(
Head { number: 105235063, timestamp: 1710374401, ..Default::default() },
ForkId { hash: ForkHash([0x19, 0xda, 0x4c, 0x52]), next: 1720627201 },
),
// Fjord
(
Head { number: 105235063, timestamp: 1720627201, ..Default::default() },
ForkId { hash: ForkHash([0x49, 0xfb, 0xfe, 0x1e]), next: 1726070401 },
),
// Granite
(
Head { number: 105235063, timestamp: 1726070401, ..Default::default() },
ForkId { hash: ForkHash([0x44, 0x70, 0x4c, 0xde]), next: 1736445601 },
),
// Holocene
(
Head { number: 105235063, timestamp: 1736445601, ..Default::default() },
ForkId { hash: ForkHash([0x2b, 0xd9, 0x3d, 0xc8]), next: 1746806401 },
),
// Isthmus
(
Head { number: 105235063, timestamp: 1746806401, ..Default::default() },
ForkId { hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), next: 0 }, /* TODO: update timestamp when Jovian is planned */
),
// Jovian
// (
// Head { number: 105235063, timestamp: u64::MAX, ..Default::default() }, /*
// TODO: update timestamp when Jovian is planned */ ForkId {
// hash: ForkHash([0x26, 0xce, 0xa1, 0x75]), next: 0 }, ),
],
);
}
#[test]
fn base_sepolia_forkids() {
test_fork_ids(
&BASE_SEPOLIA,
&[
(
Head { number: 0, ..Default::default() },
ForkId { hash: ForkHash([0xb9, 0x59, 0xb9, 0xf7]), next: 1699981200 },
),
(
Head { number: 0, timestamp: 1699981199, ..Default::default() },
ForkId { hash: ForkHash([0xb9, 0x59, 0xb9, 0xf7]), next: 1699981200 },
),
(
Head { number: 0, timestamp: 1699981200, ..Default::default() },
ForkId { hash: ForkHash([0x60, 0x7c, 0xd5, 0xa1]), next: 1708534800 },
),
(
Head { number: 0, timestamp: 1708534799, ..Default::default() },
ForkId { hash: ForkHash([0x60, 0x7c, 0xd5, 0xa1]), next: 1708534800 },
),
(
Head { number: 0, timestamp: 1708534800, ..Default::default() },
ForkId { hash: ForkHash([0xbe, 0x96, 0x9b, 0x17]), next: 1716998400 },
),
(
Head { number: 0, timestamp: 1716998399, ..Default::default() },
ForkId { hash: ForkHash([0xbe, 0x96, 0x9b, 0x17]), next: 1716998400 },
),
(
Head { number: 0, timestamp: 1716998400, ..Default::default() },
ForkId { hash: ForkHash([0x4e, 0x45, 0x7a, 0x49]), next: 1723478400 },
),
(
Head { number: 0, timestamp: 1723478399, ..Default::default() },
ForkId { hash: ForkHash([0x4e, 0x45, 0x7a, 0x49]), next: 1723478400 },
),
(
Head { number: 0, timestamp: 1723478400, ..Default::default() },
ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732633200 },
),
(
Head { number: 0, timestamp: 1732633200, ..Default::default() },
ForkId { hash: ForkHash([0x8b, 0x5e, 0x76, 0x29]), next: 1744905600 },
),
// Isthmus
(
Head { number: 0, timestamp: 1744905600, ..Default::default() },
ForkId { hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), next: 0 }, /* TODO: update timestamp when Jovian is planned */
),
// // Jovian
// (
// Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO:
// update timestamp when Jovian is planned */ ForkId { hash:
// ForkHash([0xcd, 0xfd, 0x39, 0x99]), next: 0 }, ),
],
);
}
#[test]
fn base_mainnet_genesis() {
let genesis = BASE_MAINNET.genesis_header();
assert_eq!(
genesis.hash_slow(),
b256!("0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd")
);
let base_fee = BASE_MAINNET.next_block_base_fee(genesis, genesis.timestamp).unwrap();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/basefee.rs | crates/optimism/chainspec/src/basefee.rs | //! Base fee related utilities for Optimism chains.
use alloy_consensus::BlockHeader;
use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError};
use reth_chainspec::{BaseFeeParams, EthChainSpec};
use reth_optimism_forks::OpHardforks;
/// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header.
///
/// Caution: Caller must ensure that holocene is active in the parent header.
///
/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation)
pub fn decode_holocene_base_fee<H>(
chain_spec: impl EthChainSpec + OpHardforks,
parent: &H,
timestamp: u64,
) -> Result<u64, EIP1559ParamError>
where
H: BlockHeader,
{
let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?;
let base_fee_params = if elasticity == 0 && denominator == 0 {
chain_spec.base_fee_params_at_timestamp(timestamp)
} else {
BaseFeeParams::new(denominator as u128, elasticity as u128)
};
Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/op_sepolia.rs | crates/optimism/chainspec/src/op_sepolia.rs | //! Chain specification for the Optimism Sepolia testnet network.
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
use alloc::{sync::Arc, vec};
use alloy_chains::{Chain, NamedChain};
use alloy_primitives::{b256, U256};
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork};
use reth_ethereum_forks::EthereumHardfork;
use reth_optimism_forks::{OpHardfork, OP_SEPOLIA_HARDFORKS};
use reth_primitives_traits::SealedHeader;
/// The OP Sepolia spec
pub static OP_SEPOLIA: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
let genesis = serde_json::from_str(include_str!("../res/genesis/sepolia_op.json"))
.expect("Can't deserialize OP Sepolia genesis json");
let hardforks = OP_SEPOLIA_HARDFORKS.clone();
OpChainSpec {
inner: ChainSpec {
chain: Chain::from_named(NamedChain::OptimismSepolia),
genesis_header: SealedHeader::new(
make_op_genesis_header(&genesis, &hardforks),
b256!("0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d"),
),
genesis: genesis.into(),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
hardforks,
base_fee_params: BaseFeeParamsKind::Variable(
vec![
(EthereumHardfork::London.boxed(), BaseFeeParams::optimism_sepolia()),
(OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()),
]
.into(),
),
prune_delete_limit: 10000,
..Default::default()
},
}
.into()
});
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/constants.rs | crates/optimism/chainspec/src/constants.rs | //! OP stack variation of chain spec constants.
use alloy_primitives::hex;
//------------------------------- BASE MAINNET -------------------------------//
/// Max gas limit on Base: <https://basescan.org/block/17208876>
pub const BASE_MAINNET_MAX_GAS_LIMIT: u64 = 105_000_000;
//------------------------------- BASE SEPOLIA -------------------------------//
/// Max gas limit on Base Sepolia: <https://sepolia.basescan.org/block/12506483>
pub const BASE_SEPOLIA_MAX_GAS_LIMIT: u64 = 45_000_000;
//----------------------------------- DEV ------------------------------------//
/// Dummy system transaction for dev mode
/// OP Mainnet transaction at index 0 in block 124665056.
///
/// <https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1>
pub const TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056: [u8; 251] = hex!(
"7ef8f8a0683079df94aa5b9cf86687d739a60a9b4f0835e520ec4d664e2e415dca17a6df94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"
);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/base_sepolia.rs | crates/optimism/chainspec/src/base_sepolia.rs | //! Chain specification for the Base Sepolia testnet network.
use alloc::{sync::Arc, vec};
use alloy_chains::Chain;
use alloy_primitives::{b256, U256};
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork};
use reth_ethereum_forks::EthereumHardfork;
use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS};
use reth_primitives_traits::SealedHeader;
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
/// The Base Sepolia spec
pub static BASE_SEPOLIA: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
let genesis = serde_json::from_str(include_str!("../res/genesis/sepolia_base.json"))
.expect("Can't deserialize Base Sepolia genesis json");
let hardforks = BASE_SEPOLIA_HARDFORKS.clone();
OpChainSpec {
inner: ChainSpec {
chain: Chain::base_sepolia(),
genesis_header: SealedHeader::new(
make_op_genesis_header(&genesis, &hardforks),
b256!("0x0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4"),
),
genesis: genesis.into(),
paris_block_and_final_difficulty: Some((0, U256::from(0))),
hardforks,
base_fee_params: BaseFeeParamsKind::Variable(
vec![
(EthereumHardfork::London.boxed(), BaseFeeParams::base_sepolia()),
(OpHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()),
]
.into(),
),
prune_delete_limit: 10000,
..Default::default()
},
}
.into()
});
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/superchain/chain_metadata.rs | crates/optimism/chainspec/src/superchain/chain_metadata.rs | use alloy_chains::NamedChain;
use alloy_genesis::ChainConfig;
use alloy_primitives::{ChainId, U256};
use serde::{Deserialize, Serialize};
/// The chain metadata stored in a superchain toml config file.
/// Referring here as `ChainMetadata` to avoid confusion with `ChainConfig`.
/// Find configs here: `<https://github.com/ethereum-optimism/superchain-registry/tree/main/superchain/configs>`
/// This struct is stripped down to only include the necessary fields. We use JSON instead of
/// TOML to make it easier to work in a no-std environment.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct ChainMetadata {
pub chain_id: ChainId,
pub hardforks: HardforkConfig,
pub optimism: Option<OptimismConfig>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct HardforkConfig {
pub canyon_time: Option<u64>,
pub delta_time: Option<u64>,
pub ecotone_time: Option<u64>,
pub fjord_time: Option<u64>,
pub granite_time: Option<u64>,
pub holocene_time: Option<u64>,
pub isthmus_time: Option<u64>,
pub jovian_time: Option<u64>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub(crate) struct OptimismConfig {
pub eip1559_elasticity: u64,
pub eip1559_denominator: u64,
pub eip1559_denominator_canyon: Option<u64>,
}
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ChainConfigExtraFields {
#[serde(skip_serializing_if = "Option::is_none")]
pub bedrock_block: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub regolith_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub canyon_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub delta_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ecotone_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub fjord_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub granite_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub holocene_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub isthmus_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub jovian_time: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub optimism: Option<ChainConfigExtraFieldsOptimism>,
}
// Helper struct to serialize field for extra fields in ChainConfig
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ChainConfigExtraFieldsOptimism {
pub eip1559_elasticity: u64,
pub eip1559_denominator: u64,
pub eip1559_denominator_canyon: Option<u64>,
}
impl From<&OptimismConfig> for ChainConfigExtraFieldsOptimism {
fn from(value: &OptimismConfig) -> Self {
Self {
eip1559_elasticity: value.eip1559_elasticity,
eip1559_denominator: value.eip1559_denominator,
eip1559_denominator_canyon: value.eip1559_denominator_canyon,
}
}
}
/// Returns a [`ChainConfig`] filled from [`ChainMetadata`] with extra fields and handling
/// special case for Optimism chain.
// Mimic the behavior from https://github.com/ethereum-optimism/op-geth/blob/35e2c852/params/superchain.go#L26
pub(crate) fn to_genesis_chain_config(chain_config: &ChainMetadata) -> ChainConfig {
let mut res = ChainConfig {
chain_id: chain_config.chain_id,
homestead_block: Some(0),
dao_fork_block: None,
dao_fork_support: false,
eip150_block: Some(0),
eip155_block: Some(0),
eip158_block: Some(0),
byzantium_block: Some(0),
constantinople_block: Some(0),
petersburg_block: Some(0),
istanbul_block: Some(0),
muir_glacier_block: Some(0),
berlin_block: Some(0),
london_block: Some(0),
arrow_glacier_block: Some(0),
gray_glacier_block: Some(0),
merge_netsplit_block: Some(0),
shanghai_time: chain_config.hardforks.canyon_time, // Shanghai activates with Canyon
cancun_time: chain_config.hardforks.ecotone_time, // Cancun activates with Ecotone
prague_time: chain_config.hardforks.isthmus_time, // Prague activates with Isthmus
osaka_time: None,
terminal_total_difficulty: Some(U256::ZERO),
terminal_total_difficulty_passed: true,
ethash: None,
clique: None,
..Default::default()
};
// Special case for Optimism chain
if chain_config.chain_id == NamedChain::Optimism as ChainId {
res.berlin_block = Some(3950000);
res.london_block = Some(105235063);
res.arrow_glacier_block = Some(105235063);
res.gray_glacier_block = Some(105235063);
res.merge_netsplit_block = Some(105235063);
}
// Add extra fields for ChainConfig from Genesis
let extra_fields = ChainConfigExtraFields {
bedrock_block: if chain_config.chain_id == NamedChain::Optimism as ChainId {
Some(105235063)
} else {
Some(0)
},
regolith_time: Some(0),
canyon_time: chain_config.hardforks.canyon_time,
delta_time: chain_config.hardforks.delta_time,
ecotone_time: chain_config.hardforks.ecotone_time,
fjord_time: chain_config.hardforks.fjord_time,
granite_time: chain_config.hardforks.granite_time,
holocene_time: chain_config.hardforks.holocene_time,
isthmus_time: chain_config.hardforks.isthmus_time,
jovian_time: chain_config.hardforks.jovian_time,
optimism: chain_config.optimism.as_ref().map(|o| o.into()),
};
res.extra_fields =
serde_json::to_value(extra_fields).unwrap_or_default().try_into().unwrap_or_default();
res
}
#[cfg(test)]
mod tests {
use super::*;
const BASE_CHAIN_METADATA: &str = r#"
{
"chain_id": 8453,
"hardforks": {
"canyon_time": 1704992401,
"delta_time": 1708560000,
"ecotone_time": 1710374401,
"fjord_time": 1720627201,
"granite_time": 1726070401,
"holocene_time": 1736445601,
"isthmus_time": 1746806401
},
"optimism": {
"eip1559_elasticity": 6,
"eip1559_denominator": 50,
"eip1559_denominator_canyon": 250
}
}
"#;
#[test]
fn test_deserialize_chain_config() {
let config: ChainMetadata = serde_json::from_str(BASE_CHAIN_METADATA).unwrap();
assert_eq!(config.chain_id, 8453);
// hardforks
assert_eq!(config.hardforks.canyon_time, Some(1704992401));
assert_eq!(config.hardforks.delta_time, Some(1708560000));
assert_eq!(config.hardforks.ecotone_time, Some(1710374401));
assert_eq!(config.hardforks.fjord_time, Some(1720627201));
assert_eq!(config.hardforks.granite_time, Some(1726070401));
assert_eq!(config.hardforks.holocene_time, Some(1736445601));
assert_eq!(config.hardforks.isthmus_time, Some(1746806401));
// optimism
assert_eq!(config.optimism.as_ref().unwrap().eip1559_elasticity, 6);
assert_eq!(config.optimism.as_ref().unwrap().eip1559_denominator, 50);
assert_eq!(config.optimism.as_ref().unwrap().eip1559_denominator_canyon, Some(250));
}
#[test]
fn test_chain_config_extra_fields() {
let extra_fields = ChainConfigExtraFields {
bedrock_block: Some(105235063),
regolith_time: Some(0),
canyon_time: Some(1704992401),
delta_time: Some(1708560000),
ecotone_time: Some(1710374401),
fjord_time: Some(1720627201),
granite_time: Some(1726070401),
holocene_time: Some(1736445601),
isthmus_time: Some(1746806401),
jovian_time: None,
optimism: Option::from(ChainConfigExtraFieldsOptimism {
eip1559_elasticity: 6,
eip1559_denominator: 50,
eip1559_denominator_canyon: Some(250),
}),
};
let value = serde_json::to_value(extra_fields).unwrap();
assert_eq!(value.get("bedrockBlock").unwrap(), 105235063);
assert_eq!(value.get("regolithTime").unwrap(), 0);
assert_eq!(value.get("canyonTime").unwrap(), 1704992401);
assert_eq!(value.get("deltaTime").unwrap(), 1708560000);
assert_eq!(value.get("ecotoneTime").unwrap(), 1710374401);
assert_eq!(value.get("fjordTime").unwrap(), 1720627201);
assert_eq!(value.get("graniteTime").unwrap(), 1726070401);
assert_eq!(value.get("holoceneTime").unwrap(), 1736445601);
assert_eq!(value.get("isthmusTime").unwrap(), 1746806401);
assert_eq!(value.get("jovianTime"), None);
let optimism = value.get("optimism").unwrap();
assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6);
assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50);
assert_eq!(optimism.get("eip1559DenominatorCanyon").unwrap(), 250);
}
#[test]
fn test_convert_to_genesis_chain_config() {
let config: ChainMetadata = serde_json::from_str(BASE_CHAIN_METADATA).unwrap();
let chain_config = to_genesis_chain_config(&config);
assert_eq!(chain_config.chain_id, 8453);
assert_eq!(chain_config.homestead_block, Some(0));
assert_eq!(chain_config.dao_fork_block, None);
assert!(!chain_config.dao_fork_support);
assert_eq!(chain_config.eip150_block, Some(0));
assert_eq!(chain_config.eip155_block, Some(0));
assert_eq!(chain_config.eip158_block, Some(0));
assert_eq!(chain_config.byzantium_block, Some(0));
assert_eq!(chain_config.constantinople_block, Some(0));
assert_eq!(chain_config.petersburg_block, Some(0));
assert_eq!(chain_config.istanbul_block, Some(0));
assert_eq!(chain_config.muir_glacier_block, Some(0));
assert_eq!(chain_config.berlin_block, Some(0));
assert_eq!(chain_config.london_block, Some(0));
assert_eq!(chain_config.arrow_glacier_block, Some(0));
assert_eq!(chain_config.gray_glacier_block, Some(0));
assert_eq!(chain_config.merge_netsplit_block, Some(0));
assert_eq!(chain_config.shanghai_time, Some(1704992401));
assert_eq!(chain_config.cancun_time, Some(1710374401));
assert_eq!(chain_config.prague_time, Some(1746806401));
assert_eq!(chain_config.osaka_time, None);
assert_eq!(chain_config.terminal_total_difficulty, Some(U256::ZERO));
assert!(chain_config.terminal_total_difficulty_passed);
assert_eq!(chain_config.ethash, None);
assert_eq!(chain_config.clique, None);
assert_eq!(chain_config.extra_fields.get("bedrockBlock").unwrap(), 0);
assert_eq!(chain_config.extra_fields.get("regolithTime").unwrap(), 0);
assert_eq!(chain_config.extra_fields.get("canyonTime").unwrap(), 1704992401);
assert_eq!(chain_config.extra_fields.get("deltaTime").unwrap(), 1708560000);
assert_eq!(chain_config.extra_fields.get("ecotoneTime").unwrap(), 1710374401);
assert_eq!(chain_config.extra_fields.get("fjordTime").unwrap(), 1720627201);
assert_eq!(chain_config.extra_fields.get("graniteTime").unwrap(), 1726070401);
assert_eq!(chain_config.extra_fields.get("holoceneTime").unwrap(), 1736445601);
assert_eq!(chain_config.extra_fields.get("isthmusTime").unwrap(), 1746806401);
assert_eq!(chain_config.extra_fields.get("jovianTime"), None);
let optimism = chain_config.extra_fields.get("optimism").unwrap();
assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6);
assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50);
assert_eq!(optimism.get("eip1559DenominatorCanyon").unwrap(), 250);
}
#[test]
fn test_convert_to_genesis_chain_config_op() {
const OP_CHAIN_METADATA: &str = r#"
{
"chain_id": 10,
"hardforks": {
"canyon_time": 1704992401,
"delta_time": 1708560000,
"ecotone_time": 1710374401,
"fjord_time": 1720627201,
"granite_time": 1726070401,
"holocene_time": 1736445601,
"isthmus_time": 1746806401
},
"optimism": {
"eip1559_elasticity": 6,
"eip1559_denominator": 50,
"eip1559_denominator_canyon": 250
}
}
"#;
let config: ChainMetadata = serde_json::from_str(OP_CHAIN_METADATA).unwrap();
assert_eq!(config.hardforks.canyon_time, Some(1704992401));
let chain_config = to_genesis_chain_config(&config);
assert_eq!(chain_config.chain_id, 10);
assert_eq!(chain_config.shanghai_time, Some(1704992401));
assert_eq!(chain_config.cancun_time, Some(1710374401));
assert_eq!(chain_config.prague_time, Some(1746806401));
assert_eq!(chain_config.berlin_block, Some(3950000));
assert_eq!(chain_config.london_block, Some(105235063));
assert_eq!(chain_config.arrow_glacier_block, Some(105235063));
assert_eq!(chain_config.gray_glacier_block, Some(105235063));
assert_eq!(chain_config.merge_netsplit_block, Some(105235063));
assert_eq!(chain_config.extra_fields.get("bedrockBlock").unwrap(), 105235063);
assert_eq!(chain_config.extra_fields.get("regolithTime").unwrap(), 0);
assert_eq!(chain_config.extra_fields.get("canyonTime").unwrap(), 1704992401);
assert_eq!(chain_config.extra_fields.get("deltaTime").unwrap(), 1708560000);
assert_eq!(chain_config.extra_fields.get("ecotoneTime").unwrap(), 1710374401);
assert_eq!(chain_config.extra_fields.get("fjordTime").unwrap(), 1720627201);
assert_eq!(chain_config.extra_fields.get("graniteTime").unwrap(), 1726070401);
assert_eq!(chain_config.extra_fields.get("holoceneTime").unwrap(), 1736445601);
assert_eq!(chain_config.extra_fields.get("isthmusTime").unwrap(), 1746806401);
assert_eq!(chain_config.extra_fields.get("jovianTime"), None);
let optimism = chain_config.extra_fields.get("optimism").unwrap();
assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6);
assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50);
assert_eq!(optimism.get("eip1559DenominatorCanyon").unwrap(), 250);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/superchain/chain_spec_macro.rs | crates/optimism/chainspec/src/superchain/chain_spec_macro.rs | /// Create a chain spec for a given superchain and environment.
#[macro_export]
macro_rules! create_chain_spec {
($name:expr, $environment:expr) => {
paste::paste! {
/// The Optimism $name $environment spec
pub static [<$name:upper _ $environment:upper>]: $crate::LazyLock<alloc::sync::Arc<$crate::OpChainSpec>> = $crate::LazyLock::new(|| {
$crate::OpChainSpec::from_genesis($crate::superchain::configs::read_superchain_genesis($name, $environment)
.expect(&alloc::format!("Can't read {}-{} genesis", $name, $environment)))
.into()
});
}
};
}
/// Generates the key string for a given name and environment pair.
#[macro_export]
macro_rules! key_for {
($name:expr, "mainnet") => {
$name
};
($name:expr, $env:expr) => {
concat!($name, "-", $env)
};
}
/// Create chain specs and an enum of every superchain (name, environment) pair.
#[macro_export]
macro_rules! create_superchain_specs {
( $( ($name:expr, $env:expr) ),+ $(,)? ) => {
$(
$crate::create_chain_spec!($name, $env);
)+
paste::paste! {
/// All available superchains as an enum
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[allow(non_camel_case_types)]
pub enum Superchain {
$(
#[doc = concat!("Superchain variant for `", $name, "-", $env, "`.")]
[<$name:camel _ $env:camel>],
)+
}
impl Superchain {
/// A slice of every superchain enum variant
pub const ALL: &'static [Self] = &[
$(
Self::[<$name:camel _ $env:camel>],
)+
];
/// Returns the original name
pub const fn name(self) -> &'static str {
match self {
$(
Self::[<$name:camel _ $env:camel>] => $name,
)+
}
}
/// Returns the original environment
pub const fn environment(self) -> &'static str {
match self {
$(
Self::[<$name:camel _ $env:camel>] => $env,
)+
}
}
}
/// All supported superchains, including both older and newer naming,
/// for backwards compatibility
pub const SUPPORTED_CHAINS: &'static [&'static str] = &[
"optimism",
"optimism_sepolia",
"optimism-sepolia",
"base",
"base_sepolia",
"base-sepolia",
$(
$crate::key_for!($name, $env),
)+
"dev",
];
/// Parses the chain into an [`$crate::OpChainSpec`], if recognized.
pub fn generated_chain_value_parser(s: &str) -> Option<alloc::sync::Arc<$crate::OpChainSpec>> {
match s {
"dev" => Some($crate::OP_DEV.clone()),
"optimism" => Some($crate::OP_MAINNET.clone()),
"optimism_sepolia" | "optimism-sepolia" => Some($crate::OP_SEPOLIA.clone()),
"base" => Some($crate::BASE_MAINNET.clone()),
"base_sepolia" | "base-sepolia" => Some($crate::BASE_SEPOLIA.clone()),
$(
$crate::key_for!($name, $env) => Some($crate::[<$name:upper _ $env:upper>].clone()),
)+
_ => None,
}
}
}
};
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/superchain/mod.rs | crates/optimism/chainspec/src/superchain/mod.rs | //! Support for superchain registry.
mod chain_metadata;
mod chain_spec_macro;
mod chain_specs;
mod configs;
pub use chain_specs::*;
#[cfg(test)]
mod tests {
use super::Superchain;
#[test]
fn round_trip_superchain_enum_name_and_env() {
for &chain in Superchain::ALL {
let name = chain.name();
let env = chain.environment();
assert!(!name.is_empty(), "name() must not be empty");
assert!(!env.is_empty(), "environment() must not be empty");
}
}
#[test]
fn superchain_enum_has_funki_mainnet() {
assert!(
Superchain::ALL.iter().any(|&c| c.name() == "funki" && c.environment() == "mainnet"),
"Expected funki/mainnet in ALL"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/superchain/chain_specs.rs | crates/optimism/chainspec/src/superchain/chain_specs.rs | // Generated by fetch_superchain_config.sh
use crate::create_superchain_specs;
// Each entry below is a `(chain name, superchain environment)` pair from the
// superchain registry; the macro generates a chain spec (and CLI parser entry)
// for every pair. Keep the list sorted by chain name, then environment.
create_superchain_specs!(
    ("arena-z", "mainnet"),
    ("arena-z-testnet", "sepolia"),
    ("automata", "mainnet"),
    ("base-devnet-0", "sepolia-dev-0"),
    ("bob", "mainnet"),
    ("boba", "sepolia"),
    ("creator-chain-testnet", "sepolia"),
    ("cyber", "mainnet"),
    ("cyber", "sepolia"),
    ("ethernity", "mainnet"),
    ("ethernity", "sepolia"),
    ("funki", "mainnet"),
    ("funki", "sepolia"),
    ("hashkeychain", "mainnet"),
    ("ink", "mainnet"),
    ("ink", "sepolia"),
    ("lisk", "mainnet"),
    ("lisk", "sepolia"),
    ("lyra", "mainnet"),
    ("metal", "mainnet"),
    ("metal", "sepolia"),
    ("mint", "mainnet"),
    ("mode", "mainnet"),
    ("mode", "sepolia"),
    ("oplabs-devnet-0", "sepolia-dev-0"),
    ("orderly", "mainnet"),
    ("pivotal", "sepolia"),
    ("polynomial", "mainnet"),
    ("race", "mainnet"),
    ("race", "sepolia"),
    ("redstone", "mainnet"),
    ("settlus-mainnet", "mainnet"),
    ("settlus-sepolia", "sepolia"),
    ("shape", "mainnet"),
    ("shape", "sepolia"),
    ("snax", "mainnet"),
    ("soneium", "mainnet"),
    ("soneium-minato", "sepolia"),
    ("sseed", "mainnet"),
    ("swan", "mainnet"),
    ("swell", "mainnet"),
    ("tbn", "mainnet"),
    ("tbn", "sepolia"),
    ("unichain", "mainnet"),
    ("unichain", "sepolia"),
    ("worldchain", "mainnet"),
    ("worldchain", "sepolia"),
    ("xterio-eth", "mainnet"),
    ("zora", "mainnet"),
    ("zora", "sepolia"),
);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/chainspec/src/superchain/configs.rs | crates/optimism/chainspec/src/superchain/configs.rs | use crate::superchain::chain_metadata::{to_genesis_chain_config, ChainMetadata};
use alloc::{
format,
string::{String, ToString},
vec::Vec,
};
use alloy_genesis::Genesis;
use miniz_oxide::inflate::decompress_to_vec_zlib_with_limit;
use tar_no_std::{CorruptDataError, TarArchiveRef};
/// Maximum allowed size for a decompressed genesis file (16 MiB).
///
/// NOTE(review): the previous doc said "up to 10MiB" while the value is 16 MiB;
/// the constant below is authoritative.
const MAX_GENESIS_SIZE: usize = 16 * 1024 * 1024; // 16MiB
/// The tar file contains the chain configs and genesis files for all chains.
const SUPER_CHAIN_CONFIGS_TAR_BYTES: &[u8] = include_bytes!("../../res/superchain-configs.tar");
/// Errors that can occur while reading chain configs and genesis files from the
/// embedded superchain config tar archive.
#[derive(Debug, thiserror::Error)]
pub(crate) enum SuperchainConfigError {
    /// The embedded tar archive could not be parsed.
    #[error("Error reading archive due to corrupt data: {0}")]
    CorruptDataError(CorruptDataError),
    /// A file's bytes were not valid UTF-8 when converting to an owned `String`.
    #[error("Error converting bytes to UTF-8 String: {0}")]
    FromUtf8Error(#[from] alloc::string::FromUtf8Error),
    /// An archive entry name was not valid UTF-8.
    #[error("Error reading file: {0}")]
    Utf8Error(#[from] core::str::Utf8Error),
    /// A config or genesis file failed to deserialize.
    #[error("Error deserializing JSON: {0}")]
    JsonError(#[from] serde_json::Error),
    /// The requested path was not present in the archive.
    #[error("File {0} not found in archive")]
    FileNotFound(String),
    /// Zlib decompression of a genesis file failed (or exceeded the size limit).
    #[error("Error decompressing file: {0}")]
    DecompressError(String),
}
/// Reads the [`Genesis`] from the superchain config tar file for a superchain.
/// For example, `read_superchain_genesis("unichain", "mainnet")`.
///
/// The genesis file is stored zlib-compressed at `genesis/{environment}/{name}.json.zz`
/// inside the embedded archive; decompression is capped at [`MAX_GENESIS_SIZE`].
pub(crate) fn read_superchain_genesis(
    name: &str,
    environment: &str,
) -> Result<Genesis, SuperchainConfigError> {
    // Open the archive.
    let archive = TarArchiveRef::new(SUPER_CHAIN_CONFIGS_TAR_BYTES)
        .map_err(SuperchainConfigError::CorruptDataError)?;
    // Read and decompress the genesis file.
    let compressed_genesis_file =
        read_file(&archive, &format!("genesis/{environment}/{name}.json.zz"))?;
    let genesis_file =
        decompress_to_vec_zlib_with_limit(&compressed_genesis_file, MAX_GENESIS_SIZE)
            .map_err(|e| SuperchainConfigError::DecompressError(format!("{e}")))?;
    // Load the genesis file.
    let mut genesis: Genesis = serde_json::from_slice(&genesis_file)?;
    // The "config" field is stripped (see fetch_superchain_config.sh) from the genesis file
    // because it is not always populated. For that reason, we read the config from the chain
    // metadata file. See: https://github.com/ethereum-optimism/superchain-registry/issues/901
    genesis.config =
        to_genesis_chain_config(&read_superchain_metadata(name, environment, &archive)?);
    Ok(genesis)
}
/// Reads the [`ChainMetadata`] from the already-opened superchain config tar
/// archive for a superchain, e.g. `read_superchain_metadata("unichain", "mainnet", &archive)`.
///
/// Metadata lives uncompressed at `configs/{environment}/{name}.json`.
fn read_superchain_metadata(
    name: &str,
    environment: &str,
    archive: &TarArchiveRef<'_>,
) -> Result<ChainMetadata, SuperchainConfigError> {
    let config_file = read_file(archive, &format!("configs/{environment}/{name}.json"))?;
    let config_content = String::from_utf8(config_file)?;
    let chain_config: ChainMetadata = serde_json::from_str(&config_content)?;
    Ok(chain_config)
}
/// Reads a file from the tar archive. The file path is relative to the root of the tar archive.
///
/// Returns [`SuperchainConfigError::FileNotFound`] if no entry matches.
fn read_file(
    archive: &TarArchiveRef<'_>,
    file_path: &str,
) -> Result<Vec<u8>, SuperchainConfigError> {
    // Linear scan is fine: the embedded archive is small and read rarely.
    for entry in archive.entries() {
        let entry_name = entry.filename();
        // A non-UTF-8 entry name is propagated as a `Utf8Error`.
        if entry_name.as_str()? != file_path {
            continue
        }
        return Ok(entry.data().to_vec());
    }
    Err(SuperchainConfigError::FileNotFound(file_path.to_string()))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::superchain::Superchain;
    use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
    use tar_no_std::TarArchiveRef;

    /// Happy path: a known chain's genesis decompresses and carries the
    /// expected chain id / timestamp / predeploy account.
    #[test]
    fn test_read_superchain_genesis() {
        let genesis = read_superchain_genesis("unichain", "mainnet").unwrap();
        assert_eq!(genesis.config.chain_id, 130);
        assert_eq!(genesis.timestamp, 1730748359);
        assert!(genesis.alloc.contains_key(&ADDRESS_L2_TO_L1_MESSAGE_PASSER));
    }

    /// Covers the stripped-"config" workaround: the chain config is rebuilt
    /// from the metadata file rather than the genesis JSON itself.
    #[test]
    fn test_read_superchain_genesis_with_workaround() {
        let genesis = read_superchain_genesis("funki", "mainnet").unwrap();
        assert_eq!(genesis.config.chain_id, 33979);
        assert_eq!(genesis.timestamp, 1721211095);
        assert!(genesis.alloc.contains_key(&ADDRESS_L2_TO_L1_MESSAGE_PASSER));
    }

    /// The metadata file alone parses and carries the expected chain id.
    #[test]
    fn test_read_superchain_metadata() {
        let archive = TarArchiveRef::new(SUPER_CHAIN_CONFIGS_TAR_BYTES).unwrap();
        let chain_config = read_superchain_metadata("funki", "mainnet", &archive).unwrap();
        assert_eq!(chain_config.chain_id, 33979);
    }

    /// Every `genesis/<env>/<name>.json.zz` entry must have a parsable
    /// metadata counterpart under `configs/<env>/<name>.json`.
    #[test]
    fn test_read_all_genesis_files() {
        let archive = TarArchiveRef::new(SUPER_CHAIN_CONFIGS_TAR_BYTES).unwrap();
        // Check that all genesis files can be read without errors.
        for entry in archive.entries() {
            // Entry paths look like `genesis/<environment>/<name>.json.zz`.
            let filename = entry
                .filename()
                .as_str()
                .unwrap()
                .split('/')
                .map(|s| s.to_string())
                .collect::<Vec<String>>();
            if filename.first().unwrap().ne(&"genesis") {
                continue
            }
            read_superchain_metadata(
                &filename.get(2).unwrap().replace(".json.zz", ""),
                filename.get(1).unwrap(),
                &archive,
            )
            .unwrap();
        }
    }

    /// Every chain the generated enum advertises must actually resolve to a
    /// genesis file in the archive.
    #[test]
    fn test_genesis_exists_for_all_available_chains() {
        for &chain in Superchain::ALL {
            let genesis = read_superchain_genesis(chain.name(), chain.environment());
            assert!(
                genesis.is_ok(),
                "Genesis not found for chain: {}-{}",
                chain.name(),
                chain.environment()
            );
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/config.rs | crates/optimism/payload/src/config.rs | //! Additional configuration for the OP builder
use std::sync::{atomic::AtomicU64, Arc};
/// Settings for the OP builder.
#[derive(Debug, Clone, Default)]
pub struct OpBuilderConfig {
    /// Data availability configuration for the OP builder.
    pub da_config: OpDAConfig,
}

impl OpBuilderConfig {
    /// Creates a new OP builder configuration with the given data availability configuration.
    pub const fn new(da_config: OpDAConfig) -> Self {
        Self { da_config }
    }

    /// Returns the Data Availability configuration for the OP builder, if it has configured
    /// constraints.
    pub fn constrained_da_config(&self) -> Option<&OpDAConfig> {
        // An empty DA config imposes no constraints, so report it as absent.
        (!self.da_config.is_empty()).then_some(&self.da_config)
    }
}
/// Contains the Data Availability configuration for the OP builder.
///
/// This type is shareable and can be used to update the DA configuration for the OP payload
/// builder.
#[derive(Debug, Clone, Default)]
pub struct OpDAConfig {
    inner: Arc<OpDAConfigInner>,
}

impl OpDAConfig {
    /// Creates a new Data Availability configuration with the given maximum sizes.
    pub fn new(max_da_tx_size: u64, max_da_block_size: u64) -> Self {
        let config = Self::default();
        config.set_max_da_size(max_da_tx_size, max_da_block_size);
        config
    }

    /// Returns whether the configuration is empty, i.e. neither limit is configured.
    pub fn is_empty(&self) -> bool {
        self.max_da_tx_size().is_none() && self.max_da_block_size().is_none()
    }

    /// Returns the max allowed data availability size per transactions, if any.
    pub fn max_da_tx_size(&self) -> Option<u64> {
        decode_da_limit(self.inner.max_da_tx_size.load(std::sync::atomic::Ordering::Relaxed))
    }

    /// Returns the max allowed data availability size per block, if any.
    pub fn max_da_block_size(&self) -> Option<u64> {
        decode_da_limit(self.inner.max_da_block_size.load(std::sync::atomic::Ordering::Relaxed))
    }

    /// Sets the maximum data availability size currently allowed for inclusion. 0 means no maximum.
    pub fn set_max_da_size(&self, max_da_tx_size: u64, max_da_block_size: u64) {
        self.set_max_tx_size(max_da_tx_size);
        self.set_max_block_size(max_da_block_size);
    }

    /// Sets the maximum data availability size per transaction currently allowed for inclusion. 0
    /// means no maximum.
    pub fn set_max_tx_size(&self, max_da_tx_size: u64) {
        self.inner.max_da_tx_size.store(max_da_tx_size, std::sync::atomic::Ordering::Relaxed);
    }

    /// Sets the maximum data availability size per block currently allowed for inclusion. 0 means
    /// no maximum.
    pub fn set_max_block_size(&self, max_da_block_size: u64) {
        self.inner.max_da_block_size.store(max_da_block_size, std::sync::atomic::Ordering::Relaxed);
    }
}

/// Decodes a stored limit value: `0` encodes "no limit".
fn decode_da_limit(raw: u64) -> Option<u64> {
    if raw == 0 {
        None
    } else {
        Some(raw)
    }
}

/// Interior state of [`OpDAConfig`], shared behind an [`Arc`] so clones observe updates.
#[derive(Debug, Default)]
struct OpDAConfigInner {
    /// Don't include any transactions with data availability size larger than this in any built
    /// block
    ///
    /// 0 means no limit.
    max_da_tx_size: AtomicU64,
    /// Maximum total data availability size for a block
    ///
    /// 0 means no limit.
    max_da_block_size: AtomicU64,
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Limits start unset, can be configured, and zero clears them again.
    #[test]
    fn test_da() {
        let da = OpDAConfig::default();
        assert!(da.max_da_tx_size().is_none());
        assert!(da.max_da_block_size().is_none());

        da.set_max_da_size(100, 200);
        assert_eq!((da.max_da_tx_size(), da.max_da_block_size()), (Some(100), Some(200)));

        // Zero encodes "no limit" and resets both maxima.
        da.set_max_da_size(0, 0);
        assert!(da.max_da_tx_size().is_none());
        assert!(da.max_da_block_size().is_none());
    }

    /// A default (empty) DA config yields no constrained config.
    #[test]
    fn test_da_constrained() {
        assert!(OpBuilderConfig::default().constrained_da_config().is_none());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/builder.rs | crates/optimism/payload/src/builder.rs | //! Optimism payload builder implementation.
use crate::{
config::{OpBuilderConfig, OpDAConfig},
error::OpPayloadBuilderError,
payload::OpBuiltPayload,
OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives,
};
use alloy_consensus::{BlockHeader, Transaction, Typed2718};
use alloy_primitives::{B256, U256};
use alloy_rpc_types_debug::ExecutionWitness;
use alloy_rpc_types_engine::PayloadId;
use reth_basic_payload_builder::*;
use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates};
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_evm::{
execute::{
BlockBuilder, BlockBuilderOutcome, BlockExecutionError, BlockExecutor, BlockValidationError,
},
ConfigureEvm, Database, Evm,
};
use reth_execution_types::ExecutionOutcome;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::{transaction::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER};
use reth_optimism_txpool::{
estimated_da_size::DataAvailabilitySized,
interop::{is_valid_interop, MaybeInteropTransaction},
OpPooledTx,
};
use reth_payload_builder_primitives::PayloadBuilderError;
use reth_payload_primitives::{BuildNextEnv, PayloadBuilderAttributes};
use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions};
use reth_primitives_traits::{
HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, SignedTransaction, TxTy,
};
use reth_revm::{
cancelled::CancelOnDrop, database::StateProviderDatabase, db::State,
witness::ExecutionWitnessRecord,
};
use reth_storage_api::{errors::ProviderError, StateProvider, StateProviderFactory};
use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool};
use revm::context::{Block, BlockEnv};
use std::{marker::PhantomData, sync::Arc};
use tracing::{debug, trace, warn};
/// Optimism's payload builder
///
/// Generic over the transaction pool (`Pool`), state provider client (`Client`), EVM
/// configuration (`Evm`), the best-transactions selector (`Txs`), and the payload
/// attributes type (`Attrs`).
#[derive(Debug)]
pub struct OpPayloadBuilder<
    Pool,
    Client,
    Evm,
    Txs = (),
    Attrs = OpPayloadBuilderAttributes<TxTy<<Evm as ConfigureEvm>::Primitives>>,
> {
    /// The rollup's compute pending block configuration option.
    // TODO(clabby): Implement this feature.
    pub compute_pending_block: bool,
    /// The type responsible for creating the evm.
    pub evm_config: Evm,
    /// Transaction pool.
    pub pool: Pool,
    /// Node client.
    pub client: Client,
    /// Settings for the builder, e.g. DA settings.
    pub config: OpBuilderConfig,
    /// The type responsible for yielding the best transactions for the payload if mempool
    /// transactions are allowed.
    pub best_transactions: Txs,
    /// Marker for the payload attributes type.
    _pd: PhantomData<Attrs>,
}
impl<Pool, Client, Evm, Txs, Attrs> Clone for OpPayloadBuilder<Pool, Client, Evm, Txs, Attrs>
where
    Pool: Clone,
    Client: Clone,
    Evm: ConfigureEvm,
    Txs: Clone,
{
    // NOTE(review): manual impl rather than `derive(Clone)`, presumably to avoid the
    // derive's blanket `Attrs: Clone` bound for the `PhantomData<Attrs>` marker.
    fn clone(&self) -> Self {
        let Self { compute_pending_block, evm_config, pool, client, config, best_transactions, _pd } =
            self;
        Self {
            compute_pending_block: *compute_pending_block,
            evm_config: evm_config.clone(),
            pool: pool.clone(),
            client: client.clone(),
            config: config.clone(),
            best_transactions: best_transactions.clone(),
            _pd: PhantomData,
        }
    }
}
impl<Pool, Client, Evm, Attrs> OpPayloadBuilder<Pool, Client, Evm, (), Attrs> {
/// `OpPayloadBuilder` constructor.
///
/// Configures the builder with the default settings.
pub fn new(pool: Pool, client: Client, evm_config: Evm) -> Self {
Self::with_builder_config(pool, client, evm_config, Default::default())
}
/// Configures the builder with the given [`OpBuilderConfig`].
pub const fn with_builder_config(
pool: Pool,
client: Client,
evm_config: Evm,
config: OpBuilderConfig,
) -> Self {
Self {
pool,
client,
compute_pending_block: true,
evm_config,
config,
best_transactions: (),
_pd: PhantomData,
}
}
}
impl<Pool, Client, Evm, Txs, Attrs> OpPayloadBuilder<Pool, Client, Evm, Txs, Attrs> {
    /// Sets the rollup's compute pending block configuration option.
    pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self {
        self.compute_pending_block = compute_pending_block;
        self
    }

    /// Configures the type responsible for yielding the transactions that should be included in
    /// the payload.
    pub fn with_transactions<T>(
        self,
        best_transactions: T,
    ) -> OpPayloadBuilder<Pool, Client, Evm, T, Attrs> {
        // Rebuild the builder with the new selector; all other settings carry over.
        OpPayloadBuilder {
            pool: self.pool,
            client: self.client,
            compute_pending_block: self.compute_pending_block,
            evm_config: self.evm_config,
            best_transactions,
            config: self.config,
            _pd: PhantomData,
        }
    }

    /// Enables the rollup's compute pending block configuration option.
    pub const fn compute_pending_block(self) -> Self {
        self.set_compute_pending_block(true)
    }

    /// Returns the rollup's compute pending block configuration option.
    pub const fn is_compute_pending_block(&self) -> bool {
        self.compute_pending_block
    }
}
impl<Pool, Client, Evm, N, T, Attrs> OpPayloadBuilder<Pool, Client, Evm, T, Attrs>
where
    Pool: TransactionPool<Transaction: OpPooledTx<Consensus = N::SignedTx>>,
    Client: StateProviderFactory + ChainSpecProvider<ChainSpec: OpHardforks>,
    N: OpPayloadPrimitives,
    Evm: ConfigureEvm<
        Primitives = N,
        NextBlockEnvCtx: BuildNextEnv<Attrs, N::BlockHeader, Client::ChainSpec>,
    >,
    Attrs: OpAttributes<Transaction = TxTy<Evm::Primitives>>,
{
    /// Constructs an Optimism payload from the transactions sent via the
    /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in
    /// the payload attributes, the transaction pool will be ignored and the only transactions
    /// included in the payload will be those sent through the attributes.
    ///
    /// Given build arguments including an Optimism client, transaction pool,
    /// and configuration, this function creates a transaction payload. Returns
    /// a result indicating success with the payload or an error in case of failure.
    fn build_payload<'a, Txs>(
        &self,
        args: BuildArguments<Attrs, OpBuiltPayload<N>>,
        best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a,
    ) -> Result<BuildOutcome<OpBuiltPayload<N>>, PayloadBuilderError>
    where
        Txs: PayloadTransactions<
            Transaction: PoolTransaction<Consensus = N::SignedTx> + OpPooledTx,
        >,
    {
        let BuildArguments { mut cached_reads, config, cancel, best_payload } = args;
        // Bundle everything the inner builder needs into one context value.
        let ctx = OpPayloadBuilderCtx {
            evm_config: self.evm_config.clone(),
            da_config: self.config.da_config.clone(),
            chain_spec: self.client.chain_spec(),
            config,
            cancel,
            best_payload,
        };
        let builder = OpBuilder::new(best);
        // State is anchored on the parent block's post-state.
        let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?;
        let state = StateProviderDatabase::new(&state_provider);
        if ctx.attributes().no_tx_pool() {
            builder.build(state, &state_provider, ctx)
        } else {
            // sequencer mode we can reuse cachedreads from previous runs
            builder.build(cached_reads.as_db_mut(state), &state_provider, ctx)
        }
        // Hand the (possibly updated) cached reads back to the caller for reuse.
        .map(|out| out.with_cached_reads(cached_reads))
    }

    /// Computes the witness for the payload.
    pub fn payload_witness(
        &self,
        parent: SealedHeader<N::BlockHeader>,
        attributes: Attrs::RpcPayloadAttributes,
    ) -> Result<ExecutionWitness, PayloadBuilderError>
    where
        Attrs: PayloadBuilderAttributes,
    {
        // NOTE(review): `3` is presumably the payload attributes version — confirm
        // against `PayloadBuilderAttributes::try_new`.
        let attributes =
            Attrs::try_new(parent.hash(), attributes, 3).map_err(PayloadBuilderError::other)?;
        let config = PayloadConfig { parent_header: Arc::new(parent), attributes };
        let ctx = OpPayloadBuilderCtx {
            evm_config: self.evm_config.clone(),
            da_config: self.config.da_config.clone(),
            chain_spec: self.client.chain_spec(),
            config,
            cancel: Default::default(),
            best_payload: Default::default(),
        };
        let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?;
        // Witness generation never pulls from the mempool.
        let builder = OpBuilder::new(|_| NoopPayloadTransactions::<Pool::Transaction>::default());
        builder.witness(state_provider, &ctx)
    }
}
/// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`].
impl<Pool, Client, Evm, N, Txs, Attrs> PayloadBuilder
    for OpPayloadBuilder<Pool, Client, Evm, Txs, Attrs>
where
    N: OpPayloadPrimitives,
    Client: StateProviderFactory + ChainSpecProvider<ChainSpec: OpHardforks> + Clone,
    Pool: TransactionPool<Transaction: OpPooledTx<Consensus = N::SignedTx>>,
    Evm: ConfigureEvm<
        Primitives = N,
        NextBlockEnvCtx: BuildNextEnv<Attrs, N::BlockHeader, Client::ChainSpec>,
    >,
    Txs: OpPayloadTransactions<Pool::Transaction>,
    Attrs: OpAttributes<Transaction = N::SignedTx>,
{
    type Attributes = Attrs;
    type BuiltPayload = OpBuiltPayload<N>;

    /// Builds a payload, sourcing mempool transactions via the configured selector.
    fn try_build(
        &self,
        args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
    ) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
        let pool = self.pool.clone();
        self.build_payload(args, |attrs| self.best_transactions.best_transactions(pool, attrs))
    }

    fn on_missing_payload(
        &self,
        _args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
    ) -> MissingPayloadBehaviour<Self::BuiltPayload> {
        // we want to await the job that's already in progress because that should be returned as
        // is, there's no benefit in racing another job
        MissingPayloadBehaviour::AwaitInProgress
    }

    // NOTE: this should only be used for testing purposes because this doesn't have access to L1
    // system txs, hence on_missing_payload we return [MissingPayloadBehaviour::AwaitInProgress].
    fn build_empty_payload(
        &self,
        config: PayloadConfig<Self::Attributes, N::BlockHeader>,
    ) -> Result<Self::BuiltPayload, PayloadBuilderError> {
        // Build with no mempool transactions at all; only attribute-supplied txs apply.
        let args = BuildArguments {
            config,
            cached_reads: Default::default(),
            cancel: Default::default(),
            best_payload: None,
        };
        self.build_payload(args, |_| NoopPayloadTransactions::<Pool::Transaction>::default())?
            .into_payload()
            .ok_or_else(|| PayloadBuilderError::MissingPayload)
    }
}
/// The type that builds the payload.
///
/// Payload building for optimism is composed of several steps.
/// The first steps are mandatory and defined by the protocol.
///
/// 1. first all System calls are applied.
/// 2. After canyon the forced deployed `create2deployer` must be loaded
/// 3. all sequencer transactions are executed (part of the payload attributes)
///
/// Depending on whether the node acts as a sequencer and is allowed to include additional
/// transactions (`no_tx_pool == false`):
/// 4. include additional transactions
///
/// And finally
/// 5. build the block: compute all roots (txs, state)
#[derive(derive_more::Debug)]
pub struct OpBuilder<'a, Txs> {
    /// Yields the best transaction to include if transactions from the mempool are allowed.
    #[debug(skip)]
    best: Box<dyn FnOnce(BestTransactionsAttributes) -> Txs + 'a>,
}

impl<'a, Txs> OpBuilder<'a, Txs> {
    /// Creates a new [`OpBuilder`] from a closure that yields the best mempool transactions.
    pub fn new(best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a) -> Self {
        // Boxed so heterogeneous closures can be stored behind one field type.
        Self { best: Box::new(best) }
    }
}
impl<Txs> OpBuilder<'_, Txs> {
    /// Builds the payload on top of the state.
    ///
    /// Executes sequencer transactions first, then (unless `no_tx_pool`) the best mempool
    /// transactions, and finally seals the block and assembles the built-payload value.
    pub fn build<Evm, ChainSpec, N, Attrs>(
        self,
        db: impl Database<Error = ProviderError>,
        state_provider: impl StateProvider,
        ctx: OpPayloadBuilderCtx<Evm, ChainSpec, Attrs>,
    ) -> Result<BuildOutcomeKind<OpBuiltPayload<N>>, PayloadBuilderError>
    where
        Evm: ConfigureEvm<
            Primitives = N,
            NextBlockEnvCtx: BuildNextEnv<Attrs, N::BlockHeader, ChainSpec>,
        >,
        ChainSpec: EthChainSpec + OpHardforks,
        N: OpPayloadPrimitives,
        Txs:
            PayloadTransactions<Transaction: PoolTransaction<Consensus = N::SignedTx> + OpPooledTx>,
        Attrs: OpAttributes<Transaction = N::SignedTx>,
    {
        let Self { best } = self;
        debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number(), "building new payload");

        // State wrapper that accumulates the bundle of changes for the execution outcome.
        let mut db = State::builder().with_database(db).with_bundle_update().build();
        let mut builder = ctx.block_builder(&mut db)?;

        // 1. apply pre-execution changes
        builder.apply_pre_execution_changes().map_err(|err| {
            warn!(target: "payload_builder", %err, "failed to apply pre-execution changes");
            PayloadBuilderError::Internal(err.into())
        })?;

        // 2. execute sequencer transactions
        let mut info = ctx.execute_sequencer_transactions(&mut builder)?;

        // 3. if mem pool transactions are requested we execute them
        if !ctx.attributes().no_tx_pool() {
            let best_txs = best(ctx.best_transaction_attributes(builder.evm_mut().block()));
            if ctx.execute_best_transactions(&mut info, &mut builder, best_txs)?.is_some() {
                // The job was cancelled mid-execution.
                return Ok(BuildOutcomeKind::Cancelled)
            }
            // check if the new payload is even more valuable
            if !ctx.is_better_payload(info.total_fees) {
                // can skip building the block
                return Ok(BuildOutcomeKind::Aborted { fees: info.total_fees })
            }
        }

        // Seal the block: computes roots and finalizes execution output.
        let BlockBuilderOutcome { execution_result, hashed_state, trie_updates, block } =
            builder.finish(state_provider)?;
        let sealed_block = Arc::new(block.sealed_block().clone());
        debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block");

        let execution_outcome = ExecutionOutcome::new(
            db.take_bundle(),
            vec![execution_result.receipts],
            block.number(),
            Vec::new(),
        );

        // create the executed block data
        let executed: ExecutedBlockWithTrieUpdates<N> = ExecutedBlockWithTrieUpdates {
            block: ExecutedBlock {
                recovered_block: Arc::new(block),
                execution_output: Arc::new(execution_outcome),
                hashed_state: Arc::new(hashed_state),
            },
            trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)),
        };

        let no_tx_pool = ctx.attributes().no_tx_pool();
        let payload =
            OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed));

        if no_tx_pool {
            // if `no_tx_pool` is set only transactions from the payload attributes will be included
            // in the payload. In other words, the payload is deterministic and we can
            // freeze it once we've successfully built it.
            Ok(BuildOutcomeKind::Freeze(payload))
        } else {
            Ok(BuildOutcomeKind::Better { payload })
        }
    }

    /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution.
    pub fn witness<Evm, ChainSpec, N, Attrs>(
        self,
        state_provider: impl StateProvider,
        ctx: &OpPayloadBuilderCtx<Evm, ChainSpec, Attrs>,
    ) -> Result<ExecutionWitness, PayloadBuilderError>
    where
        Evm: ConfigureEvm<
            Primitives = N,
            NextBlockEnvCtx: BuildNextEnv<Attrs, N::BlockHeader, ChainSpec>,
        >,
        ChainSpec: EthChainSpec + OpHardforks,
        N: OpPayloadPrimitives,
        Txs: PayloadTransactions<Transaction: PoolTransaction<Consensus = N::SignedTx>>,
        Attrs: OpAttributes<Transaction = N::SignedTx>,
    {
        let mut db = State::builder()
            .with_database(StateProviderDatabase::new(&state_provider))
            .with_bundle_update()
            .build();
        let mut builder = ctx.block_builder(&mut db)?;
        // Only attribute-supplied (sequencer) transactions are executed for the witness.
        builder.apply_pre_execution_changes()?;
        ctx.execute_sequencer_transactions(&mut builder)?;
        builder.into_executor().apply_post_execution_changes()?;

        if ctx.chain_spec.is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) {
            // force load `L2ToL1MessagePasser.sol` so l2 withdrawals root can be computed even if
            // no l2 withdrawals in block
            _ = db.load_cache_account(ADDRESS_L2_TO_L1_MESSAGE_PASSER)?;
        }

        // Record all state/code/keys touched during execution for the witness.
        let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number: _ } =
            ExecutionWitnessRecord::from_executed_state(&db);
        let state = state_provider.witness(Default::default(), hashed_state)?;
        Ok(ExecutionWitness {
            state: state.into_iter().collect(),
            codes,
            keys,
            ..Default::default()
        })
    }
}
/// A type that returns the [`PayloadTransactions`] that should be included in the pool.
pub trait OpPayloadTransactions<Transaction>: Clone + Send + Sync + Unpin + 'static {
    /// Returns an iterator that yields the transaction in the order they should get included in the
    /// new payload.
    fn best_transactions<Pool: TransactionPool<Transaction = Transaction>>(
        &self,
        pool: Pool,
        attr: BestTransactionsAttributes,
    ) -> impl PayloadTransactions<Transaction = Transaction>;
}

/// Default selector: yields the pool's best transactions for the given attributes.
impl<T: PoolTransaction + MaybeInteropTransaction> OpPayloadTransactions<T> for () {
    fn best_transactions<Pool: TransactionPool<Transaction = T>>(
        &self,
        pool: Pool,
        attr: BestTransactionsAttributes,
    ) -> impl PayloadTransactions<Transaction = T> {
        BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr))
    }
}
/// Holds the state after execution
#[derive(Debug)]
pub struct ExecutedPayload<N: NodePrimitives> {
    /// Tracked execution info
    pub info: ExecutionInfo,
    /// Withdrawal hash.
    pub withdrawals_root: Option<B256>,
    /// The transaction receipts.
    pub receipts: Vec<N::Receipt>,
    /// The block env used during execution.
    pub block_env: BlockEnv,
}

/// This acts as the container for executed transactions and its byproducts (receipts, gas used)
#[derive(Default, Debug)]
pub struct ExecutionInfo {
    /// All gas used so far
    pub cumulative_gas_used: u64,
    /// Estimated DA size
    pub cumulative_da_bytes_used: u64,
    /// Tracks fees from executed mempool transactions
    pub total_fees: U256,
}
impl ExecutionInfo {
    /// Create a new instance with allocated slots.
    pub const fn new() -> Self {
        Self { cumulative_gas_used: 0, cumulative_da_bytes_used: 0, total_fees: U256::ZERO }
    }

    /// Returns true if the transaction would exceed the block limits:
    /// - block gas limit: ensures the transaction still fits into the block.
    /// - tx DA limit: if configured, ensures the tx does not exceed the maximum allowed DA limit
    ///   per tx.
    /// - block DA limit: if configured, ensures the transaction's DA size does not exceed the
    ///   maximum allowed DA limit per block.
    pub fn is_tx_over_limits(
        &self,
        tx_da_size: u64,
        block_gas_limit: u64,
        tx_data_limit: Option<u64>,
        block_data_limit: Option<u64>,
        tx_gas_limit: u64,
    ) -> bool {
        // Checks are short-circuited in order: per-tx DA cap, cumulative block DA cap,
        // then block gas cap (using the tx's gas limit as worst-case usage).
        tx_data_limit.is_some_and(|da_limit| tx_da_size > da_limit)
            || block_data_limit
                .is_some_and(|da_limit| self.cumulative_da_bytes_used + tx_da_size > da_limit)
            || self.cumulative_gas_used + tx_gas_limit > block_gas_limit
    }
}
/// Container type that holds all necessities to build a new payload.
#[derive(derive_more::Debug)]
pub struct OpPayloadBuilderCtx<
    Evm: ConfigureEvm,
    ChainSpec,
    Attrs = OpPayloadBuilderAttributes<TxTy<<Evm as ConfigureEvm>::Primitives>>,
> {
    /// The type that knows how to perform system calls and configure the evm.
    pub evm_config: Evm,
    /// The DA config for the payload builder
    pub da_config: OpDAConfig,
    /// The chainspec
    pub chain_spec: Arc<ChainSpec>,
    /// How to build the payload.
    pub config: PayloadConfig<Attrs, HeaderTy<Evm::Primitives>>,
    /// Marker to check whether the job has been cancelled.
    pub cancel: CancelOnDrop,
    /// The currently best payload.
    pub best_payload: Option<OpBuiltPayload<Evm::Primitives>>,
}
impl<Evm, ChainSpec, Attrs> OpPayloadBuilderCtx<Evm, ChainSpec, Attrs>
where
Evm: ConfigureEvm<
Primitives: OpPayloadPrimitives,
NextBlockEnvCtx: BuildNextEnv<Attrs, HeaderTy<Evm::Primitives>, ChainSpec>,
>,
ChainSpec: EthChainSpec + OpHardforks,
Attrs: OpAttributes<Transaction = TxTy<Evm::Primitives>>,
{
/// Returns the parent block the payload will be built on.
pub fn parent(&self) -> &SealedHeaderFor<Evm::Primitives> {
    self.config.parent_header.as_ref()
}

/// Returns the builder attributes.
pub const fn attributes(&self) -> &Attrs {
    &self.config.attributes
}

/// Returns the current fee settings for transactions from the mempool
pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes {
    BestTransactionsAttributes::new(
        block_env.basefee,
        // NOTE(review): narrowing cast — assumes the blob gas price fits in `u64`; confirm.
        block_env.blob_gasprice().map(|p| p as u64),
    )
}

/// Returns the unique id for this payload job.
pub fn payload_id(&self) -> PayloadId {
    self.attributes().payload_id()
}

/// Returns true if the fees are higher than the previous payload.
pub fn is_better_payload(&self, total_fees: U256) -> bool {
    is_better_payload(self.best_payload.as_ref(), total_fees)
}

/// Prepares a [`BlockBuilder`] for the next block.
///
/// The next-block environment is derived from the attributes, the parent header,
/// and the chain spec.
pub fn block_builder<'a, DB: Database>(
    &'a self,
    db: &'a mut State<DB>,
) -> Result<impl BlockBuilder<Primitives = Evm::Primitives> + 'a, PayloadBuilderError> {
    self.evm_config
        .builder_for_next_block(
            db,
            self.parent(),
            Evm::NextBlockEnvCtx::build_next_env(
                self.attributes(),
                self.parent(),
                self.chain_spec.as_ref(),
            )
            .map_err(PayloadBuilderError::other)?,
        )
        .map_err(PayloadBuilderError::other)
}
/// Executes all sequencer transactions that are included in the payload attributes.
///
/// Returns the accumulated [`ExecutionInfo`] (gas used). Invalid sequencer transactions
/// are skipped; any other execution error aborts the whole build attempt.
pub fn execute_sequencer_transactions(
    &self,
    builder: &mut impl BlockBuilder<Primitives = Evm::Primitives>,
) -> Result<ExecutionInfo, PayloadBuilderError> {
    let mut info = ExecutionInfo::new();
    for sequencer_tx in self.attributes().sequencer_transactions() {
        // A sequencer's block should never contain blob transactions.
        if sequencer_tx.value().is_eip4844() {
            return Err(PayloadBuilderError::other(
                OpPayloadBuilderError::BlobTransactionRejected,
            ))
        }
        // Convert the transaction to a [RecoveredTx]. This is
        // purely for the purposes of utilizing the `evm_config.tx_env` function.
        // Deposit transactions do not have signatures, so if the tx is a deposit, this
        // will just pull in its `from` address.
        let sequencer_tx = sequencer_tx.value().try_clone_into_recovered().map_err(|_| {
            PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed)
        })?;
        let gas_used = match builder.execute_transaction(sequencer_tx.clone()) {
            Ok(gas_used) => gas_used,
            Err(BlockExecutionError::Validation(BlockValidationError::InvalidTx {
                error,
                ..
            })) => {
                // Invalid sequencer txs are tolerated and simply not included.
                trace!(target: "payload_builder", %error, ?sequencer_tx, "Error in sequencer transaction, skipping.");
                continue
            }
            Err(err) => {
                // this is an error that we should treat as fatal for this attempt
                return Err(PayloadBuilderError::EvmExecutionError(Box::new(err)))
            }
        };
        // add gas used by the transaction to cumulative gas used, before creating the receipt
        info.cumulative_gas_used += gas_used;
    }
    Ok(info)
}
    /// Executes the given best transactions and updates the execution info.
    ///
    /// Transactions are drawn from `best_txs` until the iterator is exhausted or
    /// the job is cancelled; transactions exceeding the block gas / DA budgets,
    /// blob or deposit transactions, and invalid cross-chain transactions are
    /// marked invalid and skipped.
    ///
    /// Returns `Ok(Some(()))` if the job was cancelled.
    pub fn execute_best_transactions(
        &self,
        info: &mut ExecutionInfo,
        builder: &mut impl BlockBuilder<Primitives = Evm::Primitives>,
        mut best_txs: impl PayloadTransactions<
            Transaction: PoolTransaction<Consensus = TxTy<Evm::Primitives>> + OpPooledTx,
        >,
    ) -> Result<Option<()>, PayloadBuilderError> {
        // Per-block budgets: gas limit and data-availability size for the whole
        // block, plus a per-transaction DA cap.
        let block_gas_limit = builder.evm_mut().block().gas_limit;
        let block_da_limit = self.da_config.max_da_block_size();
        let tx_da_limit = self.da_config.max_da_tx_size();
        let base_fee = builder.evm_mut().block().basefee;
        while let Some(tx) = best_txs.next(()) {
            // Read pool-level metadata before converting into the consensus form,
            // which consumes the pool transaction.
            let interop = tx.interop_deadline();
            let tx_da_size = tx.estimated_da_size();
            let tx = tx.into_consensus();
            if info.is_tx_over_limits(
                tx_da_size,
                block_gas_limit,
                tx_da_limit,
                block_da_limit,
                tx.gas_limit(),
            ) {
                // we can't fit this transaction into the block, so we need to mark it as
                // invalid which also removes all dependent transaction from
                // the iterator before we can continue
                best_txs.mark_invalid(tx.signer(), tx.nonce());
                continue
            }
            // A sequencer's block should never contain blob or deposit transactions from the pool.
            if tx.is_eip4844() || tx.is_deposit() {
                best_txs.mark_invalid(tx.signer(), tx.nonce());
                continue
            }
            // We skip invalid cross chain txs, they would be removed on the next block update in
            // the maintenance job
            if let Some(interop) = interop {
                if !is_valid_interop(interop, self.config.attributes.timestamp()) {
                    best_txs.mark_invalid(tx.signer(), tx.nonce());
                    continue
                }
            }
            // check if the job was cancelled, if so we can exit early
            if self.cancel.is_cancelled() {
                return Ok(Some(()))
            }
            let gas_used = match builder.execute_transaction(tx.clone()) {
                Ok(gas_used) => gas_used,
                Err(BlockExecutionError::Validation(BlockValidationError::InvalidTx {
                    error,
                    ..
                })) => {
                    if error.is_nonce_too_low() {
                        // if the nonce is too low, we can skip this transaction
                        trace!(target: "payload_builder", %error, ?tx, "skipping nonce too low transaction");
                    } else {
                        // if the transaction is invalid, we can skip it and all of its
                        // descendants
                        trace!(target: "payload_builder", %error, ?tx, "skipping invalid transaction and its descendants");
                        best_txs.mark_invalid(tx.signer(), tx.nonce());
                    }
                    continue
                }
                Err(err) => {
                    // this is an error that we should treat as fatal for this attempt
                    return Err(PayloadBuilderError::EvmExecutionError(Box::new(err)))
                }
            };
            // add gas used by the transaction to cumulative gas used, before creating the
            // receipt
            info.cumulative_gas_used += gas_used;
            info.cumulative_da_bytes_used += tx_da_size;
            // update and add to total fees
            let miner_fee = tx
                .effective_tip_per_gas(base_fee)
                .expect("fee is always valid; execution succeeded");
            info.total_fees += U256::from(miner_fee) * U256::from(gas_used);
        }
        Ok(None)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/lib.rs | crates/optimism/payload/src/lib.rs | //! Optimism's payload builder implementation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![allow(clippy::useless_let_if_seq)]
extern crate alloc;
pub mod builder;
pub use builder::OpPayloadBuilder;
pub mod error;
pub mod payload;
use op_alloy_rpc_types_engine::OpExecutionData;
pub use payload::{
payload_id_optimism, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes,
};
mod traits;
use reth_optimism_primitives::OpPrimitives;
use reth_payload_primitives::{BuiltPayload, PayloadTypes};
use reth_primitives_traits::{Block, NodePrimitives, SealedBlock};
pub use traits::*;
pub mod validator;
pub use validator::OpExecutionPayloadValidator;
pub mod config;
/// ZST that aggregates Optimism [`PayloadTypes`].
///
/// Generic over the node primitives `N` (defaults to [`OpPrimitives`]) so that
/// alternative primitive types can reuse the OP payload wiring.
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)]
#[non_exhaustive]
pub struct OpPayloadTypes<N: NodePrimitives = OpPrimitives>(core::marker::PhantomData<N>);
impl<N: NodePrimitives> PayloadTypes for OpPayloadTypes<N>
where
    OpBuiltPayload<N>: BuiltPayload,
{
    type ExecutionData = OpExecutionData;
    type BuiltPayload = OpBuiltPayload<N>;
    type PayloadAttributes = OpPayloadAttributes;
    type PayloadBuilderAttributes = OpPayloadBuilderAttributes<N::SignedTx>;
    /// Converts a sealed block into OP execution payload data.
    fn block_to_payload(
        block: SealedBlock<
            <<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block,
        >,
    ) -> Self::ExecutionData {
        // Hash is captured up front because the conversion below consumes the
        // sealed block; the block is reshaped into its Ethereum representation
        // before encoding.
        OpExecutionData::from_block_unchecked(
            block.hash(),
            &block.into_block().into_ethereum_block(),
        )
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/error.rs | crates/optimism/payload/src/error.rs | //! Error type
/// Optimism specific payload building errors.
///
/// These are typically wrapped into a `PayloadBuilderError` via
/// `PayloadBuilderError::other` by the payload builder.
#[derive(Debug, thiserror::Error)]
pub enum OpPayloadBuilderError {
    /// Thrown when a transaction fails to convert to a
    /// [`alloy_consensus::transaction::Recovered`].
    ///
    /// Note: deposit transactions carry no signature, so for them recovery reads
    /// the `from` field instead of performing EC recovery.
    #[error("failed to convert deposit transaction to RecoveredTx")]
    TransactionEcRecoverFailed,
    /// Thrown when the L1 block info could not be parsed from the calldata of the
    /// first transaction supplied in the payload attributes.
    #[error("failed to parse L1 block info from L1 info tx calldata")]
    L1BlockInfoParseFailed,
    /// Thrown when a database account could not be loaded.
    #[error("failed to load account {0}")]
    AccountLoadFailed(alloy_primitives::Address),
    /// Thrown when force deploy of create2deployer code fails.
    #[error("failed to force create2deployer account code")]
    ForceCreate2DeployerFail,
    /// Thrown when a blob transaction is included in a sequencer's block.
    /// Sequencer blocks must never contain EIP-4844 transactions.
    #[error("blob transaction included in sequencer block")]
    BlobTransactionRejected,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/optimism/payload/src/payload.rs | crates/optimism/payload/src/payload.rs | //! Payload related types
use std::{fmt::Debug, sync::Arc};
use alloy_consensus::{Block, BlockHeader};
use alloy_eips::{
eip1559::BaseFeeParams, eip2718::Decodable2718, eip4895::Withdrawals, eip7685::Requests,
};
use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256};
use alloy_rlp::Encodable;
use alloy_rpc_types_engine::{
BlobsBundleV1, ExecutionPayloadEnvelopeV2, ExecutionPayloadFieldV2, ExecutionPayloadV1,
ExecutionPayloadV3, PayloadId,
};
use op_alloy_consensus::{encode_holocene_extra_data, EIP1559ParamError};
use op_alloy_rpc_types_engine::{
OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4,
};
use reth_chain_state::ExecutedBlockWithTrieUpdates;
use reth_chainspec::EthChainSpec;
use reth_optimism_evm::OpNextBlockEnvAttributes;
use reth_optimism_forks::OpHardforks;
use reth_payload_builder::{EthPayloadBuilderAttributes, PayloadBuilderError};
use reth_payload_primitives::{BuildNextEnv, BuiltPayload, PayloadBuilderAttributes};
use reth_primitives_traits::{
NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, WithEncoded,
};
/// Re-export for use in downstream arguments.
pub use op_alloy_rpc_types_engine::OpPayloadAttributes;
use reth_optimism_primitives::OpPrimitives;
/// Optimism Payload Builder Attributes
///
/// Extends [`EthPayloadBuilderAttributes`] with the OP-specific fields carried
/// in the engine API payload attributes.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OpPayloadBuilderAttributes<T> {
    /// Inner ethereum payload builder attributes
    pub payload_attributes: EthPayloadBuilderAttributes,
    /// `NoTxPool` option for the generated payload
    pub no_tx_pool: bool,
    /// Decoded transactions and the original EIP-2718 encoded bytes as received in the payload
    /// attributes.
    pub transactions: Vec<WithEncoded<T>>,
    /// The gas limit for the generated payload
    pub gas_limit: Option<u64>,
    /// EIP-1559 parameters for the generated payload
    pub eip_1559_params: Option<B64>,
}
impl<T> Default for OpPayloadBuilderAttributes<T> {
fn default() -> Self {
Self {
payload_attributes: Default::default(),
no_tx_pool: Default::default(),
gas_limit: Default::default(),
eip_1559_params: Default::default(),
transactions: Default::default(),
}
}
}
impl<T> OpPayloadBuilderAttributes<T> {
    /// Extracts the `eip1559` parameters for the payload.
    ///
    /// Encodes the stored EIP-1559 parameters into Holocene `extraData` bytes,
    /// substituting `default_base_fee_params` where needed.
    ///
    /// # Errors
    ///
    /// Returns [`EIP1559ParamError::NoEIP1559Params`] when the attributes carry
    /// no EIP-1559 parameters, or any error produced by the encoder.
    pub fn get_holocene_extra_data(
        &self,
        default_base_fee_params: BaseFeeParams,
    ) -> Result<Bytes, EIP1559ParamError> {
        match self.eip_1559_params {
            Some(params) => encode_holocene_extra_data(params, default_base_fee_params),
            None => Err(EIP1559ParamError::NoEIP1559Params),
        }
    }
}
impl<T: Decodable2718 + Send + Sync + Debug + Unpin + 'static> PayloadBuilderAttributes
    for OpPayloadBuilderAttributes<T>
{
    type RpcPayloadAttributes = OpPayloadAttributes;
    type Error = alloy_rlp::Error;
    /// Creates a new payload builder for the given parent block and the attributes.
    ///
    /// Derives the unique [`PayloadId`] for the given parent and attributes
    ///
    /// # Errors
    ///
    /// Returns an [`alloy_rlp::Error`] if any supplied transaction fails to
    /// decode as EIP-2718 data or has trailing bytes after the payload.
    fn try_new(
        parent: B256,
        attributes: OpPayloadAttributes,
        version: u8,
    ) -> Result<Self, Self::Error> {
        let id = payload_id_optimism(&parent, &attributes, version);
        // Decode every raw transaction while keeping the original encoded bytes
        // alongside the decoded form.
        let transactions = attributes
            .transactions
            .unwrap_or_default()
            .into_iter()
            .map(|data| {
                let mut buf = data.as_ref();
                let tx = Decodable2718::decode_2718(&mut buf).map_err(alloy_rlp::Error::from)?;
                // Reject inputs with leftover bytes after the decoded transaction.
                if !buf.is_empty() {
                    return Err(alloy_rlp::Error::UnexpectedLength);
                }
                Ok(WithEncoded::new(data, tx))
            })
            .collect::<Result<_, _>>()?;
        let payload_attributes = EthPayloadBuilderAttributes {
            id,
            parent,
            timestamp: attributes.payload_attributes.timestamp,
            suggested_fee_recipient: attributes.payload_attributes.suggested_fee_recipient,
            prev_randao: attributes.payload_attributes.prev_randao,
            withdrawals: attributes.payload_attributes.withdrawals.unwrap_or_default().into(),
            parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root,
        };
        Ok(Self {
            payload_attributes,
            no_tx_pool: attributes.no_tx_pool.unwrap_or_default(),
            transactions,
            gas_limit: attributes.gas_limit,
            eip_1559_params: attributes.eip_1559_params,
        })
    }
    // The accessors below simply delegate to the inner Ethereum attributes.
    fn payload_id(&self) -> PayloadId {
        self.payload_attributes.id
    }
    fn parent(&self) -> B256 {
        self.payload_attributes.parent
    }
    fn timestamp(&self) -> u64 {
        self.payload_attributes.timestamp
    }
    fn parent_beacon_block_root(&self) -> Option<B256> {
        self.payload_attributes.parent_beacon_block_root
    }
    fn suggested_fee_recipient(&self) -> Address {
        self.payload_attributes.suggested_fee_recipient
    }
    fn prev_randao(&self) -> B256 {
        self.payload_attributes.prev_randao
    }
    fn withdrawals(&self) -> &Withdrawals {
        &self.payload_attributes.withdrawals
    }
}
// NOTE: the generic parameter was previously named `OpTransactionSigned`,
// shadowing the concrete `reth_optimism_primitives::OpTransactionSigned` type
// and misleadingly suggesting a non-generic impl. Renamed to `T`; the impl
// still applies to exactly the same set of types.
impl<T> From<EthPayloadBuilderAttributes> for OpPayloadBuilderAttributes<T> {
    /// Wraps the Ethereum payload builder attributes, leaving all OP-specific
    /// fields (`no_tx_pool`, `transactions`, `gas_limit`, `eip_1559_params`)
    /// at their defaults.
    fn from(value: EthPayloadBuilderAttributes) -> Self {
        Self { payload_attributes: value, ..Default::default() }
    }
}
/// Contains the built payload.
#[derive(Debug, Clone)]
pub struct OpBuiltPayload<N: NodePrimitives = OpPrimitives> {
    /// Identifier of the payload
    pub(crate) id: PayloadId,
    /// Sealed block
    ///
    /// Held behind an [`Arc`] so clones of the payload share the block.
    pub(crate) block: Arc<SealedBlock<N::Block>>,
    /// Block execution data for the payload, if any.
    pub(crate) executed_block: Option<ExecutedBlockWithTrieUpdates<N>>,
    /// The fees of the block
    pub(crate) fees: U256,
}
// === impl BuiltPayload ===
impl<N: NodePrimitives> OpBuiltPayload<N> {
    /// Initializes the payload with the given initial block.
    pub const fn new(
        id: PayloadId,
        block: Arc<SealedBlock<N::Block>>,
        fees: U256,
        executed_block: Option<ExecutedBlockWithTrieUpdates<N>>,
    ) -> Self {
        Self { id, block, fees, executed_block }
    }
    /// Returns the identifier of the payload.
    pub const fn id(&self) -> PayloadId {
        self.id
    }
    /// Returns the built block(sealed)
    pub fn block(&self) -> &SealedBlock<N::Block> {
        &self.block
    }
    /// Fees of the block
    pub const fn fees(&self) -> U256 {
        self.fees
    }
    /// Converts the value into [`SealedBlock`].
    ///
    /// Avoids a clone when this payload holds the only reference to the block;
    /// otherwise the shared block is cloned out of the [`Arc`].
    pub fn into_sealed_block(self) -> SealedBlock<N::Block> {
        Arc::unwrap_or_clone(self.block)
    }
}
impl<N: NodePrimitives> BuiltPayload for OpBuiltPayload<N> {
    type Primitives = N;
    fn block(&self) -> &SealedBlock<N::Block> {
        // Delegates to the inherent accessor of the same name.
        self.block()
    }
    fn fees(&self) -> U256 {
        self.fees
    }
    fn executed_block(&self) -> Option<ExecutedBlockWithTrieUpdates<N>> {
        self.executed_block.clone()
    }
    // OP built payloads carry no EIP-7685 execution-layer requests.
    fn requests(&self) -> Option<Requests> {
        None
    }
}
// V1 engine_getPayloadV1 response
impl<T, N> From<OpBuiltPayload<N>> for ExecutionPayloadV1
where
T: SignedTransaction,
N: NodePrimitives<Block = Block<T>>,
{
fn from(value: OpBuiltPayload<N>) -> Self {
Self::from_block_unchecked(
value.block().hash(),
&Arc::unwrap_or_clone(value.block).into_block(),
)
}
}
// V2 engine_getPayloadV2 response
impl<T, N> From<OpBuiltPayload<N>> for ExecutionPayloadEnvelopeV2
where
    T: SignedTransaction,
    N: NodePrimitives<Block = Block<T>>,
{
    /// Builds the `engine_getPayloadV2` response envelope from the built payload.
    fn from(value: OpBuiltPayload<N>) -> Self {
        let OpBuiltPayload { block, fees, .. } = value;
        Self {
            block_value: fees,
            // `unwrap_or_clone` avoids copying the block when this payload holds
            // the only Arc reference.
            execution_payload: ExecutionPayloadFieldV2::from_block_unchecked(
                block.hash(),
                &Arc::unwrap_or_clone(block).into_block(),
            ),
        }
    }
}
impl<T, N> From<OpBuiltPayload<N>> for OpExecutionPayloadEnvelopeV3
where
    T: SignedTransaction,
    N: NodePrimitives<Block = Block<T>>,
{
    /// Builds the `engine_getPayloadV3` response envelope from the built payload.
    fn from(value: OpBuiltPayload<N>) -> Self {
        let OpBuiltPayload { block, fees, .. } = value;
        // V3 envelopes require a parent beacon block root; zero if the header
        // does not carry one.
        let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default();
        Self {
            execution_payload: ExecutionPayloadV3::from_block_unchecked(
                block.hash(),
                &Arc::unwrap_or_clone(block).into_block(),
            ),
            block_value: fees,
            // From the engine API spec:
            //
            // > Client software **MAY** use any heuristics to decide whether to set
            // `shouldOverrideBuilder` flag or not. If client software does not implement any
            // heuristic this flag **SHOULD** be set to `false`.
            //
            // Spec:
            // <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#specification-2>
            should_override_builder: false,
            // No blobs for OP.
            blobs_bundle: BlobsBundleV1 { blobs: vec![], commitments: vec![], proofs: vec![] },
            parent_beacon_block_root,
        }
    }
}
impl<T, N> From<OpBuiltPayload<N>> for OpExecutionPayloadEnvelopeV4
where
    T: SignedTransaction,
    N: NodePrimitives<Block = Block<T>>,
{
    /// Builds the `engine_getPayloadV4` response envelope from the built payload.
    fn from(value: OpBuiltPayload<N>) -> Self {
        let OpBuiltPayload { block, fees, .. } = value;
        let parent_beacon_block_root = block.parent_beacon_block_root.unwrap_or_default();
        // V4 additionally exposes the L2 withdrawals root from the header.
        let l2_withdrawals_root = block.withdrawals_root.unwrap_or_default();
        // Build the V3 payload first, then lift it into V4 with the
        // withdrawals root attached.
        let payload_v3 = ExecutionPayloadV3::from_block_unchecked(
            block.hash(),
            &Arc::unwrap_or_clone(block).into_block(),
        );
        Self {
            execution_payload: OpExecutionPayloadV4::from_v3_with_withdrawals_root(
                payload_v3,
                l2_withdrawals_root,
            ),
            block_value: fees,
            // From the engine API spec:
            //
            // > Client software **MAY** use any heuristics to decide whether to set
            // `shouldOverrideBuilder` flag or not. If client software does not implement any
            // heuristic this flag **SHOULD** be set to `false`.
            //
            // Spec:
            // <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#specification-2>
            should_override_builder: false,
            // No blobs for OP.
            blobs_bundle: BlobsBundleV1 { blobs: vec![], commitments: vec![], proofs: vec![] },
            parent_beacon_block_root,
            execution_requests: vec![],
        }
    }
}
/// Generates the payload id for the configured payload from the [`OpPayloadAttributes`].
///
/// Returns an 8-byte identifier by hashing the payload components with sha256 hash.
///
/// The first byte of the id is overwritten with `payload_version`, so ids
/// produced for different engine API versions never collide.
pub fn payload_id_optimism(
    parent: &B256,
    attributes: &OpPayloadAttributes,
    payload_version: u8,
) -> PayloadId {
    use sha2::Digest;
    let mut hasher = sha2::Sha256::new();
    // Core payload attributes, hashed in a fixed order. This order must match
    // op-geth's derivation so both produce identical ids (see the parity test
    // in this file's test module).
    hasher.update(parent.as_slice());
    hasher.update(&attributes.payload_attributes.timestamp.to_be_bytes()[..]);
    hasher.update(attributes.payload_attributes.prev_randao.as_slice());
    hasher.update(attributes.payload_attributes.suggested_fee_recipient.as_slice());
    if let Some(withdrawals) = &attributes.payload_attributes.withdrawals {
        let mut buf = Vec::new();
        withdrawals.encode(&mut buf);
        hasher.update(buf);
    }
    if let Some(parent_beacon_block) = attributes.payload_attributes.parent_beacon_block_root {
        hasher.update(parent_beacon_block);
    }
    // OP-specific fields only contribute to the hash when set, keeping ids
    // stable for payloads that don't use them.
    let no_tx_pool = attributes.no_tx_pool.unwrap_or_default();
    if no_tx_pool || attributes.transactions.as_ref().is_some_and(|txs| !txs.is_empty()) {
        hasher.update([no_tx_pool as u8]);
        let txs_len = attributes.transactions.as_ref().map(|txs| txs.len()).unwrap_or_default();
        hasher.update(&txs_len.to_be_bytes()[..]);
        if let Some(txs) = &attributes.transactions {
            for tx in txs {
                // we have to just hash the bytes here because otherwise we would need to decode
                // the transactions here which really isn't ideal
                let tx_hash = keccak256(tx);
                // maybe we can try just taking the hash and not decoding
                hasher.update(tx_hash)
            }
        }
    }
    if let Some(gas_limit) = attributes.gas_limit {
        hasher.update(gas_limit.to_be_bytes());
    }
    if let Some(eip_1559_params) = attributes.eip_1559_params {
        hasher.update(eip_1559_params.as_slice());
    }
    let mut out = hasher.finalize();
    // Tag the id with the engine API version in its first byte.
    out[0] = payload_version;
    PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length"))
}
impl<H, T, ChainSpec> BuildNextEnv<OpPayloadBuilderAttributes<T>, H, ChainSpec>
    for OpNextBlockEnvAttributes
where
    H: BlockHeader,
    T: SignedTransaction,
    ChainSpec: EthChainSpec + OpHardforks,
{
    /// Derives the next-block environment attributes from OP payload attributes,
    /// the parent header, and the chain spec.
    fn build_next_env(
        attributes: &OpPayloadBuilderAttributes<T>,
        parent: &SealedHeader<H>,
        chain_spec: &ChainSpec,
    ) -> Result<Self, PayloadBuilderError> {
        let timestamp = attributes.timestamp();

        // Post-Holocene the EIP-1559 parameters are encoded into the header
        // `extraData`; before Holocene the field stays empty.
        let mut extra_data = Default::default();
        if chain_spec.is_holocene_active_at_timestamp(timestamp) {
            extra_data = attributes
                .get_holocene_extra_data(chain_spec.base_fee_params_at_timestamp(timestamp))
                .map_err(PayloadBuilderError::other)?;
        }

        Ok(Self {
            timestamp,
            suggested_fee_recipient: attributes.suggested_fee_recipient(),
            prev_randao: attributes.prev_randao(),
            // Explicit gas limit from the attributes wins; otherwise inherit
            // the parent's.
            gas_limit: attributes.gas_limit.unwrap_or_else(|| parent.gas_limit()),
            parent_beacon_block_root: attributes.parent_beacon_block_root(),
            extra_data,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::OpPayloadAttributes;
    use alloy_primitives::{address, b256, bytes, FixedBytes};
    use alloy_rpc_types_engine::PayloadAttributes;
    use reth_optimism_primitives::OpTransactionSigned;
    use reth_payload_primitives::EngineApiMessageVersion;
    use std::str::FromStr;
    /// Verifies that `payload_id_optimism` produces the same id as op-geth for
    /// identical inputs (values captured from a rollup-boost log line).
    #[test]
    fn test_payload_id_parity_op_geth() {
        // INFO rollup_boost::server:received fork_choice_updated_v3 from builder and l2_client
        // payload_id_builder="0x6ef26ca02318dcf9" payload_id_l2="0x03d2dae446d2a86a"
        let expected =
            PayloadId::new(FixedBytes::<8>::from_str("0x03d2dae446d2a86a").unwrap().into());
        let attrs = OpPayloadAttributes {
            payload_attributes: PayloadAttributes {
                timestamp: 1728933301,
                prev_randao: b256!("0x9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"),
                suggested_fee_recipient: address!("0x4200000000000000000000000000000000000011"),
                withdrawals: Some([].into()),
                parent_beacon_block_root: b256!("0x8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into(),
            },
            transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()),
            no_tx_pool: None,
            gas_limit: Some(30000000),
            eip_1559_params: None,
        };
        // Reth's `PayloadId` must match op-geth's `PayloadId` for these inputs.
        assert_eq!(
            expected,
            payload_id_optimism(
                &b256!("0x3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"),
                &attrs,
                EngineApiMessageVersion::V3 as u8
            )
        );
    }
    /// Explicit non-zero EIP-1559 params are encoded verbatim into extra data.
    #[test]
    fn test_get_extra_data_post_holocene() {
        let attributes: OpPayloadBuilderAttributes<OpTransactionSigned> =
            OpPayloadBuilderAttributes {
                eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()),
                ..Default::default()
            };
        let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60));
        assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 8, 0, 0, 0, 8]));
    }
    /// Zeroed EIP-1559 params fall back to the supplied default base fee params.
    #[test]
    fn test_get_extra_data_post_holocene_default() {
        let attributes: OpPayloadBuilderAttributes<OpTransactionSigned> =
            OpPayloadBuilderAttributes { eip_1559_params: Some(B64::ZERO), ..Default::default() };
        let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60));
        assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60]));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.