repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/ext.rs | crates/rpc/rpc-eth-api/src/ext.rs | //! `eth_` Extension traits.
use alloy_primitives::{Bytes, B256};
use alloy_rpc_types_eth::erc4337::TransactionConditional;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Extension trait for `eth_` namespace for L2s.
///
/// Adds a single endpoint, `eth_sendRawTransactionConditional`, on top of the
/// standard `eth_` namespace.
// The `client` feature only toggles whether a jsonrpsee client implementation
// is generated alongside the server trait; the namespace is `eth` either way.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait L2EthApiExt {
    /// Sends signed transaction with the given condition.
    ///
    /// `bytes` is the raw signed transaction payload (presumably EIP-2718
    /// encoded, as for `eth_sendRawTransaction` — confirm against the server
    /// impl); `condition` constrains when the transaction may be included, see
    /// [`TransactionConditional`]. Returns the transaction hash on acceptance.
    #[method(name = "sendRawTransactionConditional")]
    async fn send_raw_transaction_conditional(
        &self,
        bytes: Bytes,
        condition: TransactionConditional,
    ) -> RpcResult<B256>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/filter.rs | crates/rpc/rpc-eth-api/src/filter.rs | //! `eth_` RPC API for filtering.
use alloy_json_rpc::RpcObject;
use alloy_rpc_types_eth::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use std::future::Future;
/// Rpc Interface for poll-based ethereum filter API.
///
/// Filters are installed server-side and identified by a [`FilterId`]; clients
/// then poll for updates via `eth_getFilterChanges`. `T` is the RPC
/// transaction object type carried by [`FilterChanges`].
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EthFilterApi<T: RpcObject> {
    /// Creates a new filter and returns its id.
    #[method(name = "newFilter")]
    async fn new_filter(&self, filter: Filter) -> RpcResult<FilterId>;

    /// Creates a new block filter and returns its id.
    #[method(name = "newBlockFilter")]
    async fn new_block_filter(&self) -> RpcResult<FilterId>;

    /// Creates a pending transaction filter and returns its id.
    ///
    /// `kind` selects the shape of polled entries (see
    /// [`PendingTransactionFilterKind`]); `None` leaves the choice to the
    /// server's default.
    #[method(name = "newPendingTransactionFilter")]
    async fn new_pending_transaction_filter(
        &self,
        kind: Option<PendingTransactionFilterKind>,
    ) -> RpcResult<FilterId>;

    /// Returns all filter changes since last poll.
    #[method(name = "getFilterChanges")]
    async fn filter_changes(&self, id: FilterId) -> RpcResult<FilterChanges<T>>;

    /// Returns all logs matching given filter (in a range 'from' - 'to').
    #[method(name = "getFilterLogs")]
    async fn filter_logs(&self, id: FilterId) -> RpcResult<Vec<Log>>;

    /// Uninstalls filter.
    #[method(name = "uninstallFilter")]
    async fn uninstall_filter(&self, id: FilterId) -> RpcResult<bool>;

    /// Returns logs matching given filter object.
    ///
    /// Unlike the methods above, this is a one-shot query and does not
    /// install a filter.
    #[method(name = "getLogs")]
    async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>>;
}
/// Limits for logs queries
///
/// Both fields are optional; a `None` field means no limit is configured for
/// that dimension (this matches the `Default` impl, where both are `None`).
#[derive(Default, Debug, Clone, Copy)]
pub struct QueryLimits {
    /// Maximum number of blocks that could be scanned per filter
    pub max_blocks_per_filter: Option<u64>,
    /// Maximum number of logs that can be returned in a response
    pub max_logs_per_response: Option<usize>,
}

impl QueryLimits {
    /// Construct an object with no limits (more explicit than using default constructor)
    ///
    /// Equivalent to [`QueryLimits::default`], but `const` so it can be used
    /// in constant contexts (`Default::default()` is not const-callable).
    pub const fn no_limits() -> Self {
        Self { max_blocks_per_filter: None, max_logs_per_response: None }
    }
}
/// Rpc Interface for poll-based ethereum filter API, implementing only the `eth_getLogs` method.
/// Used for the engine API, with possibility to specify [`QueryLimits`].
pub trait EngineEthFilter: Send + Sync + 'static {
    /// Returns logs matching given filter object.
    ///
    /// `limits` supplies per-query caps on scanned block range and response
    /// size; presumably `None` fields disable the respective check — confirm
    /// against the implementation.
    // RPITIT (`impl Future ... + Send`) instead of `async fn` keeps the
    // returned future explicitly `Send`, which `async fn` in traits does not
    // guarantee to callers.
    fn logs(
        &self,
        filter: Filter,
        limits: QueryLimits,
    ) -> impl Future<Output = RpcResult<Vec<Log>>> + Send;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/core.rs | crates/rpc/rpc-eth-api/src/core.rs | //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for
//! the `eth_` namespace.
use crate::{
helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi},
RpcBlock, RpcHeader, RpcReceipt, RpcTransaction,
};
use alloy_dyn_abi::TypedData;
use alloy_eips::{eip2930::AccessListResult, BlockId, BlockNumberOrTag};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Address, Bytes, B256, B64, U256, U64};
use alloy_rpc_types_eth::{
simulate::{SimulatePayload, SimulatedBlock},
state::{EvmOverrides, StateOverride},
BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Index,
StateContext, SyncStatus, Work,
};
use alloy_serde::JsonStorageKey;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use reth_rpc_convert::RpcTxReq;
use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
use tracing::trace;
/// Helper trait, unifies functionality that must be supported to implement all RPC methods for
/// server.
///
/// This is a trait-alias pattern: it has no methods of its own and is
/// blanket-implemented for every `Clone + FullEthApi` type that also
/// implements the generated `EthApiServer` trait over its network types,
/// so it never needs to be implemented manually.
pub trait FullEthApiServer:
    EthApiServer<
        RpcTxReq<Self::NetworkTypes>,
        RpcTransaction<Self::NetworkTypes>,
        RpcBlock<Self::NetworkTypes>,
        RpcReceipt<Self::NetworkTypes>,
        RpcHeader<Self::NetworkTypes>,
    > + FullEthApi
    + Clone
{
}
// Blanket impl: any `FullEthApi + Clone` type that implements the generated
// `EthApiServer` trait over its own network types automatically satisfies
// `FullEthApiServer`. The body is empty because the trait has no methods.
impl<T> FullEthApiServer for T where
    T: EthApiServer<
            RpcTxReq<T::NetworkTypes>,
            RpcTransaction<T::NetworkTypes>,
            RpcBlock<T::NetworkTypes>,
            RpcReceipt<T::NetworkTypes>,
            RpcHeader<T::NetworkTypes>,
        > + FullEthApi
        + Clone
{
}
/// Eth rpc interface: <https://ethereum.github.io/execution-apis/api-documentation>
///
/// Generic parameters (all network-dependent RPC object types, as used in the
/// method signatures below):
/// - `TxReq`: transaction request type (`call`, `estimateGas`, `sendTransaction`, ...)
/// - `T`: transaction response type
/// - `B`: block response type
/// - `R`: receipt response type
/// - `H`: header response type
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EthApi<TxReq: RpcObject, T: RpcObject, B: RpcObject, R: RpcObject, H: RpcObject> {
    /// Returns the protocol version encoded as a string.
    #[method(name = "protocolVersion")]
    async fn protocol_version(&self) -> RpcResult<U64>;

    /// Returns an object with data about the sync status or false.
    // Note: deliberately not `async` — sync status is expected to be cheaply
    // readable; confirm against the server impl before changing.
    #[method(name = "syncing")]
    fn syncing(&self) -> RpcResult<SyncStatus>;

    /// Returns the client coinbase address.
    #[method(name = "coinbase")]
    async fn author(&self) -> RpcResult<Address>;

    /// Returns a list of addresses owned by client.
    #[method(name = "accounts")]
    fn accounts(&self) -> RpcResult<Vec<Address>>;

    /// Returns the number of most recent block.
    #[method(name = "blockNumber")]
    fn block_number(&self) -> RpcResult<U256>;

    /// Returns the chain ID of the current network.
    #[method(name = "chainId")]
    async fn chain_id(&self) -> RpcResult<Option<U64>>;

    /// Returns information about a block by hash.
    ///
    /// `full`: when true, returns full transaction objects; otherwise only
    /// transaction hashes.
    #[method(name = "getBlockByHash")]
    async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult<Option<B>>;

    /// Returns information about a block by number.
    #[method(name = "getBlockByNumber")]
    async fn block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult<Option<B>>;

    /// Returns the number of transactions in a block from a block matching the given block hash.
    #[method(name = "getBlockTransactionCountByHash")]
    async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>>;

    /// Returns the number of transactions in a block matching the given block number.
    #[method(name = "getBlockTransactionCountByNumber")]
    async fn block_transaction_count_by_number(
        &self,
        number: BlockNumberOrTag,
    ) -> RpcResult<Option<U256>>;

    /// Returns the number of uncles in a block from a block matching the given block hash.
    #[method(name = "getUncleCountByBlockHash")]
    async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>>;

    /// Returns the number of uncles in a block with given block number.
    #[method(name = "getUncleCountByBlockNumber")]
    async fn block_uncles_count_by_number(
        &self,
        number: BlockNumberOrTag,
    ) -> RpcResult<Option<U256>>;

    /// Returns all transaction receipts for a given block.
    ///
    /// Returns `None` if the block is unknown.
    #[method(name = "getBlockReceipts")]
    async fn block_receipts(&self, block_id: BlockId) -> RpcResult<Option<Vec<R>>>;

    /// Returns an uncle block of the given block and index.
    #[method(name = "getUncleByBlockHashAndIndex")]
    async fn uncle_by_block_hash_and_index(&self, hash: B256, index: Index)
        -> RpcResult<Option<B>>;

    /// Returns an uncle block of the given block and index.
    #[method(name = "getUncleByBlockNumberAndIndex")]
    async fn uncle_by_block_number_and_index(
        &self,
        number: BlockNumberOrTag,
        index: Index,
    ) -> RpcResult<Option<B>>;

    /// Returns the EIP-2718 encoded transaction if it exists.
    ///
    /// If this is a EIP-4844 transaction that is in the pool it will include the sidecar.
    #[method(name = "getRawTransactionByHash")]
    async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult<Option<Bytes>>;

    /// Returns the information about a transaction requested by transaction hash.
    #[method(name = "getTransactionByHash")]
    async fn transaction_by_hash(&self, hash: B256) -> RpcResult<Option<T>>;

    /// Returns information about a raw transaction by block hash and transaction index position.
    #[method(name = "getRawTransactionByBlockHashAndIndex")]
    async fn raw_transaction_by_block_hash_and_index(
        &self,
        hash: B256,
        index: Index,
    ) -> RpcResult<Option<Bytes>>;

    /// Returns information about a transaction by block hash and transaction index position.
    #[method(name = "getTransactionByBlockHashAndIndex")]
    async fn transaction_by_block_hash_and_index(
        &self,
        hash: B256,
        index: Index,
    ) -> RpcResult<Option<T>>;

    /// Returns information about a raw transaction by block number and transaction index
    /// position.
    #[method(name = "getRawTransactionByBlockNumberAndIndex")]
    async fn raw_transaction_by_block_number_and_index(
        &self,
        number: BlockNumberOrTag,
        index: Index,
    ) -> RpcResult<Option<Bytes>>;

    /// Returns information about a transaction by block number and transaction index position.
    #[method(name = "getTransactionByBlockNumberAndIndex")]
    async fn transaction_by_block_number_and_index(
        &self,
        number: BlockNumberOrTag,
        index: Index,
    ) -> RpcResult<Option<T>>;

    /// Returns information about a transaction by sender and nonce.
    // NOTE(review): not part of the standard execution-apis spec; appears to
    // be a client extension — confirm before relying on cross-client support.
    #[method(name = "getTransactionBySenderAndNonce")]
    async fn transaction_by_sender_and_nonce(
        &self,
        address: Address,
        nonce: U64,
    ) -> RpcResult<Option<T>>;

    /// Returns the receipt of a transaction by transaction hash.
    #[method(name = "getTransactionReceipt")]
    async fn transaction_receipt(&self, hash: B256) -> RpcResult<Option<R>>;

    /// Returns the balance of the account of given address.
    ///
    /// `block_number` of `None` defers to the server's default block
    /// selection.
    #[method(name = "getBalance")]
    async fn balance(&self, address: Address, block_number: Option<BlockId>) -> RpcResult<U256>;

    /// Returns the value from a storage position at a given address
    #[method(name = "getStorageAt")]
    async fn storage_at(
        &self,
        address: Address,
        index: JsonStorageKey,
        block_number: Option<BlockId>,
    ) -> RpcResult<B256>;

    /// Returns the number of transactions sent from an address at given block number.
    #[method(name = "getTransactionCount")]
    async fn transaction_count(
        &self,
        address: Address,
        block_number: Option<BlockId>,
    ) -> RpcResult<U256>;

    /// Returns code at a given address at given block number.
    #[method(name = "getCode")]
    async fn get_code(&self, address: Address, block_number: Option<BlockId>) -> RpcResult<Bytes>;

    /// Returns the block's header at given number.
    // NOTE(review): the parameter is named `hash` but is a `BlockNumberOrTag`;
    // the name is a misnomer kept for compatibility with existing callers.
    #[method(name = "getHeaderByNumber")]
    async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult<Option<H>>;

    /// Returns the block's header at given hash.
    #[method(name = "getHeaderByHash")]
    async fn header_by_hash(&self, hash: B256) -> RpcResult<Option<H>>;

    /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state.
    /// The transactions are packed into individual blocks. Overrides can be provided.
    #[method(name = "simulateV1")]
    async fn simulate_v1(
        &self,
        opts: SimulatePayload<TxReq>,
        block_number: Option<BlockId>,
    ) -> RpcResult<Vec<SimulatedBlock<B>>>;

    /// Executes a new message call immediately without creating a transaction on the block chain.
    #[method(name = "call")]
    async fn call(
        &self,
        request: TxReq,
        block_number: Option<BlockId>,
        state_overrides: Option<StateOverride>,
        block_overrides: Option<Box<BlockOverrides>>,
    ) -> RpcResult<Bytes>;

    /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the
    /// optionality of state overrides
    #[method(name = "callMany")]
    async fn call_many(
        &self,
        bundles: Vec<Bundle<TxReq>>,
        state_context: Option<StateContext>,
        state_override: Option<StateOverride>,
    ) -> RpcResult<Vec<Vec<EthCallResponse>>>;

    /// Generates an access list for a transaction.
    ///
    /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction.
    ///
    /// An access list contains all storage slots and addresses touched by the transaction, except
    /// for the sender account and the chain's precompiles.
    ///
    /// It returns list of addresses and storage keys used by the transaction, plus the gas
    /// consumed when the access list is added. That is, it gives you the list of addresses and
    /// storage keys that will be used by that transaction, plus the gas consumed if the access
    /// list is included. Like `eth_estimateGas`, this is an estimation; the list could change
    /// when the transaction is actually mined. Adding an accessList to your transaction does
    /// not necessary result in lower gas usage compared to a transaction without an access
    /// list.
    #[method(name = "createAccessList")]
    async fn create_access_list(
        &self,
        request: TxReq,
        block_number: Option<BlockId>,
        state_override: Option<StateOverride>,
    ) -> RpcResult<AccessListResult>;

    /// Generates and returns an estimate of how much gas is necessary to allow the transaction to
    /// complete.
    #[method(name = "estimateGas")]
    async fn estimate_gas(
        &self,
        request: TxReq,
        block_number: Option<BlockId>,
        state_override: Option<StateOverride>,
    ) -> RpcResult<U256>;

    /// Returns the current price per gas in wei.
    #[method(name = "gasPrice")]
    async fn gas_price(&self) -> RpcResult<U256>;

    /// Returns the account details by specifying an address and a block number/tag
    #[method(name = "getAccount")]
    async fn get_account(
        &self,
        address: Address,
        block: BlockId,
    ) -> RpcResult<Option<alloy_rpc_types_eth::Account>>;

    /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions.
    #[method(name = "maxPriorityFeePerGas")]
    async fn max_priority_fee_per_gas(&self) -> RpcResult<U256>;

    /// Introduced in EIP-4844, returns the current blob base fee in wei.
    #[method(name = "blobBaseFee")]
    async fn blob_base_fee(&self) -> RpcResult<U256>;

    /// Returns the Transaction fee history
    ///
    /// Introduced in EIP-1559 for getting information on the appropriate priority fee to use.
    ///
    /// Returns transaction base fee per gas and effective priority fee per gas for the
    /// requested/supported block range. The returned Fee history for the returned block range
    /// can be a subsection of the requested range if not all blocks are available.
    #[method(name = "feeHistory")]
    async fn fee_history(
        &self,
        block_count: U64,
        newest_block: BlockNumberOrTag,
        reward_percentiles: Option<Vec<f64>>,
    ) -> RpcResult<FeeHistory>;

    /// Returns whether the client is actively mining new blocks.
    #[method(name = "mining")]
    async fn is_mining(&self) -> RpcResult<bool>;

    /// Returns the number of hashes per second that the node is mining with.
    #[method(name = "hashrate")]
    async fn hashrate(&self) -> RpcResult<U256>;

    /// Returns the hash of the current block, the seedHash, and the boundary condition to be met
    /// (`target`)
    #[method(name = "getWork")]
    async fn get_work(&self) -> RpcResult<Work>;

    /// Used for submitting mining hashrate.
    ///
    /// Can be used for remote miners to submit their hash rate.
    /// It accepts the miner hash rate and an identifier which must be unique between nodes.
    /// Returns `true` if the block was successfully submitted, `false` otherwise.
    #[method(name = "submitHashrate")]
    async fn submit_hashrate(&self, hashrate: U256, id: B256) -> RpcResult<bool>;

    /// Used for submitting a proof-of-work solution.
    #[method(name = "submitWork")]
    async fn submit_work(&self, nonce: B64, pow_hash: B256, mix_digest: B256) -> RpcResult<bool>;

    /// Sends transaction; will block waiting for signer to return the
    /// transaction hash.
    #[method(name = "sendTransaction")]
    async fn send_transaction(&self, request: TxReq) -> RpcResult<B256>;

    /// Sends signed transaction, returning its hash.
    #[method(name = "sendRawTransaction")]
    async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult<B256>;

    /// Sends a signed transaction and awaits the transaction receipt.
    ///
    /// This will return a timeout error if the transaction isn't included within some time period.
    #[method(name = "sendRawTransactionSync")]
    async fn send_raw_transaction_sync(&self, bytes: Bytes) -> RpcResult<R>;

    /// Returns an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n"
    /// + len(message) + message))).
    #[method(name = "sign")]
    async fn sign(&self, address: Address, message: Bytes) -> RpcResult<Bytes>;

    /// Signs a transaction that can be submitted to the network at a later time using with
    /// `sendRawTransaction.`
    #[method(name = "signTransaction")]
    async fn sign_transaction(&self, transaction: TxReq) -> RpcResult<Bytes>;

    /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md).
    #[method(name = "signTypedData")]
    async fn sign_typed_data(&self, address: Address, data: TypedData) -> RpcResult<Bytes>;

    /// Returns the account and storage values of the specified account including the Merkle-proof.
    /// This call can be used to verify that the data you are pulling from is not tampered with.
    #[method(name = "getProof")]
    async fn get_proof(
        &self,
        address: Address,
        keys: Vec<JsonStorageKey>,
        block_number: Option<BlockId>,
    ) -> RpcResult<EIP1186AccountProofResponse>;

    /// Returns the account's balance, nonce, and code.
    ///
    /// This is similar to `eth_getAccount` but does not return the storage root.
    #[method(name = "getAccountInfo")]
    async fn get_account_info(
        &self,
        address: Address,
        block: BlockId,
    ) -> RpcResult<alloy_rpc_types_eth::AccountInfo>;
}
#[async_trait::async_trait]
impl<T>
EthApiServer<
RpcTxReq<T::NetworkTypes>,
RpcTransaction<T::NetworkTypes>,
RpcBlock<T::NetworkTypes>,
RpcReceipt<T::NetworkTypes>,
RpcHeader<T::NetworkTypes>,
> for T
where
T: FullEthApi,
jsonrpsee_types::error::ErrorObject<'static>: From<T::Error>,
{
/// Handler for: `eth_protocolVersion`
async fn protocol_version(&self) -> RpcResult<U64> {
trace!(target: "rpc::eth", "Serving eth_protocolVersion");
EthApiSpec::protocol_version(self).await.to_rpc_result()
}
/// Handler for: `eth_syncing`
fn syncing(&self) -> RpcResult<SyncStatus> {
trace!(target: "rpc::eth", "Serving eth_syncing");
EthApiSpec::sync_status(self).to_rpc_result()
}
/// Handler for: `eth_coinbase`
async fn author(&self) -> RpcResult<Address> {
Err(internal_rpc_err("unimplemented"))
}
/// Handler for: `eth_accounts`
fn accounts(&self) -> RpcResult<Vec<Address>> {
trace!(target: "rpc::eth", "Serving eth_accounts");
Ok(EthApiSpec::accounts(self))
}
/// Handler for: `eth_blockNumber`
fn block_number(&self) -> RpcResult<U256> {
trace!(target: "rpc::eth", "Serving eth_blockNumber");
Ok(U256::from(
EthApiSpec::chain_info(self).with_message("failed to read chain info")?.best_number,
))
}
/// Handler for: `eth_chainId`
async fn chain_id(&self) -> RpcResult<Option<U64>> {
trace!(target: "rpc::eth", "Serving eth_chainId");
Ok(Some(EthApiSpec::chain_id(self)))
}
/// Handler for: `eth_getBlockByHash`
async fn block_by_hash(
&self,
hash: B256,
full: bool,
) -> RpcResult<Option<RpcBlock<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash");
Ok(EthBlocks::rpc_block(self, hash.into(), full).await?)
}
/// Handler for: `eth_getBlockByNumber`
async fn block_by_number(
&self,
number: BlockNumberOrTag,
full: bool,
) -> RpcResult<Option<RpcBlock<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber");
Ok(EthBlocks::rpc_block(self, number.into(), full).await?)
}
/// Handler for: `eth_getBlockTransactionCountByHash`
async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash");
Ok(EthBlocks::block_transaction_count(self, hash.into()).await?.map(U256::from))
}
/// Handler for: `eth_getBlockTransactionCountByNumber`
async fn block_transaction_count_by_number(
&self,
number: BlockNumberOrTag,
) -> RpcResult<Option<U256>> {
trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber");
Ok(EthBlocks::block_transaction_count(self, number.into()).await?.map(U256::from))
}
/// Handler for: `eth_getUncleCountByBlockHash`
async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getUncleCountByBlockHash");
if let Some(block) = self.block_by_hash(hash, false).await? {
Ok(Some(U256::from(block.uncles.len())))
} else {
Ok(None)
}
}
/// Handler for: `eth_getUncleCountByBlockNumber`
async fn block_uncles_count_by_number(
&self,
number: BlockNumberOrTag,
) -> RpcResult<Option<U256>> {
trace!(target: "rpc::eth", ?number, "Serving eth_getUncleCountByBlockNumber");
if let Some(block) = self.block_by_number(number, false).await? {
Ok(Some(U256::from(block.uncles.len())))
} else {
Ok(None)
}
}
/// Handler for: `eth_getBlockReceipts`
async fn block_receipts(
&self,
block_id: BlockId,
) -> RpcResult<Option<Vec<RpcReceipt<T::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts");
Ok(EthBlocks::block_receipts(self, block_id).await?)
}
/// Handler for: `eth_getUncleByBlockHashAndIndex`
async fn uncle_by_block_hash_and_index(
&self,
hash: B256,
index: Index,
) -> RpcResult<Option<RpcBlock<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getUncleByBlockHashAndIndex");
Ok(EthBlocks::ommer_by_block_and_index(self, hash.into(), index).await?)
}
/// Handler for: `eth_getUncleByBlockNumberAndIndex`
async fn uncle_by_block_number_and_index(
&self,
number: BlockNumberOrTag,
index: Index,
) -> RpcResult<Option<RpcBlock<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getUncleByBlockNumberAndIndex");
Ok(EthBlocks::ommer_by_block_and_index(self, number.into(), index).await?)
}
/// Handler for: `eth_getRawTransactionByHash`
async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult<Option<Bytes>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getRawTransactionByHash");
Ok(EthTransactions::raw_transaction_by_hash(self, hash).await?)
}
/// Handler for: `eth_getTransactionByHash`
async fn transaction_by_hash(
&self,
hash: B256,
) -> RpcResult<Option<RpcTransaction<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash");
Ok(EthTransactions::transaction_by_hash(self, hash)
.await?
.map(|tx| tx.into_transaction(self.tx_resp_builder()))
.transpose()?)
}
/// Handler for: `eth_getRawTransactionByBlockHashAndIndex`
async fn raw_transaction_by_block_hash_and_index(
&self,
hash: B256,
index: Index,
) -> RpcResult<Option<Bytes>> {
trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex");
Ok(EthTransactions::raw_transaction_by_block_and_tx_index(self, hash.into(), index.into())
.await?)
}
/// Handler for: `eth_getTransactionByBlockHashAndIndex`
async fn transaction_by_block_hash_and_index(
&self,
hash: B256,
index: Index,
) -> RpcResult<Option<RpcTransaction<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex");
Ok(EthTransactions::transaction_by_block_and_tx_index(self, hash.into(), index.into())
.await?)
}
/// Handler for: `eth_getRawTransactionByBlockNumberAndIndex`
async fn raw_transaction_by_block_number_and_index(
&self,
number: BlockNumberOrTag,
index: Index,
) -> RpcResult<Option<Bytes>> {
trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex");
Ok(EthTransactions::raw_transaction_by_block_and_tx_index(
self,
number.into(),
index.into(),
)
.await?)
}
/// Handler for: `eth_getTransactionByBlockNumberAndIndex`
async fn transaction_by_block_number_and_index(
&self,
number: BlockNumberOrTag,
index: Index,
) -> RpcResult<Option<RpcTransaction<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex");
Ok(EthTransactions::transaction_by_block_and_tx_index(self, number.into(), index.into())
.await?)
}
/// Handler for: `eth_getTransactionBySenderAndNonce`
async fn transaction_by_sender_and_nonce(
&self,
sender: Address,
nonce: U64,
) -> RpcResult<Option<RpcTransaction<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?sender, ?nonce, "Serving eth_getTransactionBySenderAndNonce");
Ok(EthTransactions::get_transaction_by_sender_and_nonce(self, sender, nonce.to(), true)
.await?)
}
/// Handler for: `eth_getTransactionReceipt`
async fn transaction_receipt(
&self,
hash: B256,
) -> RpcResult<Option<RpcReceipt<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt");
Ok(EthTransactions::transaction_receipt(self, hash).await?)
}
/// Handler for: `eth_getBalance`
async fn balance(&self, address: Address, block_number: Option<BlockId>) -> RpcResult<U256> {
trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance");
Ok(EthState::balance(self, address, block_number).await?)
}
/// Handler for: `eth_getStorageAt`
async fn storage_at(
&self,
address: Address,
index: JsonStorageKey,
block_number: Option<BlockId>,
) -> RpcResult<B256> {
trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getStorageAt");
Ok(EthState::storage_at(self, address, index, block_number).await?)
}
/// Handler for: `eth_getTransactionCount`
async fn transaction_count(
&self,
address: Address,
block_number: Option<BlockId>,
) -> RpcResult<U256> {
trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount");
Ok(EthState::transaction_count(self, address, block_number).await?)
}
/// Handler for: `eth_getCode`
async fn get_code(&self, address: Address, block_number: Option<BlockId>) -> RpcResult<Bytes> {
trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode");
Ok(EthState::get_code(self, address, block_number).await?)
}
/// Handler for: `eth_getHeaderByNumber`
async fn header_by_number(
&self,
block_number: BlockNumberOrTag,
) -> RpcResult<Option<RpcHeader<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber");
Ok(EthBlocks::rpc_block_header(self, block_number.into()).await?)
}
/// Handler for: `eth_getHeaderByHash`
async fn header_by_hash(&self, hash: B256) -> RpcResult<Option<RpcHeader<T::NetworkTypes>>> {
trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash");
Ok(EthBlocks::rpc_block_header(self, hash.into()).await?)
}
/// Handler for: `eth_simulateV1`
async fn simulate_v1(
&self,
payload: SimulatePayload<RpcTxReq<T::NetworkTypes>>,
block_number: Option<BlockId>,
) -> RpcResult<Vec<SimulatedBlock<RpcBlock<T::NetworkTypes>>>> {
trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1");
let _permit = self.tracing_task_guard().clone().acquire_owned().await;
Ok(EthCall::simulate_v1(self, payload, block_number).await?)
}
/// Handler for: `eth_call`
async fn call(
&self,
request: RpcTxReq<T::NetworkTypes>,
block_number: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> RpcResult<Bytes> {
trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call");
Ok(EthCall::call(
self,
request,
block_number,
EvmOverrides::new(state_overrides, block_overrides),
)
.await?)
}
/// Handler for: `eth_callMany`
async fn call_many(
&self,
bundles: Vec<Bundle<RpcTxReq<T::NetworkTypes>>>,
state_context: Option<StateContext>,
state_override: Option<StateOverride>,
) -> RpcResult<Vec<Vec<EthCallResponse>>> {
trace!(target: "rpc::eth", ?bundles, ?state_context, ?state_override, "Serving eth_callMany");
Ok(EthCall::call_many(self, bundles, state_context, state_override).await?)
}
/// Handler for: `eth_createAccessList`
async fn create_access_list(
&self,
request: RpcTxReq<T::NetworkTypes>,
block_number: Option<BlockId>,
state_override: Option<StateOverride>,
) -> RpcResult<AccessListResult> {
trace!(target: "rpc::eth", ?request, ?block_number, ?state_override, "Serving eth_createAccessList");
Ok(EthCall::create_access_list_at(self, request, block_number, state_override).await?)
}
/// Handler for: `eth_estimateGas`
async fn estimate_gas(
&self,
request: RpcTxReq<T::NetworkTypes>,
block_number: Option<BlockId>,
state_override: Option<StateOverride>,
) -> RpcResult<U256> {
trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas");
Ok(EthCall::estimate_gas_at(
self,
request,
block_number.unwrap_or_default(),
state_override,
)
.await?)
}
/// Handler for: `eth_gasPrice`
async fn gas_price(&self) -> RpcResult<U256> {
trace!(target: "rpc::eth", "Serving eth_gasPrice");
Ok(EthFees::gas_price(self).await?)
}
/// Handler for: `eth_getAccount`
async fn get_account(
&self,
address: Address,
block: BlockId,
) -> RpcResult<Option<alloy_rpc_types_eth::Account>> {
trace!(target: "rpc::eth", "Serving eth_getAccount");
Ok(EthState::get_account(self, address, block).await?)
}
/// Handler for: `eth_maxPriorityFeePerGas`
async fn max_priority_fee_per_gas(&self) -> RpcResult<U256> {
trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas");
Ok(EthFees::suggested_priority_fee(self).await?)
}
/// Handler for: `eth_blobBaseFee`
async fn blob_base_fee(&self) -> RpcResult<U256> {
trace!(target: "rpc::eth", "Serving eth_blobBaseFee");
Ok(EthFees::blob_base_fee(self).await?)
}
// FeeHistory is calculated based on lazy evaluation of fees for historical blocks, and further
// caching of it in the LRU cache.
// When new RPC call is executed, the cache gets locked, we check it for the historical fees
// according to the requested block range, and fill any cache misses (in both RPC response
// and cache itself) with the actual data queried from the database.
// To minimize the number of database seeks required to query the missing data, we calculate the
// first non-cached block number and last non-cached block number. After that, we query this
// range of consecutive blocks from the database.
/// Handler for: `eth_feeHistory`
async fn fee_history(
&self,
block_count: U64,
newest_block: BlockNumberOrTag,
reward_percentiles: Option<Vec<f64>>,
) -> RpcResult<FeeHistory> {
trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory");
Ok(EthFees::fee_history(self, block_count.to(), newest_block, reward_percentiles).await?)
}
/// Handler for: `eth_mining`
async fn is_mining(&self) -> RpcResult<bool> {
Err(internal_rpc_err("unimplemented"))
}
/// Handler for: `eth_hashrate`
async fn hashrate(&self) -> RpcResult<U256> {
Ok(U256::ZERO)
}
/// Handler for: `eth_getWork`
async fn get_work(&self) -> RpcResult<Work> {
Err(internal_rpc_err("unimplemented"))
}
/// Handler for: `eth_submitHashrate`
async fn submit_hashrate(&self, _hashrate: U256, _id: B256) -> RpcResult<bool> {
Ok(false)
}
/// Handler for: `eth_submitWork`
async fn submit_work(
&self,
_nonce: B64,
_pow_hash: B256,
_mix_digest: B256,
) -> RpcResult<bool> {
Err(internal_rpc_err("unimplemented"))
}
/// Handler for: `eth_sendTransaction`
async fn send_transaction(&self, request: RpcTxReq<T::NetworkTypes>) -> RpcResult<B256> {
trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction");
Ok(EthTransactions::send_transaction(self, request).await?)
}
/// Handler for: `eth_sendRawTransaction`
async fn send_raw_transaction(&self, tx: Bytes) -> RpcResult<B256> {
trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransaction");
Ok(EthTransactions::send_raw_transaction(self, tx).await?)
}
/// Handler for: `eth_sendRawTransactionSync`
async fn send_raw_transaction_sync(&self, tx: Bytes) -> RpcResult<RpcReceipt<T::NetworkTypes>> {
trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransactionSync");
Ok(EthTransactions::send_raw_transaction_sync(self, tx).await?)
}
/// Handler for: `eth_sign`
async fn sign(&self, address: Address, message: Bytes) -> RpcResult<Bytes> {
trace!(target: "rpc::eth", ?address, ?message, "Serving eth_sign");
Ok(EthTransactions::sign(self, address, message).await?)
}
/// Handler for: `eth_signTransaction`
async fn sign_transaction(&self, request: RpcTxReq<T::NetworkTypes>) -> RpcResult<Bytes> {
trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction");
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/pubsub.rs | crates/rpc/rpc-eth-api/src/pubsub.rs | //! `eth_` RPC API for pubsub subscription.
use alloy_json_rpc::RpcObject;
use alloy_rpc_types_eth::pubsub::{Params, SubscriptionKind};
use jsonrpsee::proc_macros::rpc;
/// Ethereum pub-sub rpc interface.
#[rpc(server, namespace = "eth")]
pub trait EthPubSubApi<T: RpcObject> {
/// Create an ethereum subscription for the given params
#[subscription(
name = "subscribe" => "subscription",
unsubscribe = "unsubscribe",
item = alloy_rpc_types::pubsub::SubscriptionResult
)]
async fn subscribe(
&self,
kind: SubscriptionKind,
params: Option<Params>,
) -> jsonrpsee::core::SubscriptionResult;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/types.rs | crates/rpc/rpc-eth-api/src/types.rs | //! Trait for specifying `eth` network dependent API types.
use crate::{AsEthApiError, FromEthApiError, RpcNodeCore};
use alloy_rpc_types_eth::Block;
use reth_chain_state::CanonStateSubscriptions;
use reth_rpc_convert::RpcConvert;
pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes};
use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider};
use reth_transaction_pool::{PoolTransaction, TransactionPool};
use std::{
error::Error,
fmt::{self},
};
/// Network specific `eth` API types.
///
/// This trait defines the network specific rpc types and helpers required for the `eth_` and
/// adjacent endpoints. `NetworkTypes` is [`alloy_network::Network`] as defined by the alloy crate,
/// see also [`alloy_network::Ethereum`].
///
/// This type is stateful so that it can provide additional context if necessary, e.g. populating
/// receipts with additional data.
pub trait EthApiTypes: Send + Sync + Clone {
/// Extension of [`FromEthApiError`], with network specific errors.
type Error: Into<jsonrpsee_types::error::ErrorObject<'static>>
+ FromEthApiError
+ AsEthApiError
+ Error
+ Send
+ Sync;
/// Blockchain primitive types, specific to network, e.g. block and transaction.
type NetworkTypes: RpcTypes;
/// Conversion methods for transaction RPC type.
type RpcConvert: Send + Sync + Clone + fmt::Debug;
/// Returns reference to transaction response builder.
fn tx_resp_builder(&self) -> &Self::RpcConvert;
}
/// Adapter for network specific block type.
pub type RpcBlock<T> = Block<RpcTransaction<T>, RpcHeader<T>>;
/// Adapter for network specific receipt type.
pub type RpcReceipt<T> = <T as RpcTypes>::Receipt;
/// Adapter for network specific header type.
pub type RpcHeader<T> = <T as RpcTypes>::Header;
/// Adapter for network specific error type.
pub type RpcError<T> = <T as EthApiTypes>::Error;
/// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API.
pub trait FullEthApiTypes
where
Self: RpcNodeCore<
Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions,
Pool: TransactionPool<
Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>,
>,
> + EthApiTypes<
RpcConvert: RpcConvert<
Primitives = Self::Primitives,
Network = Self::NetworkTypes,
Error = RpcError<Self>,
>,
>,
{
}
impl<T> FullEthApiTypes for T where
T: RpcNodeCore<
Provider: TransactionsProvider + ReceiptProvider + CanonStateSubscriptions,
Pool: TransactionPool<
Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>,
>,
> + EthApiTypes<
RpcConvert: RpcConvert<
Primitives = <Self as RpcNodeCore>::Primitives,
Network = Self::NetworkTypes,
Error = RpcError<T>,
>,
>
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/config.rs | crates/rpc/rpc-eth-api/src/helpers/config.rs | //! Loads chain configuration.
use alloy_consensus::{BlockHeader, Header};
use alloy_eips::eip7910::{EthConfig, EthForkConfig, SystemContract};
use alloy_primitives::Address;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks, Hardforks, Head};
use reth_errors::{ProviderError, RethError};
use reth_evm::{
precompiles::{Precompile, PrecompilesMap},
ConfigureEvm, Evm,
};
use reth_node_api::NodePrimitives;
use reth_rpc_eth_types::EthApiError;
use reth_storage_api::BlockReaderIdExt;
use revm::precompile::PrecompileId;
use std::{borrow::Borrow, collections::BTreeMap};
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EthConfigApi {
/// Returns an object with data about recent and upcoming fork configurations.
#[method(name = "config")]
fn config(&self) -> RpcResult<EthConfig>;
}
/// Handler for the `eth_config` RPC endpoint.
///
/// Ref: <https://eips.ethereum.org/EIPS/eip-7910>
#[derive(Debug, Clone)]
pub struct EthConfigHandler<Provider, Evm> {
provider: Provider,
#[allow(unused)]
evm_config: Evm,
}
impl<Provider, Evm> EthConfigHandler<Provider, Evm>
where
Provider: ChainSpecProvider<ChainSpec: Hardforks + EthereumHardforks>
+ BlockReaderIdExt<Header = Header>
+ 'static,
Evm: ConfigureEvm<Primitives: NodePrimitives<BlockHeader = Header>> + 'static,
{
/// Creates a new [`EthConfigHandler`].
pub const fn new(provider: Provider, evm_config: Evm) -> Self {
Self { provider, evm_config }
}
/// Returns fork config for specific timestamp.
/// Returns [`None`] if no blob params were found for this fork.
fn build_fork_config_at(
&self,
timestamp: u64,
precompiles: BTreeMap<String, Address>,
) -> Option<EthForkConfig> {
let chain_spec = self.provider.chain_spec();
let mut system_contracts = BTreeMap::<SystemContract, Address>::default();
if chain_spec.is_cancun_active_at_timestamp(timestamp) {
system_contracts.extend(SystemContract::cancun());
}
if chain_spec.is_prague_active_at_timestamp(timestamp) {
system_contracts
.extend(SystemContract::prague(chain_spec.deposit_contract().map(|c| c.address)));
}
// Fork config only exists for timestamp-based hardforks.
let fork_id = chain_spec
.fork_id(&Head { timestamp, number: u64::MAX, ..Default::default() })
.hash
.0
.into();
Some(EthForkConfig {
activation_time: timestamp,
blob_schedule: chain_spec.blob_params_at_timestamp(timestamp)?,
chain_id: chain_spec.chain().id(),
fork_id,
precompiles,
system_contracts,
})
}
fn config(&self) -> Result<EthConfig, RethError> {
let chain_spec = self.provider.chain_spec();
let latest = self
.provider
.latest_header()?
.ok_or_else(|| ProviderError::BestBlockNotFound)?
.into_header();
// Short-circuit if Cancun is not active.
if !chain_spec.is_cancun_active_at_timestamp(latest.timestamp()) {
return Err(RethError::msg("cancun has not been activated"))
}
/*
let current_precompiles =
evm_to_precompiles_map(self.evm_config.evm_for_block(EmptyDB::default(), &latest));
*/
let current_precompiles = BTreeMap::new();
let mut fork_timestamps =
chain_spec.forks_iter().filter_map(|(_, cond)| cond.as_timestamp()).collect::<Vec<_>>();
fork_timestamps.dedup();
#[allow(unused_variables)]
let (current_fork_idx, current_fork_timestamp) = fork_timestamps
.iter()
.position(|ts| &latest.timestamp < ts)
.and_then(|idx| idx.checked_sub(1))
.or_else(|| fork_timestamps.len().checked_sub(1))
.and_then(|idx| fork_timestamps.get(idx).map(|ts| (idx, *ts)))
.ok_or_else(|| RethError::msg("no active timestamp fork found"))?;
let current = self
.build_fork_config_at(current_fork_timestamp, current_precompiles)
.ok_or_else(|| RethError::msg("no fork config for current fork"))?;
let config = EthConfig { current, next: None, last: None };
/*
let mut config = EthConfig { current, next: None, last: None };
if let Some(last_fork_idx) = current_fork_idx.checked_sub(1) {
if let Some(last_fork_timestamp) = fork_timestamps.get(last_fork_idx).copied() {
let fake_header = {
let mut header = latest.clone();
header.timestamp = last_fork_timestamp;
header
};
let last_precompiles = evm_to_precompiles_map(
self.evm_config.evm_for_block(EmptyDB::default(), &fake_header),
);
config.last = self.build_fork_config_at(last_fork_timestamp, last_precompiles);
}
}
if let Some(next_fork_timestamp) = fork_timestamps.get(current_fork_idx + 1).copied() {
let fake_header = {
let mut header = latest;
header.timestamp = next_fork_timestamp;
header
};
let next_precompiles = evm_to_precompiles_map(
self.evm_config.evm_for_block(EmptyDB::default(), &fake_header),
);
config.next = self.build_fork_config_at(next_fork_timestamp, next_precompiles);
}
*/
Ok(config)
}
}
impl<Provider, Evm> EthConfigApiServer for EthConfigHandler<Provider, Evm>
where
Provider: ChainSpecProvider<ChainSpec: Hardforks + EthereumHardforks>
+ BlockReaderIdExt<Header = Header>
+ 'static,
Evm: ConfigureEvm<Primitives: NodePrimitives<BlockHeader = Header>> + 'static,
{
fn config(&self) -> RpcResult<EthConfig> {
Ok(self.config().map_err(EthApiError::from)?)
}
}
#[allow(unused)]
fn evm_to_precompiles_map(
evm: impl Evm<Precompiles = PrecompilesMap>,
) -> BTreeMap<String, Address> {
let precompiles = evm.precompiles();
precompiles
.addresses()
.filter_map(|address| {
Some((precompile_to_str(precompiles.get(address)?.precompile_id()), *address))
})
.collect()
}
// TODO: move
fn precompile_to_str(id: &PrecompileId) -> String {
let str = match id {
PrecompileId::EcRec => "ECREC",
PrecompileId::Sha256 => "SHA256",
PrecompileId::Ripemd160 => "RIPEMD160",
PrecompileId::Identity => "ID",
PrecompileId::ModExp => "MODEXP",
PrecompileId::Bn254Add => "BN254_ADD",
PrecompileId::Bn254Mul => "BN254_MUL",
PrecompileId::Bn254Pairing => "BN254_PAIRING",
PrecompileId::Blake2F => "BLAKE2F",
PrecompileId::KzgPointEvaluation => "KZG_POINT_EVALUATION",
PrecompileId::Bls12G1Add => "BLS12_G1ADD",
PrecompileId::Bls12G1Msm => "BLS12_G1MSM",
PrecompileId::Bls12G2Add => "BLS12_G2ADD",
PrecompileId::Bls12G2Msm => "BLS12_G2MSM",
PrecompileId::Bls12Pairing => "BLS12_PAIRING_CHECK",
PrecompileId::Bls12MapFpToGp1 => "BLS12_MAP_FP_TO_G1",
PrecompileId::Bls12MapFp2ToGp2 => "BLS12_MAP_FP2_TO_G2",
PrecompileId::P256Verify => "P256_VERIFY",
PrecompileId::Custom(custom) => custom.borrow(),
};
str.to_owned()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | crates/rpc/rpc-eth-api/src/helpers/pending_block.rs | //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace
//! RPC methods.
use super::SpawnBlocking;
use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore};
use alloy_consensus::{BlockHeader, Transaction};
use alloy_eips::eip7840::BlobParams;
use alloy_primitives::{B256, U256};
use alloy_rpc_types_eth::BlockNumberOrTag;
use futures::Future;
use reth_chain_state::{BlockState, ExecutedBlock};
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError};
use reth_evm::{
execute::{BlockBuilder, BlockBuilderOutcome, ExecutionOutcome},
ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor,
};
use reth_primitives_traits::{transaction::error::InvalidTransactionError, HeaderTy, SealedHeader};
use reth_revm::{database::StateProviderDatabase, db::State};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_types::{
builder::config::PendingBlockKind, pending_block::PendingBlockAndReceipts, EthApiError,
PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin,
};
use reth_storage_api::{
BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx,
ReceiptProvider, StateProviderBox, StateProviderFactory,
};
use reth_transaction_pool::{
error::InvalidPoolTransactionError, BestTransactions, BestTransactionsAttributes,
PoolTransaction, TransactionPool,
};
use revm::context_interface::Block;
use std::{
sync::Arc,
time::{Duration, Instant},
};
use tokio::sync::Mutex;
use tracing::debug;
/// Loads a pending block from database.
///
/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods.
pub trait LoadPendingBlock:
EthApiTypes<
Error: FromEvmError<Self::Evm>,
RpcConvert: RpcConvert<Network = Self::NetworkTypes>,
> + RpcNodeCore
{
/// Returns a handle to the pending block.
///
/// Data access in default (L1) trait method implementations.
fn pending_block(&self) -> &Mutex<Option<PendingBlock<Self::Primitives>>>;
/// Returns a [`PendingEnvBuilder`] for the pending block.
fn pending_env_builder(&self) -> &dyn PendingEnvBuilder<Self::Evm>;
/// Returns the pending block kind
fn pending_block_kind(&self) -> PendingBlockKind;
/// Configures the [`PendingBlockEnv`] for the pending block
///
/// If no pending block is available, this will derive it from the `latest` block
#[expect(clippy::type_complexity)]
fn pending_block_env_and_cfg(
&self,
) -> Result<
PendingBlockEnv<
ProviderBlock<Self::Provider>,
ProviderReceipt<Self::Provider>,
SpecFor<Self::Evm>,
>,
Self::Error,
> {
if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? {
if let Some(receipts) = self
.provider()
.receipts_by_block(block.hash().into())
.map_err(Self::Error::from_eth_err)?
{
// Note: for the PENDING block we assume it is past the known merge block and
// thus this will not fail when looking up the total
// difficulty value for the blockenv.
let evm_env = self.evm_config().evm_env(block.header());
return Ok(PendingBlockEnv::new(
evm_env,
PendingBlockEnvOrigin::ActualPending(Arc::new(block), Arc::new(receipts)),
));
}
}
// no pending block from the CL yet, so we use the latest block and modify the env
// values that we can
let latest = self
.provider()
.latest_header()
.map_err(Self::Error::from_eth_err)?
.ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?;
let evm_env = self
.evm_config()
.next_evm_env(&latest, &self.next_env_attributes(&latest)?)
.map_err(RethError::other)
.map_err(Self::Error::from_eth_err)?;
Ok(PendingBlockEnv::new(evm_env, PendingBlockEnvOrigin::DerivedFromLatest(latest)))
}
/// Returns [`ConfigureEvm::NextBlockEnvCtx`] for building a local pending block.
fn next_env_attributes(
&self,
parent: &SealedHeader<ProviderHeader<Self::Provider>>,
) -> Result<<Self::Evm as ConfigureEvm>::NextBlockEnvCtx, Self::Error> {
Ok(self.pending_env_builder().pending_env_attributes(parent)?)
}
/// Returns a [`StateProviderBox`] on a mem-pool built pending block overlaying latest.
fn local_pending_state(
&self,
) -> impl Future<Output = Result<Option<StateProviderBox>, Self::Error>> + Send
where
Self: SpawnBlocking,
{
async move {
let Some(pending_block) = self.pool_pending_block().await? else {
return Ok(None);
};
let latest_historical = self
.provider()
.history_by_block_hash(pending_block.block().parent_hash())
.map_err(Self::Error::from_eth_err)?;
let state = BlockState::from(pending_block);
Ok(Some(Box::new(state.state_provider(latest_historical)) as StateProviderBox))
}
}
/// Returns a mem-pool built pending block.
fn pool_pending_block(
&self,
) -> impl Future<Output = Result<Option<PendingBlock<Self::Primitives>>, Self::Error>> + Send
where
Self: SpawnBlocking,
{
async move {
if self.pending_block_kind().is_none() {
return Ok(None);
}
let pending = self.pending_block_env_and_cfg()?;
let parent = match pending.origin {
PendingBlockEnvOrigin::ActualPending(..) => return Ok(None),
PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent,
};
// we couldn't find the real pending block, so we need to build it ourselves
let mut lock = self.pending_block().lock().await;
let now = Instant::now();
// Is the pending block cached?
if let Some(pending_block) = lock.as_ref() {
// Is the cached block not expired and latest is its parent?
if pending.evm_env.block_env.number == U256::from(pending_block.block().number()) &&
parent.hash() == pending_block.block().parent_hash() &&
now <= pending_block.expires_at
{
return Ok(Some(pending_block.clone()));
}
}
let executed_block = match self
.spawn_blocking_io(move |this| {
// we rebuild the block
this.build_block(&parent)
})
.await
{
Ok(block) => block,
Err(err) => {
debug!(target: "rpc", "Failed to build pending block: {:?}", err);
return Ok(None)
}
};
let pending = PendingBlock::with_executed_block(
Instant::now() + Duration::from_secs(1),
executed_block,
);
*lock = Some(pending.clone());
Ok(Some(pending))
}
}
/// Returns the locally built pending block
fn local_pending_block(
&self,
) -> impl Future<Output = Result<Option<PendingBlockAndReceipts<Self::Primitives>>, Self::Error>>
+ Send
where
Self: SpawnBlocking,
Self::Pool:
TransactionPool<Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>>,
{
async move {
if self.pending_block_kind().is_none() {
return Ok(None);
}
let pending = self.pending_block_env_and_cfg()?;
Ok(match pending.origin {
PendingBlockEnvOrigin::ActualPending(block, receipts) => Some((block, receipts)),
PendingBlockEnvOrigin::DerivedFromLatest(..) => {
self.pool_pending_block().await?.map(PendingBlock::into_block_and_receipts)
}
})
}
}
/// Builds a pending block using the configured provider and pool.
///
/// If the origin is the actual pending block, the block is built with withdrawals.
///
/// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre
/// block contract call using the parent beacon block root received from the CL.
fn build_block(
&self,
parent: &SealedHeader<ProviderHeader<Self::Provider>>,
) -> Result<ExecutedBlock<Self::Primitives>, Self::Error>
where
Self::Pool:
TransactionPool<Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>>,
EthApiError: From<ProviderError>,
{
let state_provider = self
.provider()
.history_by_block_hash(parent.hash())
.map_err(Self::Error::from_eth_err)?;
let state = StateProviderDatabase::new(&state_provider);
let mut db = State::builder().with_database(state).with_bundle_update().build();
let mut builder = self
.evm_config()
.builder_for_next_block(&mut db, parent, self.next_env_attributes(parent)?)
.map_err(RethError::other)
.map_err(Self::Error::from_eth_err)?;
builder.apply_pre_execution_changes().map_err(Self::Error::from_eth_err)?;
let block_env = builder.evm_mut().block().clone();
let blob_params = self
.provider()
.chain_spec()
.blob_params_at_timestamp(parent.timestamp_seconds())
.unwrap_or_else(BlobParams::cancun);
let mut cumulative_gas_used = 0;
let mut sum_blob_gas_used = 0;
let block_gas_limit: u64 = block_env.gas_limit;
// Only include transactions if not configured as Empty
if !self.pending_block_kind().is_empty() {
let mut best_txs = self
.pool()
.best_transactions_with_attributes(BestTransactionsAttributes::new(
block_env.basefee,
block_env.blob_gasprice().map(|gasprice| gasprice as u64),
))
// freeze to get a block as fast as possible
.without_updates();
while let Some(pool_tx) = best_txs.next() {
// ensure we still have capacity for this transaction
if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit {
// we can't fit this transaction into the block, so we need to mark it as
// invalid which also removes all dependent transaction from
// the iterator before we can continue
best_txs.mark_invalid(
&pool_tx,
InvalidPoolTransactionError::ExceedsGasLimit(
pool_tx.gas_limit(),
block_gas_limit,
),
);
continue
}
if pool_tx.origin.is_private() {
// we don't want to leak any state changes made by private transactions, so we
// mark them as invalid here which removes all dependent
// transactions from the iteratorbefore we can continue
best_txs.mark_invalid(
&pool_tx,
InvalidPoolTransactionError::Consensus(
InvalidTransactionError::TxTypeNotSupported,
),
);
continue
}
// convert tx to a signed transaction
let tx = pool_tx.to_consensus();
// There's only limited amount of blob space available per block, so we need to
// check if the EIP-4844 can still fit in the block
if let Some(tx_blob_gas) = tx.blob_gas_used() {
if sum_blob_gas_used + tx_blob_gas > blob_params.max_blob_gas_per_block() {
// we can't fit this _blob_ transaction into the block, so we mark it as
// invalid, which removes its dependent transactions from
// the iterator. This is similar to the gas limit condition
// for regular transactions above.
best_txs.mark_invalid(
&pool_tx,
InvalidPoolTransactionError::ExceedsGasLimit(
tx_blob_gas,
blob_params.max_blob_gas_per_block(),
),
);
continue
}
}
let gas_used = match builder.execute_transaction(tx.clone()) {
Ok(gas_used) => gas_used,
Err(BlockExecutionError::Validation(BlockValidationError::InvalidTx {
error,
..
})) => {
if error.is_nonce_too_low() {
// if the nonce is too low, we can skip this transaction
} else {
// if the transaction is invalid, we can skip it and all of its
// descendants
best_txs.mark_invalid(
&pool_tx,
InvalidPoolTransactionError::Consensus(
InvalidTransactionError::TxTypeNotSupported,
),
);
}
continue
}
// this is an error that we should treat as fatal for this attempt
Err(err) => return Err(Self::Error::from_eth_err(err)),
};
// add to the total blob gas used if the transaction successfully executed
if let Some(tx_blob_gas) = tx.blob_gas_used() {
sum_blob_gas_used += tx_blob_gas;
// if we've reached the max data gas per block, we can skip blob txs entirely
if sum_blob_gas_used == blob_params.max_blob_gas_per_block() {
best_txs.skip_blobs();
}
}
// add gas used by the transaction to cumulative gas used, before creating the
// receipt
cumulative_gas_used += gas_used;
}
}
let BlockBuilderOutcome { execution_result, block, hashed_state, .. } =
builder.finish(&state_provider).map_err(Self::Error::from_eth_err)?;
let execution_outcome = ExecutionOutcome::new(
db.take_bundle(),
vec![execution_result.receipts],
block.number(),
vec![execution_result.requests],
);
Ok(ExecutedBlock {
recovered_block: block.into(),
execution_output: Arc::new(execution_outcome),
hashed_state: Arc::new(hashed_state),
})
}
}
/// A type that knows how to build a [`ConfigureEvm::NextBlockEnvCtx`] for a pending block.
pub trait PendingEnvBuilder<Evm: ConfigureEvm>: Send + Sync + Unpin + 'static {
/// Builds a [`ConfigureEvm::NextBlockEnvCtx`] for pending block.
fn pending_env_attributes(
&self,
parent: &SealedHeader<HeaderTy<Evm::Primitives>>,
) -> Result<Evm::NextBlockEnvCtx, EthApiError>;
}
/// Trait that should be implemented on [`ConfigureEvm::NextBlockEnvCtx`] to provide a way for it to
/// build an environment for pending block.
///
/// This assumes that next environment building doesn't require any additional context, for more
/// complex implementations one should implement [`PendingEnvBuilder`] on their custom type.
pub trait BuildPendingEnv<Header> {
/// Builds a [`ConfigureEvm::NextBlockEnvCtx`] for pending block.
fn build_pending_env(parent: &SealedHeader<Header>) -> Self;
}
impl<Evm> PendingEnvBuilder<Evm> for ()
where
Evm: ConfigureEvm<NextBlockEnvCtx: BuildPendingEnv<HeaderTy<Evm::Primitives>>>,
{
fn pending_env_attributes(
&self,
parent: &SealedHeader<HeaderTy<Evm::Primitives>>,
) -> Result<Evm::NextBlockEnvCtx, EthApiError> {
Ok(Evm::NextBlockEnvCtx::build_pending_env(parent))
}
}
impl<H: BlockHeader> BuildPendingEnv<H> for NextBlockEnvAttributes {
fn build_pending_env(parent: &SealedHeader<H>) -> Self {
// NOTE: 12000 is block time in ms (MODIFIED)
let td = if cfg!(feature = "timestamp-in-seconds") { 12 } else { 12000 };
Self {
timestamp: parent.timestamp().saturating_add(td),
suggested_fee_recipient: parent.beneficiary(),
prev_randao: B256::random(),
gas_limit: parent.gas_limit(),
parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO),
withdrawals: parent.withdrawals_root().map(|_| Default::default()),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/signer.rs | crates/rpc/rpc-eth-api/src/helpers/signer.rs | //! An abstraction over ethereum signers.
use alloy_dyn_abi::TypedData;
use alloy_primitives::{Address, Signature};
use alloy_rpc_types_eth::TransactionRequest;
use dyn_clone::DynClone;
use reth_rpc_eth_types::SignError;
use std::result;
/// Result returned by [`EthSigner`] methods.
pub type Result<T> = result::Result<T, SignError>;
/// An Ethereum Signer used via RPC.
#[async_trait::async_trait]
pub trait EthSigner<T, TxReq = TransactionRequest>: Send + Sync + DynClone {
/// Returns the available accounts for this signer.
fn accounts(&self) -> Vec<Address>;
/// Returns `true` whether this signer can sign for this address
fn is_signer_for(&self, addr: &Address) -> bool {
self.accounts().contains(addr)
}
/// Returns the signature
async fn sign(&self, address: Address, message: &[u8]) -> Result<Signature>;
/// signs a transaction request using the given account in request
async fn sign_transaction(&self, request: TxReq, address: &Address) -> Result<T>;
/// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait.
fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result<Signature>;
}
dyn_clone::clone_trait_object!(<T> EthSigner<T>);
/// Adds 20 random dev signers for access via the API. Used in dev mode.
#[auto_impl::auto_impl(&)]
pub trait AddDevSigners {
/// Generates 20 random developer accounts.
/// Used in DEV mode.
fn with_dev_accounts(&self);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/estimate.rs | crates/rpc/rpc-eth-api/src/helpers/estimate.rs | //! Estimate gas needed implementation
use super::{Call, LoadPendingBlock};
use crate::{AsEthApiError, FromEthApiError, IntoEthApiError};
use alloy_evm::overrides::apply_state_overrides;
use alloy_network::TransactionBuilder;
use alloy_primitives::{TxKind, U256};
use alloy_rpc_types_eth::{state::StateOverride, BlockId};
use futures::Future;
use reth_chainspec::MIN_TRANSACTION_GAS;
use reth_errors::ProviderError;
use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_convert::{RpcConvert, RpcTxReq};
use reth_rpc_eth_types::{
error::{api::FromEvmHalt, FromEvmError},
EthApiError, RevertError, RpcInvalidTransactionError,
};
use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO};
use reth_storage_api::StateProvider;
use revm::context_interface::{result::ExecutionResult, Transaction};
use tracing::trace;
/// Gas execution estimates
pub trait EstimateCall: Call {
/// Estimates the gas usage of the `request` with the state.
///
/// This will execute the [`RpcTxReq`] and find the best gas limit via binary search.
///
/// ## EVM settings
///
/// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: <https://github.com/ethereum/go-ethereum/blob/380688c636a654becc8f114438c2a5d93d2db032/core/state_transition.go#L145-L148>:
///
/// - `disable_eip3607` is set to `true`
/// - `disable_base_fee` is set to `true`
/// - `nonce` is set to `None`
fn estimate_gas_with<S>(
&self,
mut evm_env: EvmEnvFor<Self::Evm>,
mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
state: S,
state_override: Option<StateOverride>,
) -> Result<U256, Self::Error>
where
S: StateProvider,
{
// Disabled because eth_estimateGas is sometimes used with eoa senders
// See <https://github.com/paradigmxyz/reth/issues/1959>
evm_env.cfg_env.disable_eip3607 = true;
// The basefee should be ignored for eth_estimateGas and similar
// See:
// <https://github.com/ethereum/go-ethereum/blob/ee8e83fa5f6cb261dad2ed0a7bbcde4930c41e6c/internal/ethapi/api.go#L985>
evm_env.cfg_env.disable_base_fee = true;
// set nonce to None so that the correct nonce is chosen by the EVM
request.as_mut().take_nonce();
// Keep a copy of gas related request values
let tx_request_gas_limit = request.as_ref().gas_limit();
let tx_request_gas_price = request.as_ref().gas_price();
// the gas limit of the corresponding block
let max_gas_limit = evm_env
.cfg_env
.tx_gas_limit_cap
.map_or(evm_env.block_env.gas_limit, |cap| cap.min(evm_env.block_env.gas_limit));
// Determine the highest possible gas limit, considering both the request's specified limit
// and the block's limit.
let mut highest_gas_limit = tx_request_gas_limit
.map(|mut tx_gas_limit| {
if max_gas_limit < tx_gas_limit {
// requested gas limit is higher than the allowed gas limit, capping
tx_gas_limit = max_gas_limit;
}
tx_gas_limit
})
.unwrap_or(max_gas_limit);
// Configure the evm env
let mut db = CacheDB::new(StateProviderDatabase::new(state));
// Apply any state overrides if specified.
if let Some(state_override) = state_override {
apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?;
}
let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?;
// Check if this is a basic transfer (no input data to account with no code)
let mut is_basic_transfer = false;
if tx_env.input().is_empty() {
if let TxKind::Call(to) = tx_env.kind() {
if let Ok(code) = db.db.account_code(&to) {
is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true);
}
}
}
// Check funds of the sender (only useful to check if transaction gas price is more than 0).
//
// The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price`
if tx_env.gas_price() > 0 {
// cap the highest gas limit by max gas caller can afford with given gas price
highest_gas_limit =
highest_gas_limit.min(self.caller_gas_allowance(&mut db, &evm_env, &tx_env)?);
}
// If the provided gas limit is less than computed cap, use that
tx_env.set_gas_limit(tx_env.gas_limit().min(highest_gas_limit));
// Create EVM instance once and reuse it throughout the entire estimation process
let mut evm = self.evm_config().evm_with_env(&mut db, evm_env);
// For basic transfers, try using minimum gas before running full binary search
if is_basic_transfer {
// If the tx is a simple transfer (call to an account with no code) we can
// shortcircuit. But simply returning
// `MIN_TRANSACTION_GAS` is dangerous because there might be additional
// field combos that bump the price up, so we try executing the function
// with the minimum gas limit to make sure.
let mut min_tx_env = tx_env.clone();
min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS);
// Reuse the same EVM instance
if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) {
if res.result.is_success() {
return Ok(U256::from(MIN_TRANSACTION_GAS))
}
}
}
trace!(target: "rpc::eth::estimate", ?tx_env, gas_limit = tx_env.gas_limit(), is_basic_transfer, "Starting gas estimation");
// Execute the transaction with the highest possible gas limit.
let mut res = match evm.transact(tx_env.clone()).map_err(Self::Error::from_evm_err) {
// Handle the exceptional case where the transaction initialization uses too much
// gas. If the gas price or gas limit was specified in the request,
// retry the transaction with the block's gas limit to determine if
// the failure was due to insufficient gas.
Err(err)
if err.is_gas_too_high() &&
(tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) =>
{
return Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit);
}
Err(err) if err.is_gas_too_low() => {
// This failed because the configured gas cost of the tx was lower than what
// actually consumed by the tx This can happen if the
// request provided fee values manually and the resulting gas cost exceeds the
// sender's allowance, so we return the appropriate error here
return Err(RpcInvalidTransactionError::GasRequiredExceedsAllowance {
gas_limit: tx_env.gas_limit(),
}
.into_eth_err())
}
// Propagate other results (successful or other errors).
ethres => ethres?,
};
let gas_refund = match res.result {
ExecutionResult::Success { gas_refunded, .. } => gas_refunded,
ExecutionResult::Halt { reason, .. } => {
// here we don't check for invalid opcode because already executed with highest gas
// limit
return Err(Self::Error::from_evm_halt(reason, tx_env.gas_limit()))
}
ExecutionResult::Revert { output, .. } => {
// if price or limit was included in the request then we can execute the request
// again with the block's gas limit to check if revert is gas related or not
return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() {
Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit)
} else {
// the transaction did revert
Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err())
}
}
};
// At this point we know the call succeeded but want to find the _best_ (lowest) gas the
// transaction succeeds with. We find this by doing a binary search over the possible range.
// we know the tx succeeded with the configured gas limit, so we can use that as the
// highest, in case we applied a gas cap due to caller allowance above
highest_gas_limit = tx_env.gas_limit();
// NOTE: this is the gas the transaction used, which is less than the
// transaction requires to succeed.
let mut gas_used = res.result.gas_used();
// the lowest value is capped by the gas used by the unconstrained transaction
let mut lowest_gas_limit = gas_used.saturating_sub(1);
// As stated in Geth, there is a good chance that the transaction will pass if we set the
// gas limit to the execution gas used plus the gas refund, so we check this first
// <https://github.com/ethereum/go-ethereum/blob/a5a4fa7032bb248f5a7c40f4e8df2b131c4186a4/eth/gasestimator/gasestimator.go#L135
//
// Calculate the optimistic gas limit by adding gas used and gas refund,
// then applying a 64/63 multiplier to account for gas forwarding rules.
let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
if optimistic_gas_limit < highest_gas_limit {
// Set the transaction's gas limit to the calculated optimistic gas limit.
let mut optimistic_tx_env = tx_env.clone();
optimistic_tx_env.set_gas_limit(optimistic_gas_limit);
// Re-execute the transaction with the new gas limit and update the result and
// environment.
res = evm.transact(optimistic_tx_env).map_err(Self::Error::from_evm_err)?;
// Update the gas used based on the new result.
gas_used = res.result.gas_used();
// Update the gas limit estimates (highest and lowest) based on the execution result.
update_estimated_gas_range(
res.result,
optimistic_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
};
// Pick a point that's close to the estimated gas
let mut mid_gas_limit = std::cmp::min(
gas_used * 3,
((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
);
trace!(target: "rpc::eth::estimate", ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas");
// Binary search narrows the range to find the minimum gas limit needed for the transaction
// to succeed.
while lowest_gas_limit + 1 < highest_gas_limit {
// An estimation error is allowed once the current gas limit range used in the binary
// search is small enough (less than 1.5% of the highest gas limit)
// <https://github.com/ethereum/go-ethereum/blob/a5a4fa7032bb248f5a7c40f4e8df2b131c4186a4/eth/gasestimator/gasestimator.go#L152
if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
ESTIMATE_GAS_ERROR_RATIO
{
break
};
let mut mid_tx_env = tx_env.clone();
mid_tx_env.set_gas_limit(mid_gas_limit);
// Execute transaction and handle potential gas errors, adjusting limits accordingly.
match evm.transact(mid_tx_env).map_err(Self::Error::from_evm_err) {
Err(err) if err.is_gas_too_high() => {
// Decrease the highest gas limit if gas is too high
highest_gas_limit = mid_gas_limit;
}
Err(err) if err.is_gas_too_low() => {
// Increase the lowest gas limit if gas is too low
lowest_gas_limit = mid_gas_limit;
}
// Handle other cases, including successful transactions.
ethres => {
// Unpack the result and environment if the transaction was successful.
res = ethres?;
// Update the estimated gas range based on the transaction result.
update_estimated_gas_range(
res.result,
mid_gas_limit,
&mut highest_gas_limit,
&mut lowest_gas_limit,
)?;
}
}
// New midpoint
mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
}
Ok(U256::from(highest_gas_limit))
}
    /// Estimate gas needed for execution of the `request` at the [`BlockId`].
    ///
    /// Loads the state for `at`, optionally applies `state_override`, and runs
    /// [`EstimateCall::estimate_gas_with`] (which executes the transaction repeatedly in a
    /// binary search) on a blocking task, returning the estimated gas limit as a [`U256`].
    fn estimate_gas_at(
        &self,
        request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
        at: BlockId,
        state_override: Option<StateOverride>,
    ) -> impl Future<Output = Result<U256, Self::Error>> + Send
    where
        Self: LoadPendingBlock,
    {
        async move {
            // Resolve the evm environment for the target block on the async context first.
            let (evm_env, at) = self.evm_env_at(at).await?;
            // The estimation itself repeatedly executes the transaction, so run it on a
            // blocking-IO task rather than on the async runtime.
            self.spawn_blocking_io_fut(move |this| async move {
                let state = this.state_at_block_id(at).await?;
                EstimateCall::estimate_gas_with(&this, evm_env, request, state, state_override)
            })
            .await
        }
    }
/// Executes the requests again after an out of gas error to check if the error is gas related
/// or not
#[inline]
fn map_out_of_gas_err<DB>(
evm: &mut EvmFor<Self::Evm, DB>,
mut tx_env: TxEnvFor<Self::Evm>,
max_gas_limit: u64,
) -> Result<U256, Self::Error>
where
DB: Database<Error = ProviderError>,
EthApiError: From<DB::Error>,
{
let req_gas_limit = tx_env.gas_limit();
tx_env.set_gas_limit(max_gas_limit);
let retry_res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
match retry_res.result {
ExecutionResult::Success { .. } => {
// Transaction succeeded by manually increasing the gas limit,
// which means the caller lacks funds to pay for the tx
Err(RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err())
}
ExecutionResult::Revert { output, .. } => {
// reverted again after bumping the limit
Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err())
}
ExecutionResult::Halt { reason, .. } => {
Err(Self::Error::from_evm_halt(reason, req_gas_limit))
}
}
}
}
/// Updates the highest and lowest gas limits for binary search based on the execution result.
///
/// Narrows the binary-search window used by gas estimation: a successful execution proves the
/// tried gas limit is sufficient (new upper bound), while any revert or halt proves it is not
/// (new lower bound). Always returns `Ok(())`; the `Result` return type is kept for interface
/// stability.
///
/// All halts — not just out-of-gas — are treated as "needs more gas", because some non-OOG
/// halts (e.g. invalid opcode) can also be triggered by an insufficient gas limit. A common
/// usage of invalid opcode in OpenZeppelin:
/// <https://github.com/OpenZeppelin/openzeppelin-contracts/blob/94697be8a3f0dfcd95dfb13ffbd39b5973f5c65d/contracts/metatx/ERC2771Forwarder.sol#L360-L367>
#[inline]
pub fn update_estimated_gas_range<Halt>(
    result: ExecutionResult<Halt>,
    tx_gas_limit: u64,
    highest_gas_limit: &mut u64,
    lowest_gas_limit: &mut u64,
) -> Result<(), EthApiError> {
    if matches!(result, ExecutionResult::Success { .. }) {
        // Execution succeeded: the tried limit becomes the new upper bound.
        *highest_gas_limit = tx_gas_limit;
    } else {
        // Reverted or halted: the tried limit becomes the new lower bound, since the
        // transaction is already known to succeed with a higher limit.
        *lowest_gas_limit = tx_gas_limit;
    }
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/call.rs | crates/rpc/rpc-eth-api/src/helpers/call.rs | //! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC
//! methods.
use core::fmt;
use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace};
use crate::{
helpers::estimate::EstimateCall, FromEvmError, FullEthApiTypes, RpcBlock, RpcNodeCore,
};
use alloy_consensus::BlockHeader;
use alloy_eips::eip2930::AccessListResult;
use alloy_evm::overrides::{apply_block_overrides, apply_state_overrides, OverrideBlockHashes};
use alloy_network::TransactionBuilder;
use alloy_primitives::{Bytes, B256, U256};
use alloy_rpc_types_eth::{
simulate::{SimBlock, SimulatePayload, SimulatedBlock},
state::{EvmOverrides, StateOverride},
BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo,
};
use futures::Future;
use reth_errors::{ProviderError, RethError};
use reth_evm::{
ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv,
TxEnvFor,
};
use reth_node_api::BlockBody;
use reth_primitives_traits::{Recovered, SignedTransaction};
use reth_revm::{
database::StateProviderDatabase,
db::{CacheDB, State},
};
use reth_rpc_convert::{RpcConvert, RpcTxReq};
use reth_rpc_eth_types::{
cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper},
error::{api::FromEvmHalt, ensure_success, FromEthApiError},
simulate::{self, EthSimulateError},
EthApiError, RevertError, StateCacheDb,
};
use reth_storage_api::{BlockIdReader, ProviderTx};
use revm::{
context_interface::{
result::{ExecutionResult, ResultAndState},
Transaction,
},
Database, DatabaseCommit,
};
use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector};
use tracing::{trace, warn};
/// Result type for `eth_simulateV1` RPC method.
///
/// On success this holds one [`SimulatedBlock`] per `SimBlock` entry of the request payload.
pub type SimulatedBlocksResult<N, E> = Result<Vec<SimulatedBlock<RpcBlock<N>>>, E>;
/// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in
/// the `eth_` namespace.
pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthApiTypes {
    /// Estimate gas needed for execution of the `request` at the [`BlockId`].
    ///
    /// Delegates to [`EstimateCall::estimate_gas_at`].
    fn estimate_gas_at(
        &self,
        request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
        at: BlockId,
        state_override: Option<StateOverride>,
    ) -> impl Future<Output = Result<U256, Self::Error>> + Send {
        EstimateCall::estimate_gas_at(self, request, at, state_override)
    }
    /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state.
    /// The transactions are packed into individual blocks. Overrides can be provided.
    ///
    /// See also: <https://github.com/ethereum/go-ethereum/pull/27720>
    fn simulate_v1(
        &self,
        payload: SimulatePayload<RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>>,
        block: Option<BlockId>,
    ) -> impl Future<Output = SimulatedBlocksResult<Self::NetworkTypes, Self::Error>> + Send {
        async move {
            if payload.block_state_calls.len() > self.max_simulate_blocks() as usize {
                return Err(EthApiError::InvalidParams("too many blocks.".to_string()).into())
            }
            let block = block.unwrap_or_default();
            let SimulatePayload {
                block_state_calls,
                trace_transfers,
                validation,
                return_full_transactions,
            } = payload;
            if block_state_calls.is_empty() {
                return Err(EthApiError::InvalidParams(String::from("calls are empty.")).into())
            }
            // Resolve the block the simulation is anchored on; simulated blocks build on top
            // of its header.
            let base_block =
                self.recovered_block(block).await?.ok_or(EthApiError::HeaderNotFound(block))?;
            let mut parent = base_block.sealed_header().clone();
            let this = self.clone();
            self.spawn_with_state_at_block(block, move |state| {
                let mut db =
                    State::builder().with_database(StateProviderDatabase::new(state)).build();
                let mut blocks: Vec<SimulatedBlock<RpcBlock<Self::NetworkTypes>>> =
                    Vec::with_capacity(block_state_calls.len());
                for block in block_state_calls {
                    let mut evm_env = this
                        .evm_config()
                        .next_evm_env(&parent, &this.next_env_attributes(&parent)?)
                        .map_err(RethError::other)
                        .map_err(Self::Error::from_eth_err)?;
                    // Always disable EIP-3607
                    evm_env.cfg_env.disable_eip3607 = true;
                    if !validation {
                        // If not explicitly required, we disable nonce check <https://github.com/paradigmxyz/reth/issues/16108>
                        evm_env.cfg_env.disable_nonce_check = true;
                        evm_env.cfg_env.disable_base_fee = true;
                        evm_env.block_env.basefee = 0;
                    }
                    let SimBlock { block_overrides, state_overrides, calls } = block;
                    if let Some(block_overrides) = block_overrides {
                        // ensure we don't allow uncapped gas limit per block
                        if let Some(gas_limit_override) = block_overrides.gas_limit {
                            if gas_limit_override > evm_env.block_env.gas_limit &&
                                gas_limit_override > this.call_gas_limit()
                            {
                                return Err(
                                    EthApiError::other(EthSimulateError::GasLimitReached).into()
                                )
                            }
                        }
                        apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env);
                    }
                    if let Some(state_overrides) = state_overrides {
                        apply_state_overrides(state_overrides, &mut db)
                            .map_err(Self::Error::from_eth_err)?;
                    }
                    let block_gas_limit = evm_env.block_env.gas_limit;
                    let chain_id = evm_env.cfg_env.chain_id;
                    // Distribute the remaining block gas evenly across calls that did not
                    // specify an explicit gas limit.
                    let default_gas_limit = {
                        let total_specified_gas =
                            calls.iter().filter_map(|tx| tx.as_ref().gas_limit()).sum::<u64>();
                        let txs_without_gas_limit =
                            calls.iter().filter(|tx| tx.as_ref().gas_limit().is_none()).count();
                        if total_specified_gas > block_gas_limit {
                            return Err(EthApiError::Other(Box::new(
                                EthSimulateError::BlockGasLimitExceeded,
                            ))
                            .into())
                        }
                        if txs_without_gas_limit > 0 {
                            (block_gas_limit - total_specified_gas) / txs_without_gas_limit as u64
                        } else {
                            0
                        }
                    };
                    let ctx = this
                        .evm_config()
                        .context_for_next_block(&parent, this.next_env_attributes(&parent)?);
                    let (result, results) = if trace_transfers {
                        // prepare inspector to capture transfer inside the evm so they are recorded
                        // and included in logs
                        let inspector = TransferInspector::new(false).with_logs(true);
                        let evm = this
                            .evm_config()
                            .evm_with_env_and_inspector(&mut db, evm_env, inspector);
                        let builder = this.evm_config().create_block_builder(evm, &parent, ctx);
                        simulate::execute_transactions(
                            builder,
                            calls,
                            default_gas_limit,
                            chain_id,
                            this.tx_resp_builder(),
                        )?
                    } else {
                        let evm = this.evm_config().evm_with_env(&mut db, evm_env);
                        let builder = this.evm_config().create_block_builder(evm, &parent, ctx);
                        simulate::execute_transactions(
                            builder,
                            calls,
                            default_gas_limit,
                            chain_id,
                            this.tx_resp_builder(),
                        )?
                    };
                    // The next simulated block builds on the header of the block just produced.
                    parent = result.block.clone_sealed_header();
                    let block = simulate::build_simulated_block(
                        result.block,
                        results,
                        return_full_transactions.into(),
                        this.tx_resp_builder(),
                    )?;
                    blocks.push(block);
                }
                Ok(blocks)
            })
            .await
        }
    }
    /// Executes the call request (`eth_call`) and returns the output
    fn call(
        &self,
        request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
        block_number: Option<BlockId>,
        overrides: EvmOverrides,
    ) -> impl Future<Output = Result<Bytes, Self::Error>> + Send {
        async move {
            let res =
                self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?;
            // Converts reverts/halts into errors and extracts the returned bytes on success.
            ensure_success(res.result)
        }
    }
    /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the
    /// optionality of state overrides
    fn call_many(
        &self,
        bundles: Vec<Bundle<RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>>>,
        state_context: Option<StateContext>,
        mut state_override: Option<StateOverride>,
    ) -> impl Future<Output = Result<Vec<Vec<EthCallResponse>>, Self::Error>> + Send {
        async move {
            // Check if the vector of bundles is empty
            if bundles.is_empty() {
                return Err(EthApiError::InvalidParams(String::from("bundles are empty.")).into());
            }
            let StateContext { transaction_index, block_number } =
                state_context.unwrap_or_default();
            let transaction_index = transaction_index.unwrap_or_default();
            let mut target_block = block_number.unwrap_or_default();
            let is_block_target_pending = target_block.is_pending();
            // if it's not pending, we should always use block_hash over block_number to ensure that
            // different provider calls query data related to the same block.
            if !is_block_target_pending {
                target_block = self
                    .provider()
                    .block_hash_for_id(target_block)
                    .map_err(|_| EthApiError::HeaderNotFound(target_block))?
                    .ok_or_else(|| EthApiError::HeaderNotFound(target_block))?
                    .into();
            }
            // Fetch the evm env and the target block concurrently.
            let ((evm_env, _), block) = futures::try_join!(
                self.evm_env_at(target_block),
                self.recovered_block(target_block)
            )?;
            let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?;
            // we're essentially replaying the transactions in the block here, hence we need the
            // state that points to the beginning of the block, which is the state at
            // the parent block
            let mut at = block.parent_hash();
            let mut replay_block_txs = true;
            let num_txs =
                transaction_index.index().unwrap_or_else(|| block.body().transactions().len());
            // but if all transactions are to be replayed, we can use the state at the block itself,
            // however only if we're not targeting the pending block, because for pending we can't
            // rely on the block's state being available
            if !is_block_target_pending && num_txs == block.body().transactions().len() {
                at = block.hash();
                replay_block_txs = false;
            }
            let this = self.clone();
            self.spawn_with_state_at_block(at.into(), move |state| {
                let mut all_results = Vec::with_capacity(bundles.len());
                let mut db = CacheDB::new(StateProviderDatabase::new(state));
                if replay_block_txs {
                    // only need to replay the transactions in the block if not all transactions are
                    // to be replayed
                    let block_transactions = block.transactions_recovered().take(num_txs);
                    for tx in block_transactions {
                        let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx);
                        let res = this.transact(&mut db, evm_env.clone(), tx_env)?;
                        db.commit(res.state);
                    }
                }
                // transact all bundles
                for bundle in bundles {
                    let Bundle { transactions, block_override } = bundle;
                    if transactions.is_empty() {
                        // Skip empty bundles
                        continue;
                    }
                    let mut bundle_results = Vec::with_capacity(transactions.len());
                    let block_overrides = block_override.map(Box::new);
                    // transact all transactions in the bundle
                    for tx in transactions {
                        // Apply overrides, state overrides are only applied for the first tx in the
                        // request
                        let overrides =
                            EvmOverrides::new(state_override.take(), block_overrides.clone());
                        let (current_evm_env, prepared_tx) =
                            this.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?;
                        let res = this.transact(&mut db, current_evm_env, prepared_tx)?;
                        match ensure_success::<_, Self::Error>(res.result) {
                            Ok(output) => {
                                bundle_results
                                    .push(EthCallResponse { value: Some(output), error: None });
                            }
                            Err(err) => {
                                bundle_results.push(EthCallResponse {
                                    value: None,
                                    error: Some(err.to_string()),
                                });
                            }
                        }
                        // Commit state changes after each transaction to allow subsequent calls to
                        // see the updates
                        db.commit(res.state);
                    }
                    all_results.push(bundle_results);
                }
                Ok(all_results)
            })
            .await
        }
    }
    /// Creates [`AccessListResult`] for the [`RpcTxReq`] at the given
    /// [`BlockId`], or latest block.
    fn create_access_list_at(
        &self,
        request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
        block_number: Option<BlockId>,
        state_override: Option<StateOverride>,
    ) -> impl Future<Output = Result<AccessListResult, Self::Error>> + Send
    where
        Self: Trace,
    {
        async move {
            // Resolve the env on the async context, then run the access-list creation on a
            // blocking-IO task.
            let block_id = block_number.unwrap_or_default();
            let (evm_env, at) = self.evm_env_at(block_id).await?;
            self.spawn_blocking_io_fut(move |this| async move {
                this.create_access_list_with(evm_env, at, request, state_override).await
            })
            .await
        }
    }
    /// Creates [`AccessListResult`] for the [`RpcTxReq`] at the given
    /// [`BlockId`].
    fn create_access_list_with(
        &self,
        mut evm_env: EvmEnvFor<Self::Evm>,
        at: BlockId,
        request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
        state_override: Option<StateOverride>,
    ) -> impl Future<Output = Result<AccessListResult, Self::Error>> + Send
    where
        Self: Trace,
    {
        self.spawn_blocking_io_fut(move |this| async move {
            let state = this.state_at_block_id(at).await?;
            let mut db = CacheDB::new(StateProviderDatabase::new(state));
            if let Some(state_overrides) = state_override {
                apply_state_overrides(state_overrides, &mut db)
                    .map_err(Self::Error::from_eth_err)?;
            }
            let mut tx_env = this.create_txn_env(&evm_env, request.clone(), &mut db)?;
            // we want to disable this in eth_createAccessList, since this is common practice used
            // by other node impls and providers <https://github.com/foundry-rs/foundry/issues/4388>
            evm_env.cfg_env.disable_block_gas_limit = true;
            // The basefee should be ignored for eth_createAccessList
            // See:
            // <https://github.com/ethereum/go-ethereum/blob/8990c92aea01ca07801597b00c0d83d4e2d9b811/internal/ethapi/api.go#L1476-L1476>
            evm_env.cfg_env.disable_base_fee = true;
            // Disabled because eth_createAccessList is sometimes used with non-eoa senders
            evm_env.cfg_env.disable_eip3607 = true;
            if request.as_ref().gas_limit().is_none() && tx_env.gas_price() > 0 {
                let cap = this.caller_gas_allowance(&mut db, &evm_env, &tx_env)?;
                // no gas limit was provided in the request, so we need to cap the request's gas
                // limit
                tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit));
            }
            // can consume the list since we're not using the request anymore
            let initial = request.as_ref().access_list().cloned().unwrap_or_default();
            let mut inspector = AccessListInspector::new(initial);
            let result = this.inspect(&mut db, evm_env.clone(), tx_env.clone(), &mut inspector)?;
            let access_list = inspector.into_access_list();
            // Re-attach the recorded access list so the follow-up execution prices it in.
            tx_env.set_access_list(access_list.clone());
            match result.result {
                ExecutionResult::Halt { reason, gas_used } => {
                    let error =
                        Some(Self::Error::from_evm_halt(reason, tx_env.gas_limit()).to_string());
                    return Ok(AccessListResult {
                        access_list,
                        gas_used: U256::from(gas_used),
                        error,
                    })
                }
                ExecutionResult::Revert { output, gas_used } => {
                    let error = Some(RevertError::new(output).to_string());
                    return Ok(AccessListResult {
                        access_list,
                        gas_used: U256::from(gas_used),
                        error,
                    })
                }
                ExecutionResult::Success { .. } => {}
            };
            // transact again to get the exact gas used
            let gas_limit = tx_env.gas_limit();
            let result = this.transact(&mut db, evm_env, tx_env)?;
            let res = match result.result {
                ExecutionResult::Halt { reason, gas_used } => {
                    let error = Some(Self::Error::from_evm_halt(reason, gas_limit).to_string());
                    AccessListResult { access_list, gas_used: U256::from(gas_used), error }
                }
                ExecutionResult::Revert { output, gas_used } => {
                    let error = Some(RevertError::new(output).to_string());
                    AccessListResult { access_list, gas_used: U256::from(gas_used), error }
                }
                ExecutionResult::Success { gas_used, .. } => {
                    AccessListResult { access_list, gas_used: U256::from(gas_used), error: None }
                }
            };
            Ok(res)
        })
    }
}
/// Executes code on state.
pub trait Call:
LoadState<
RpcConvert: RpcConvert<TxEnv = TxEnvFor<Self::Evm>, Spec = SpecFor<Self::Evm>>,
Error: FromEvmError<Self::Evm>
+ From<<Self::RpcConvert as RpcConvert>::Error>
+ From<ProviderError>,
> + SpawnBlocking
{
    /// Returns default gas limit to use for `eth_call` and tracing RPC methods.
    ///
    /// Data access in default trait method implementations.
    ///
    /// This value is also used as the block gas limit when preparing call environments
    /// and caps any larger gas limit supplied in a request.
    fn call_gas_limit(&self) -> u64;
    /// Returns the maximum number of blocks accepted for `eth_simulateV1`.
    ///
    /// Payloads with more `blockStateCalls` entries than this are rejected as invalid params.
    fn max_simulate_blocks(&self) -> u64;
    /// Returns the max gas limit that the caller can afford given a transaction environment.
    ///
    /// Default implementation delegates to `alloy_evm::call::caller_gas_allowance`, which
    /// derives the allowance from the sender's balance and the transaction's value and gas
    /// price; `_evm_env` is unused here but available for custom implementations.
    fn caller_gas_allowance(
        &self,
        mut db: impl Database<Error: Into<EthApiError>>,
        _evm_env: &EvmEnvFor<Self::Evm>,
        tx_env: &TxEnvFor<Self::Evm>,
    ) -> Result<u64, Self::Error> {
        alloy_evm::call::caller_gas_allowance(&mut db, tx_env).map_err(Self::Error::from_eth_err)
    }
    /// Executes the closure with the state that corresponds to the given [`BlockId`].
    ///
    /// The closure runs on a blocking-IO task and receives a clone of `self` together with
    /// the resolved state provider.
    fn with_state_at_block<F, R>(
        &self,
        at: BlockId,
        f: F,
    ) -> impl Future<Output = Result<R, Self::Error>> + Send
    where
        R: Send + 'static,
        F: FnOnce(Self, StateProviderTraitObjWrapper<'_>) -> Result<R, Self::Error>
            + Send
            + 'static,
    {
        self.spawn_blocking_io_fut(move |this| async move {
            let state = this.state_at_block_id(at).await?;
            f(this, StateProviderTraitObjWrapper(&state))
        })
    }
/// Executes the `TxEnv` against the given [Database] without committing state
/// changes.
fn transact<DB>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
{
let mut evm = self.evm_config().evm_with_env(db, evm_env);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
/// Executes the [`EvmEnv`] against the given [Database] without committing state
/// changes.
fn transact_with_inspector<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError> + fmt::Debug,
I: InspectorFor<Self::Evm, DB>,
{
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?;
Ok(res)
}
/// Executes the call request at the given [`BlockId`].
fn transact_call_at(
&self,
request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
at: BlockId,
overrides: EvmOverrides,
) -> impl Future<Output = Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>> + Send
where
Self: LoadPendingBlock,
{
let this = self.clone();
self.spawn_with_call_at(request, at, overrides, move |db, evm_env, tx_env| {
this.transact(db, evm_env, tx_env)
})
}
    /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task
    ///
    /// Unlike [`Self::with_state_at_block`], the closure only receives the state provider
    /// here, not a clone of `self`.
    fn spawn_with_state_at_block<F, R>(
        &self,
        at: BlockId,
        f: F,
    ) -> impl Future<Output = Result<R, Self::Error>> + Send
    where
        F: FnOnce(StateProviderTraitObjWrapper<'_>) -> Result<R, Self::Error> + Send + 'static,
        R: Send + 'static,
    {
        self.spawn_blocking_io_fut(move |this| async move {
            let state = this.state_at_block_id(at).await?;
            f(StateProviderTraitObjWrapper(&state))
        })
    }
    /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and
    /// executes the closure on a new task returning the result of the closure.
    ///
    /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at
    /// the given [`BlockId`] and with configured call settings: `prepare_call_env`.
    ///
    /// This is primarily used by `eth_call`.
    ///
    /// # Blocking behaviour
    ///
    /// This assumes executing the call is relatively more expensive on IO than CPU because it
    /// transacts a single transaction on an empty in memory database. Because `eth_call`s are
    /// usually allowed to consume a lot of gas, this also allows a lot of memory operations so
    /// we assume this is not primarily CPU bound and instead spawn the call on a regular tokio task
    /// instead, where blocking IO is less problematic.
    fn spawn_with_call_at<F, R>(
        &self,
        request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
        at: BlockId,
        overrides: EvmOverrides,
        f: F,
    ) -> impl Future<Output = Result<R, Self::Error>> + Send
    where
        Self: LoadPendingBlock,
        F: FnOnce(
                StateCacheDbRefMutWrapper<'_, '_>,
                EvmEnvFor<Self::Evm>,
                TxEnvFor<Self::Evm>,
            ) -> Result<R, Self::Error>
            + Send
            + 'static,
        R: Send + 'static,
    {
        async move {
            let (evm_env, at) = self.evm_env_at(at).await?;
            let this = self.clone();
            self.spawn_blocking_io_fut(move |_| async move {
                let state = this.state_at_block_id(at).await?;
                let mut db =
                    CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state)));
                // Applies the configured call settings (gas cap, overrides, relaxed checks)
                // and builds the tx env from the request.
                let (evm_env, tx_env) =
                    this.prepare_call_env(evm_env, request, &mut db, overrides)?;
                f(StateCacheDbRefMutWrapper(&mut db), evm_env, tx_env)
            })
            .await
        }
    }
    /// Retrieves the transaction if it exists and executes it.
    ///
    /// Before the transaction is executed, all previous transaction in the block are applied to the
    /// state by executing them first.
    /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed
    /// and the database that points to the beginning of the transaction.
    ///
    /// Returns `Ok(None)` if the transaction is unknown.
    ///
    /// Note: Implementers should use a threadpool where blocking is allowed, such as
    /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool).
    fn spawn_replay_transaction<F, R>(
        &self,
        hash: B256,
        f: F,
    ) -> impl Future<Output = Result<Option<R>, Self::Error>> + Send
    where
        Self: LoadBlock + LoadTransaction,
        F: FnOnce(
                TransactionInfo,
                ResultAndState<HaltReasonFor<Self::Evm>>,
                StateCacheDb<'_>,
            ) -> Result<R, Self::Error>
            + Send
            + 'static,
        R: Send + 'static,
    {
        async move {
            // Look up the transaction together with the block it was included in.
            let (transaction, block) = match self.transaction_and_block(hash).await? {
                None => return Ok(None),
                Some(res) => res,
            };
            let (tx, tx_info) = transaction.split();
            let (evm_env, _) = self.evm_env_at(block.hash().into()).await?;
            // we need to get the state of the parent block because we're essentially replaying the
            // block the transaction is included in
            let parent_block = block.parent_hash();
            let this = self.clone();
            self.spawn_with_state_at_block(parent_block.into(), move |state| {
                let mut db = CacheDB::new(StateProviderDatabase::new(state));
                let block_txs = block.transactions_recovered();
                // replay all transactions prior to the targeted transaction
                this.replay_transactions_until(&mut db, evm_env.clone(), block_txs, *tx.tx_hash())?;
                let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx);
                let res = this.transact(&mut db, evm_env, tx_env)?;
                f(tx_info, res, db)
            })
            .await
            .map(Some)
        }
    }
/// Replays all the transactions until the target transaction is found.
///
/// All transactions before the target transaction are executed and their changes are written to
/// the _runtime_ db ([`CacheDB`]).
///
/// Note: This assumes the target transaction is in the given iterator.
/// Returns the index of the target transaction in the given iterator.
fn replay_transactions_until<'a, DB, I>(
&self,
db: &mut DB,
evm_env: EvmEnvFor<Self::Evm>,
transactions: I,
target_tx_hash: B256,
) -> Result<usize, Self::Error>
where
DB: Database<Error = ProviderError> + DatabaseCommit + core::fmt::Debug,
I: IntoIterator<Item = Recovered<&'a ProviderTx<Self::Provider>>>,
{
let mut evm = self.evm_config().evm_with_env(db, evm_env);
let mut index = 0;
for tx in transactions {
if *tx.tx_hash() == target_tx_hash {
// reached the target transaction
break
}
let tx_env = self.evm_config().tx_env(tx);
evm.transact_commit(tx_env).map_err(Self::Error::from_evm_err)?;
index += 1;
}
Ok(index)
}
///
/// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are
/// `None`, they fall back to the [`EvmEnv`]'s settings.
fn create_txn_env(
&self,
evm_env: &EvmEnv<SpecFor<Self::Evm>>,
mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
mut db: impl Database<Error: Into<EthApiError>>,
) -> Result<TxEnvFor<Self::Evm>, Self::Error> {
if request.as_ref().nonce().is_none() {
let nonce = db
.basic(request.as_ref().from().unwrap_or_default())
.map_err(Into::into)?
.map(|acc| acc.nonce)
.unwrap_or_default();
request.as_mut().set_nonce(nonce);
}
Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?)
}
/// Prepares the [`EvmEnv`] for execution of calls.
///
/// Does not commit any changes to the underlying database.
///
/// ## EVM settings
///
/// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: <https://github.com/ethereum/go-ethereum/blob/380688c636a654becc8f114438c2a5d93d2db032/core/state_transition.go#L145-L148>:
///
/// - `disable_eip3607` is set to `true`
/// - `disable_base_fee` is set to `true`
/// - `nonce` is set to `None`
///
/// In addition, this changes the block's gas limit to the configured [`Self::call_gas_limit`].
#[expect(clippy::type_complexity)]
fn prepare_call_env<DB>(
&self,
mut evm_env: EvmEnvFor<Self::Evm>,
mut request: RpcTxReq<<Self::RpcConvert as RpcConvert>::Network>,
db: &mut DB,
overrides: EvmOverrides,
) -> Result<(EvmEnvFor<Self::Evm>, TxEnvFor<Self::Evm>), Self::Error>
where
DB: Database + DatabaseCommit + OverrideBlockHashes,
EthApiError: From<<DB as Database>::Error>,
{
if let Some(requested_gas) = request.as_ref().gas_limit() {
let global_gas_cap = self.call_gas_limit();
if global_gas_cap != 0 && global_gas_cap < requested_gas {
warn!(target: "rpc::eth::call", ?request, ?global_gas_cap, "Capping gas limit to global gas cap");
request.as_mut().set_gas_limit(global_gas_cap);
}
}
// apply configured gas cap
evm_env.block_env.gas_limit = self.call_gas_limit();
// Disabled because eth_call is sometimes used with eoa senders
// See <https://github.com/paradigmxyz/reth/issues/1959>
evm_env.cfg_env.disable_eip3607 = true;
// The basefee should be ignored for eth_call
// See:
// <https://github.com/ethereum/go-ethereum/blob/ee8e83fa5f6cb261dad2ed0a7bbcde4930c41e6c/internal/ethapi/api.go#L985>
evm_env.cfg_env.disable_base_fee = true;
// set nonce to None so that the correct nonce is chosen by the EVM
request.as_mut().take_nonce();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs | crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs | //! Spawns a blocking task. CPU heavy tasks are executed with the `rayon` library. IO heavy tasks
//! are executed on the `tokio` runtime.
use futures::Future;
use reth_rpc_eth_types::EthApiError;
use reth_tasks::{
pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner,
};
use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit};
use crate::EthApiTypes;
/// Executes code on a blocking thread.
///
/// Two lanes are provided: an IO lane backed by a [`TaskSpawner`]
/// (`spawn_blocking_io*`) and a CPU lane backed by a [`BlockingTaskPool`]
/// (`spawn_tracing`), with a [`BlockingTaskGuard`] semaphore to bound concurrency.
pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static {
    /// Returns a handle for spawning IO heavy blocking tasks.
    ///
    /// Runtime access in default trait method implementations.
    fn io_task_spawner(&self) -> impl TaskSpawner;
    /// Returns a handle for spawning CPU heavy blocking tasks.
    ///
    /// Thread pool access in default trait method implementations.
    fn tracing_task_pool(&self) -> &BlockingTaskPool;
    /// Returns handle to semaphore for pool of CPU heavy blocking tasks.
    fn tracing_task_guard(&self) -> &BlockingTaskGuard;
    /// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`).
    ///
    /// The returned permit is released when dropped.
    fn acquire_owned(
        &self,
    ) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
        self.tracing_task_guard().clone().acquire_owned()
    }
    /// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`).
    fn acquire_many_owned(
        &self,
        n: u32,
    ) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
        self.tracing_task_guard().clone().acquire_many_owned(n)
    }
    /// Executes the future on a new blocking task.
    ///
    /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing
    /// or CPU bound operations in general use [`spawn_tracing`](Self::spawn_tracing).
    fn spawn_blocking_io<F, R>(&self, f: F) -> impl Future<Output = Result<R, Self::Error>> + Send
    where
        F: FnOnce(Self) -> Result<R, Self::Error> + Send + 'static,
        R: Send + 'static,
    {
        // Run `f` on the IO spawner and ship the result back over a oneshot channel.
        let (tx, rx) = oneshot::channel();
        let this = self.clone();
        self.io_task_spawner().spawn_blocking(Box::pin(async move {
            let res = f(this);
            let _ = tx.send(res);
        }));
        // A dropped sender (e.g. the spawned task never completed) surfaces as an
        // internal error.
        async move { rx.await.map_err(|_| EthApiError::InternalEthError)? }
    }
    /// Executes the future on a new blocking task.
    ///
    /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing
    /// or CPU bound operations in general use [`spawn_tracing`](Self::spawn_tracing).
    fn spawn_blocking_io_fut<F, R, Fut>(
        &self,
        f: F,
    ) -> impl Future<Output = Result<R, Self::Error>> + Send
    where
        Fut: Future<Output = Result<R, Self::Error>> + Send + 'static,
        F: FnOnce(Self) -> Fut + Send + 'static,
        R: Send + 'static,
    {
        // Same as `spawn_blocking_io`, but `f` produces a future that is awaited on
        // the blocking task before its result is sent back.
        let (tx, rx) = oneshot::channel();
        let this = self.clone();
        self.io_task_spawner().spawn_blocking(Box::pin(async move {
            let res = f(this).await;
            let _ = tx.send(res);
        }));
        async move { rx.await.map_err(|_| EthApiError::InternalEthError)? }
    }
    /// Executes a blocking task on the tracing pool.
    ///
    /// Note: This is expected for futures that are predominantly CPU bound, as it uses `rayon`
    /// under the hood, for blocking IO futures use [`spawn_blocking`](Self::spawn_blocking_io). See
    /// <https://ryhl.io/blog/async-what-is-blocking/>.
    fn spawn_tracing<F, R>(&self, f: F) -> impl Future<Output = Result<R, Self::Error>> + Send
    where
        F: FnOnce(Self) -> Result<R, Self::Error> + Send + 'static,
        R: Send + 'static,
    {
        let this = self.clone();
        let fut = self.tracing_task_pool().spawn(move || f(this));
        // A pool-side failure (task could not be joined) surfaces as an internal
        // blocking-task error.
        async move { fut.await.map_err(|_| EthApiError::InternalBlockingTaskError)? }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/fee.rs | crates/rpc/rpc-eth-api/src/helpers/fee.rs | //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods.
use super::LoadBlock;
use crate::FromEthApiError;
use alloy_consensus::BlockHeader;
use alloy_eips::eip7840::BlobParams;
use alloy_primitives::U256;
use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory};
use futures::Future;
use reth_chainspec::{ChainSpecProvider, EthChainSpec};
use reth_primitives_traits::BlockBody;
use reth_rpc_eth_types::{
fee_history::calculate_reward_percentiles_for_block, utils::checked_blob_gas_used_ratio,
EthApiError, FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError,
};
use reth_storage_api::{BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderHeader};
use tracing::debug;
use reth_primitives_traits::BlockHeader as _;
/// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the
/// `eth_` namespace.
pub trait EthFees:
LoadFee<
Provider: ChainSpecProvider<ChainSpec: EthChainSpec<Header = ProviderHeader<Self::Provider>>>,
>
{
    /// Returns a suggestion for a gas price for legacy transactions.
    ///
    /// See also: <https://github.com/ethereum/pm/issues/328#issuecomment-853234014>
    fn gas_price(&self) -> impl Future<Output = Result<U256, Self::Error>> + Send
    where
        Self: LoadBlock,
    {
        // Thin delegation to the `LoadFee` default implementation.
        LoadFee::gas_price(self)
    }
    /// Returns a suggestion for a base fee for blob transactions.
    fn blob_base_fee(&self) -> impl Future<Output = Result<U256, Self::Error>> + Send
    where
        Self: LoadBlock,
    {
        // Thin delegation to the `LoadFee` default implementation.
        LoadFee::blob_base_fee(self)
    }
    /// Returns a suggestion for the priority fee (the tip)
    fn suggested_priority_fee(&self) -> impl Future<Output = Result<U256, Self::Error>> + Send
    where
        Self: 'static,
    {
        // Thin delegation to the `LoadFee` default implementation.
        LoadFee::suggested_priority_fee(self)
    }
    /// Reports the fee history, for the given amount of blocks, up until the given newest block.
    ///
    /// If `reward_percentiles` are provided the [`FeeHistory`] will include the _approximated_
    /// rewards for the requested range.
    fn fee_history(
        &self,
        mut block_count: u64,
        mut newest_block: BlockNumberOrTag,
        reward_percentiles: Option<Vec<f64>>,
    ) -> impl Future<Output = Result<FeeHistory, Self::Error>> + Send {
        async move {
            // An empty request is answered with an empty (default) history.
            if block_count == 0 {
                return Ok(FeeHistory::default())
            }
            // ensure the given reward percentiles aren't excessive
            if reward_percentiles.as_ref().map(|perc| perc.len() as u64) >
                Some(self.gas_oracle().config().max_reward_percentile_count)
            {
                return Err(EthApiError::InvalidRewardPercentiles.into())
            }
            // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225
            let max_fee_history = if reward_percentiles.is_none() {
                self.gas_oracle().config().max_header_history
            } else {
                self.gas_oracle().config().max_block_history
            };
            if block_count > max_fee_history {
                debug!(
                    requested = block_count,
                    truncated = max_fee_history,
                    "Sanitizing fee history block count"
                );
                block_count = max_fee_history
            }
            if newest_block.is_pending() {
                // cap the target block since we don't have fee history for the pending block
                newest_block = BlockNumberOrTag::Latest;
            }
            let end_block = self
                .provider()
                .block_number_for_id(newest_block.into())
                .map_err(Self::Error::from_eth_err)?
                .ok_or(EthApiError::HeaderNotFound(newest_block.into()))?;
            // need to add 1 to the end block to get the correct (inclusive) range
            let end_block_plus = end_block + 1;
            // Ensure that we would not be querying outside of genesis
            if end_block_plus < block_count {
                block_count = end_block_plus;
            }
            // If reward percentiles were specified, we
            // need to validate that they are monotonically
            // increasing and 0 <= p <= 100
            // Note: The types used ensure that the percentiles are never < 0
            if let Some(percentiles) = &reward_percentiles {
                if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) {
                    return Err(EthApiError::InvalidRewardPercentiles.into())
                }
            }
            // Fetch the headers and ensure we got all of them
            //
            // Treat a request for 1 block as a request for `newest_block..=newest_block`,
            // otherwise `newest_block - 2`
            // NOTE: We ensured that block count is capped
            let start_block = end_block_plus - block_count;
            // Collect base fees, gas usage ratios and (optionally) reward percentile data
            let mut base_fee_per_gas: Vec<u128> = Vec::new();
            let mut gas_used_ratio: Vec<f64> = Vec::new();
            let mut base_fee_per_blob_gas: Vec<u128> = Vec::new();
            let mut blob_gas_used_ratio: Vec<f64> = Vec::new();
            let mut rewards: Vec<Vec<u128>> = Vec::new();
            // Check if the requested range is within the cache bounds
            // Fast path: serve the request entirely from the in-memory fee history cache.
            let fee_entries = self.fee_history_cache().get_history(start_block, end_block).await;
            if let Some(fee_entries) = fee_entries {
                if fee_entries.len() != block_count as usize {
                    return Err(EthApiError::InvalidBlockRange.into())
                }
                for entry in &fee_entries {
                    base_fee_per_gas
                        .push(entry.header.base_fee_per_gas().unwrap_or_default() as u128);
                    gas_used_ratio.push(entry.gas_used_ratio);
                    base_fee_per_blob_gas.push(entry.base_fee_per_blob_gas.unwrap_or_default());
                    blob_gas_used_ratio.push(entry.blob_gas_used_ratio);
                    if let Some(percentiles) = &reward_percentiles {
                        let mut block_rewards = Vec::with_capacity(percentiles.len());
                        for &percentile in percentiles {
                            // Cached entries carry precomputed rewards; snap the requested
                            // percentile to the cache resolution.
                            block_rewards.push(self.approximate_percentile(entry, percentile));
                        }
                        rewards.push(block_rewards);
                    }
                }
                let last_entry = fee_entries.last().expect("is not empty");
                // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the
                // next block
                base_fee_per_gas.push(
                    self.provider()
                        .chain_spec()
                        .next_block_base_fee(
                            &last_entry.header,
                            last_entry.header.timestamp_seconds(),
                        )
                        .unwrap_or_default() as u128,
                );
                base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default());
            } else {
                // Slow path: the cache couldn't serve the range.
                // read the requested header range
                let headers = self.provider()
                    .sealed_headers_range(start_block..=end_block)
                    .map_err(Self::Error::from_eth_err)?;
                if headers.len() != block_count as usize {
                    return Err(EthApiError::InvalidBlockRange.into())
                }
                let chain_spec = self.provider().chain_spec();
                for header in &headers {
                    base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128);
                    gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64);
                    let blob_params = chain_spec
                        .blob_params_at_timestamp(header.timestamp_seconds())
                        .unwrap_or_else(BlobParams::cancun);
                    base_fee_per_blob_gas.push(header.blob_fee(blob_params).unwrap_or_default());
                    blob_gas_used_ratio.push(
                        checked_blob_gas_used_ratio(
                            header.blob_gas_used().unwrap_or_default(),
                            blob_params.max_blob_gas_per_block(),
                        )
                    );
                    // Percentiles were specified, so we need to collect reward percentile info
                    if let Some(percentiles) = &reward_percentiles {
                        let (block, receipts) = self.cache()
                            .get_block_and_receipts(header.hash())
                            .await
                            .map_err(Self::Error::from_eth_err)?
                            .ok_or(EthApiError::InvalidBlockRange)?;
                        rewards.push(
                            calculate_reward_percentiles_for_block(
                                percentiles,
                                header.gas_used(),
                                header.base_fee_per_gas().unwrap_or_default(),
                                block.body().transactions(),
                                &receipts,
                            )
                            .unwrap_or_default(),
                        );
                    }
                }
                // The spec states that `base_fee_per_gas` "[..] includes the next block after the
                // newest of the returned range, because this value can be derived from the
                // newest block"
                //
                // The unwrap is safe since we checked earlier that we got at least 1 header.
                let last_header = headers.last().expect("is present");
                base_fee_per_gas.push(
                    chain_spec
                        .next_block_base_fee(last_header.header(), last_header.timestamp_seconds())
                        .unwrap_or_default() as u128,
                );
                // Same goes for the `base_fee_per_blob_gas`:
                // > "[..] includes the next block after the newest of the returned range, because this value can be derived from the newest block.
                base_fee_per_blob_gas.push(
                    last_header
                        .maybe_next_block_blob_fee(
                            chain_spec.blob_params_at_timestamp(last_header.timestamp_seconds())
                        ).unwrap_or_default()
                );
            };
            // `reward` is only `Some` when percentiles were requested.
            Ok(FeeHistory {
                base_fee_per_gas,
                gas_used_ratio,
                base_fee_per_blob_gas,
                blob_gas_used_ratio,
                oldest_block: start_block,
                reward: reward_percentiles.map(|_| rewards),
            })
        }
    }
/// Approximates reward at a given percentile for a specific block
/// Based on the configured resolution
fn approximate_percentile(
&self,
entry: &FeeHistoryEntry<ProviderHeader<Self::Provider>>,
requested_percentile: f64,
) -> u128 {
let resolution = self.fee_history_cache().resolution();
let rounded_percentile =
(requested_percentile * resolution as f64).round() / resolution as f64;
let clamped_percentile = rounded_percentile.clamp(0.0, 100.0);
// Calculate the index in the precomputed rewards array
let index = (clamped_percentile / (1.0 / resolution as f64)).round() as usize;
// Fetch the reward from the FeeHistoryEntry
entry.rewards.get(index).copied().unwrap_or_default()
}
}
/// Loads fee from database.
///
/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods.
pub trait LoadFee: LoadBlock
where
Self::Provider: BlockReaderIdExt,
{
/// Returns a handle for reading gas price.
///
/// Data access in default (L1) trait method implementations.
fn gas_oracle(&self) -> &GasPriceOracle<Self::Provider>;
/// Returns a handle for reading fee history data from memory.
///
/// Data access in default (L1) trait method implementations.
fn fee_history_cache(&self) -> &FeeHistoryCache<ProviderHeader<Self::Provider>>;
/// Returns the gas price if it is set, otherwise fetches a suggested gas price for legacy
/// transactions.
fn legacy_gas_price(
&self,
gas_price: Option<U256>,
) -> impl Future<Output = Result<U256, Self::Error>> + Send {
async move {
match gas_price {
Some(gas_price) => Ok(gas_price),
None => {
// fetch a suggested gas price
self.gas_price().await
}
}
}
}
    /// Returns the EIP-1559 fees if they are set, otherwise fetches a suggested gas price for
    /// EIP-1559 transactions.
    ///
    /// Returns (`base_fee`, `priority_fee`)
    fn eip1559_fees(
        &self,
        base_fee: Option<U256>,
        max_priority_fee_per_gas: Option<U256>,
    ) -> impl Future<Output = Result<(U256, U256), Self::Error>> + Send {
        async move {
            let base_fee = match base_fee {
                Some(base_fee) => base_fee,
                None => {
                    // fetch pending base fee
                    // A pending block without a base fee (pre-EIP-1559) is reported as
                    // an unsupported transaction type.
                    let base_fee = self
                        .recovered_block(BlockNumberOrTag::Pending.into())
                        .await?
                        .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))?
                        .base_fee_per_gas()
                        .ok_or(EthApiError::InvalidTransaction(
                            RpcInvalidTransactionError::TxTypeNotSupported,
                        ))?;
                    U256::from(base_fee)
                }
            };
            // Fall back to the suggested tip when no priority fee was provided.
            let max_priority_fee_per_gas = match max_priority_fee_per_gas {
                Some(max_priority_fee_per_gas) => max_priority_fee_per_gas,
                None => self.suggested_priority_fee().await?,
            };
            Ok((base_fee, max_priority_fee_per_gas))
        }
    }
/// Returns the EIP-4844 blob fee if it is set, otherwise fetches a blob fee.
fn eip4844_blob_fee(
&self,
blob_fee: Option<U256>,
) -> impl Future<Output = Result<U256, Self::Error>> + Send {
async move {
match blob_fee {
Some(blob_fee) => Ok(blob_fee),
None => self.blob_base_fee().await,
}
}
}
/// Returns a suggestion for a gas price for legacy transactions.
///
/// See also: <https://github.com/ethereum/pm/issues/328#issuecomment-853234014>
fn gas_price(&self) -> impl Future<Output = Result<U256, Self::Error>> + Send {
async move {
let header = self.provider().latest_header().map_err(Self::Error::from_eth_err)?;
let suggested_tip = self.suggested_priority_fee().await?;
let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default();
Ok(suggested_tip + U256::from(base_fee))
}
}
    /// Returns a suggestion for a base fee for blob transactions.
    fn blob_base_fee(&self) -> impl Future<Output = Result<U256, Self::Error>> + Send {
        async move {
            self.provider()
                .latest_header()
                .map_err(Self::Error::from_eth_err)?
                .and_then(|h| {
                    // Derive the next block's blob fee from the latest header, using the
                    // blob schedule active at that header's timestamp.
                    h.maybe_next_block_blob_fee(
                        self.provider()
                            .chain_spec()
                            .blob_params_at_timestamp(h.timestamp_seconds()),
                    )
                })
                // No latest header, or the blob fee cannot be derived (excess blob gas
                // not set) -> error.
                .ok_or(EthApiError::ExcessBlobGasNotSet.into())
                .map(U256::from)
        }
    }
    /// Returns a suggestion for the priority fee (the tip)
    fn suggested_priority_fee(&self) -> impl Future<Output = Result<U256, Self::Error>> + Send
    where
        Self: 'static,
    {
        // Delegates to the gas price oracle's tip cap suggestion.
        async move { self.gas_oracle().suggest_tip_cap().await.map_err(Self::Error::from_eth_err) }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/trace.rs | crates/rpc/rpc-eth-api/src/helpers/trace.rs | //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods.
use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction};
use crate::FromEvmError;
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use alloy_rpc_types_eth::{BlockId, TransactionInfo};
use futures::Future;
use reth_chainspec::ChainSpecProvider;
use reth_errors::ProviderError;
use reth_evm::{
evm::EvmFactoryExt, system_calls::SystemCaller, tracing::TracingCtx, ConfigureEvm, Database,
Evm, EvmEnvFor, EvmFor, HaltReasonFor, InspectorFor, TxEnvFor,
};
use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock, SignedTransaction};
use reth_revm::{database::StateProviderDatabase, db::CacheDB};
use reth_rpc_eth_types::{
cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper},
EthApiError,
};
use reth_storage_api::{ProviderBlock, ProviderTx};
use revm::{context_interface::result::ResultAndState, DatabaseCommit};
use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig};
use std::sync::Arc;
/// Executes CPU heavy tasks.
pub trait Trace: LoadState<Error: FromEvmError<Self::Evm>> {
/// Executes the [`TxEnvFor`] with [`EvmEnvFor`] against the given [Database] without committing
/// state changes.
fn inspect<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
inspector: I,
) -> Result<ResultAndState<HaltReasonFor<Self::Evm>>, Self::Error>
where
DB: Database<Error = ProviderError>,
I: InspectorFor<Self::Evm, DB>,
{
let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector);
evm.transact(tx_env).map_err(Self::Error::from_evm_err)
}
/// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the
/// config.
///
/// The callback is then called with the [`TracingInspector`] and the [`ResultAndState`] after
/// the configured [`reth_evm::EvmEnv`] was inspected.
///
/// Caution: this is blocking
fn trace_at<F, R>(
&self,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
config: TracingInspectorConfig,
at: BlockId,
f: F,
) -> impl Future<Output = Result<R, Self::Error>> + Send
where
Self: Call,
R: Send + 'static,
F: FnOnce(
TracingInspector,
ResultAndState<HaltReasonFor<Self::Evm>>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
{
self.with_state_at_block(at, move |this, state| {
let mut db = CacheDB::new(StateProviderDatabase::new(state));
let mut inspector = TracingInspector::new(config);
let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?;
f(inspector, res)
})
}
/// Same as [`trace_at`](Self::trace_at) but also provides the used database to the callback.
///
/// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the
/// config.
///
/// The callback is then called with the [`TracingInspector`] and the [`ResultAndState`] after
/// the configured [`reth_evm::EvmEnv`] was inspected.
fn spawn_trace_at_with_state<F, R>(
&self,
evm_env: EvmEnvFor<Self::Evm>,
tx_env: TxEnvFor<Self::Evm>,
config: TracingInspectorConfig,
at: BlockId,
f: F,
) -> impl Future<Output = Result<R, Self::Error>> + Send
where
Self: LoadPendingBlock + Call,
F: FnOnce(
TracingInspector,
ResultAndState<HaltReasonFor<Self::Evm>>,
StateCacheDb<'_>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
R: Send + 'static,
{
let this = self.clone();
self.spawn_with_state_at_block(at, move |state| {
let mut db = CacheDB::new(StateProviderDatabase::new(state));
let mut inspector = TracingInspector::new(config);
let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?;
f(inspector, res, db)
})
}
/// Retrieves the transaction if it exists and returns its trace.
///
/// Before the transaction is traced, all previous transaction in the block are applied to the
/// state by executing them first.
/// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed
/// and the database that points to the beginning of the transaction.
///
/// Note: Implementers should use a threadpool where blocking is allowed, such as
/// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool).
fn spawn_trace_transaction_in_block<F, R>(
&self,
hash: B256,
config: TracingInspectorConfig,
f: F,
) -> impl Future<Output = Result<Option<R>, Self::Error>> + Send
where
Self: LoadPendingBlock + LoadTransaction + Call,
F: FnOnce(
TransactionInfo,
TracingInspector,
ResultAndState<HaltReasonFor<Self::Evm>>,
StateCacheDb<'_>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
R: Send + 'static,
{
self.spawn_trace_transaction_in_block_with_inspector(hash, TracingInspector::new(config), f)
}
/// Retrieves the transaction if it exists and returns its trace.
///
/// Before the transaction is traced, all previous transaction in the block are applied to the
/// state by executing them first.
/// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed
/// and the database that points to the beginning of the transaction.
///
/// Note: Implementers should use a threadpool where blocking is allowed, such as
/// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool).
fn spawn_trace_transaction_in_block_with_inspector<Insp, F, R>(
&self,
hash: B256,
mut inspector: Insp,
f: F,
) -> impl Future<Output = Result<Option<R>, Self::Error>> + Send
where
Self: LoadPendingBlock + LoadTransaction + Call,
F: FnOnce(
TransactionInfo,
Insp,
ResultAndState<HaltReasonFor<Self::Evm>>,
StateCacheDb<'_>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
Insp:
for<'a, 'b> InspectorFor<Self::Evm, StateCacheDbRefMutWrapper<'a, 'b>> + Send + 'static,
R: Send + 'static,
{
async move {
let (transaction, block) = match self.transaction_and_block(hash).await? {
None => return Ok(None),
Some(res) => res,
};
let (tx, tx_info) = transaction.split();
let (evm_env, _) = self.evm_env_at(block.hash().into()).await?;
// we need to get the state of the parent block because we're essentially replaying the
// block the transaction is included in
let parent_block = block.parent_hash();
let this = self.clone();
self.spawn_with_state_at_block(parent_block.into(), move |state| {
let mut db = CacheDB::new(StateProviderDatabase::new(state));
let block_txs = block.transactions_recovered();
this.apply_pre_execution_changes(&block, &mut db, &evm_env)?;
// replay all transactions prior to the targeted transaction
this.replay_transactions_until(&mut db, evm_env.clone(), block_txs, *tx.tx_hash())?;
let tx_env = this.evm_config().tx_env(tx);
let res = this.inspect(
StateCacheDbRefMutWrapper(&mut db),
evm_env,
tx_env,
&mut inspector,
)?;
f(tx_info, inspector, res, db)
})
.await
.map(Some)
}
}
/// Executes all transactions of a block up to a given index.
///
/// If a `highest_index` is given, this will only execute the first `highest_index`
/// transactions, in other words, it will stop executing transactions after the
/// `highest_index`th transaction. If `highest_index` is `None`, all transactions
/// are executed.
fn trace_block_until<F, R>(
&self,
block_id: BlockId,
block: Option<Arc<RecoveredBlock<ProviderBlock<Self::Provider>>>>,
highest_index: Option<u64>,
config: TracingInspectorConfig,
f: F,
) -> impl Future<Output = Result<Option<Vec<R>>, Self::Error>> + Send
where
Self: LoadBlock,
F: Fn(
TransactionInfo,
TracingCtx<
'_,
Recovered<&ProviderTx<Self::Provider>>,
EvmFor<Self::Evm, StateCacheDbRefMutWrapper<'_, '_>, TracingInspector>,
>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
R: Send + 'static,
{
self.trace_block_until_with_inspector(
block_id,
block,
highest_index,
move || TracingInspector::new(config),
f,
)
}
/// Executes all transactions of a block.
///
/// If a `highest_index` is given, this will only execute the first `highest_index`
/// transactions, in other words, it will stop executing transactions after the
/// `highest_index`th transaction.
///
/// Note: This expect tx index to be 0-indexed, so the first transaction is at index 0.
///
/// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing
/// the transactions.
fn trace_block_until_with_inspector<Setup, Insp, F, R>(
&self,
block_id: BlockId,
block: Option<Arc<RecoveredBlock<ProviderBlock<Self::Provider>>>>,
highest_index: Option<u64>,
mut inspector_setup: Setup,
f: F,
) -> impl Future<Output = Result<Option<Vec<R>>, Self::Error>> + Send
where
Self: LoadBlock,
F: Fn(
TransactionInfo,
TracingCtx<
'_,
Recovered<&ProviderTx<Self::Provider>>,
EvmFor<Self::Evm, StateCacheDbRefMutWrapper<'_, '_>, Insp>,
>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
Setup: FnMut() -> Insp + Send + 'static,
Insp: Clone + for<'a, 'b> InspectorFor<Self::Evm, StateCacheDbRefMutWrapper<'a, 'b>>,
R: Send + 'static,
{
async move {
let block = async {
if block.is_some() {
return Ok(block)
}
self.recovered_block(block_id).await
};
let ((evm_env, _), block) = futures::try_join!(self.evm_env_at(block_id), block)?;
let Some(block) = block else { return Ok(None) };
if block.body().transactions().is_empty() {
// nothing to trace
return Ok(Some(Vec::new()))
}
// replay all transactions of the block
self.spawn_blocking_io_fut(move |this| async move {
// we need to get the state of the parent block because we're replaying this block
// on top of its parent block's state
let state_at = block.parent_hash();
let block_hash = block.hash();
let block_number = evm_env.block_env.number.saturating_to();
let base_fee = evm_env.block_env.basefee;
// now get the state
let state = this.state_at_block_id(state_at.into()).await?;
let mut db =
CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state)));
this.apply_pre_execution_changes(&block, &mut db, &evm_env)?;
// prepare transactions, we do everything upfront to reduce time spent with open
// state
let max_transactions = highest_index.map_or_else(
|| block.body().transaction_count(),
|highest| {
// we need + 1 because the index is 0-based
highest as usize + 1
},
);
let mut idx = 0;
let results = this
.evm_config()
.evm_factory()
.create_tracer(StateCacheDbRefMutWrapper(&mut db), evm_env, inspector_setup())
.try_trace_many(block.transactions_recovered().take(max_transactions), |ctx| {
let tx_info = TransactionInfo {
hash: Some(*ctx.tx.tx_hash()),
index: Some(idx),
block_hash: Some(block_hash),
block_number: Some(block_number),
base_fee: Some(base_fee),
};
idx += 1;
f(tx_info, ctx)
})
.collect::<Result<_, _>>()?;
Ok(Some(results))
})
.await
}
}
/// Executes all transactions of a block and returns a list of callback results invoked for each
/// transaction in the block.
///
/// This
/// 1. fetches all transactions of the block
/// 2. configures the EVM env
/// 3. loops over all transactions and executes them
/// 4. calls the callback with the transaction info, the execution result, the changed state
/// _after_ the transaction [`StateProviderDatabase`] and the database that points to the
/// state right _before_ the transaction.
fn trace_block_with<F, R>(
&self,
block_id: BlockId,
block: Option<Arc<RecoveredBlock<ProviderBlock<Self::Provider>>>>,
config: TracingInspectorConfig,
f: F,
) -> impl Future<Output = Result<Option<Vec<R>>, Self::Error>> + Send
where
Self: LoadBlock,
// This is the callback that's invoked for each transaction with the inspector, the result,
// state and db
F: Fn(
TransactionInfo,
TracingCtx<
'_,
Recovered<&ProviderTx<Self::Provider>>,
EvmFor<Self::Evm, StateCacheDbRefMutWrapper<'_, '_>, TracingInspector>,
>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
R: Send + 'static,
{
self.trace_block_until(block_id, block, None, config, f)
}
/// Executes all transactions of a block and returns a list of callback results invoked for each
/// transaction in the block.
///
/// This
/// 1. fetches all transactions of the block
/// 2. configures the EVM env
/// 3. loops over all transactions and executes them
/// 4. calls the callback with the transaction info, the execution result, the changed state
/// _after_ the transaction `EvmState` and the database that points to the state right
/// _before_ the transaction, in other words the state the transaction was executed on:
/// `changed_state = tx(cached_state)`
///
/// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing
/// a transaction. This is invoked for each transaction.
fn trace_block_inspector<Setup, Insp, F, R>(
&self,
block_id: BlockId,
block: Option<Arc<RecoveredBlock<ProviderBlock<Self::Provider>>>>,
insp_setup: Setup,
f: F,
) -> impl Future<Output = Result<Option<Vec<R>>, Self::Error>> + Send
where
Self: LoadBlock,
// This is the callback that's invoked for each transaction with the inspector, the result,
// state and db
F: Fn(
TransactionInfo,
TracingCtx<
'_,
Recovered<&ProviderTx<Self::Provider>>,
EvmFor<Self::Evm, StateCacheDbRefMutWrapper<'_, '_>, Insp>,
>,
) -> Result<R, Self::Error>
+ Send
+ 'static,
Setup: FnMut() -> Insp + Send + 'static,
Insp: Clone + for<'a, 'b> InspectorFor<Self::Evm, StateCacheDbRefMutWrapper<'a, 'b>>,
R: Send + 'static,
{
self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f)
}
    /// Applies chain-specific state transitions required before executing a block.
    ///
    /// Note: This should only be called when tracing an entire block vs individual transactions.
    /// When tracing transaction on top of an already committed block state, those transitions are
    /// already applied.
    fn apply_pre_execution_changes<DB: Send + Database + DatabaseCommit>(
        &self,
        block: &RecoveredBlock<ProviderBlock<Self::Provider>>,
        db: &mut DB,
        evm_env: &EvmEnvFor<Self::Evm>,
    ) -> Result<(), Self::Error> {
        // The chain spec determines which pre-execution system calls apply.
        let mut system_caller = SystemCaller::new(self.provider().chain_spec());
        // apply relevant system calls
        let mut evm = self.evm_config().evm_with_env(db, evm_env.clone());
        // Failures are surfaced as a custom EVM error (the message references the
        // EIP-4788 beacon-root system call).
        system_caller.apply_pre_execution_changes(block.header(), &mut evm).map_err(|err| {
            EthApiError::EvmCustom(format!("failed to apply 4788 system call {err}"))
        })?;
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/state.rs | crates/rpc/rpc-eth-api/src/helpers/state.rs | //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace
//! RPC methods.
use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking};
use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt};
use alloy_consensus::constants::KECCAK_EMPTY;
use alloy_eips::BlockId;
use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_rpc_types_eth::{Account, AccountInfo, EIP1186AccountProofResponse};
use alloy_serde::JsonStorageKey;
use futures::Future;
use reth_errors::RethError;
use reth_evm::{ConfigureEvm, EvmEnvFor};
use reth_rpc_convert::RpcConvert;
use reth_rpc_eth_types::{
error::FromEvmError, EthApiError, PendingBlockEnv, RpcInvalidTransactionError,
};
use reth_storage_api::{
BlockIdReader, BlockNumReader, StateProvider, StateProviderBox, StateProviderFactory,
};
use reth_transaction_pool::TransactionPool;
/// Helper methods for `eth_` methods relating to state (accounts).
pub trait EthState: LoadState + SpawnBlocking {
    /// Returns the maximum number of blocks into the past for generating state proofs.
    fn max_proof_window(&self) -> u64;

    /// Returns the number of transactions sent from an address at the given block identifier.
    ///
    /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will
    /// look up the highest transaction in pool and return the next nonce (highest + 1).
    fn transaction_count(
        &self,
        address: Address,
        block_id: Option<BlockId>,
    ) -> impl Future<Output = Result<U256, Self::Error>> + Send {
        // Delegates to the `LoadState` implementation, which also consults the pool when the
        // pending tag is requested.
        LoadState::transaction_count(self, address, block_id)
    }

    /// Returns code of given account, at given blocknumber.
    fn get_code(
        &self,
        address: Address,
        block_id: Option<BlockId>,
    ) -> impl Future<Output = Result<Bytes, Self::Error>> + Send {
        LoadState::get_code(self, address, block_id)
    }

    /// Returns balance of given account, at given blocknumber.
    ///
    /// Unknown accounts are reported with a zero balance.
    fn balance(
        &self,
        address: Address,
        block_id: Option<BlockId>,
    ) -> impl Future<Output = Result<U256, Self::Error>> + Send {
        self.spawn_blocking_io_fut(move |this| async move {
            Ok(this
                .state_at_block_id_or_latest(block_id)
                .await?
                .account_balance(&address)
                .map_err(Self::Error::from_eth_err)?
                .unwrap_or_default())
        })
    }

    /// Returns values stored of given account, at given blocknumber.
    ///
    /// Non-public (shielded) storage slots are not exposed over RPC and read as zero.
    fn storage_at(
        &self,
        address: Address,
        index: JsonStorageKey,
        block_id: Option<BlockId>,
    ) -> impl Future<Output = Result<B256, Self::Error>> + Send {
        self.spawn_blocking_io_fut(move |this| async move {
            let storage_value = this
                .state_at_block_id_or_latest(block_id)
                .await?
                .storage(address, index.as_b256())
                .map_err(Self::Error::from_eth_err)?
                .unwrap_or_default();
            // Only public slots are readable; private slots are masked as zero.
            if storage_value.is_public() {
                Ok(B256::new(storage_value.value.to_be_bytes()))
            } else {
                Ok(B256::ZERO)
            }
        })
    }

    /// Returns values stored of given account, with Merkle-proof, at given blocknumber.
    ///
    /// Errors with [`EthApiError::ExceedsMaxProofWindow`] if the requested block is further in
    /// the past than [`Self::max_proof_window`] allows.
    fn get_proof(
        &self,
        address: Address,
        keys: Vec<JsonStorageKey>,
        block_id: Option<BlockId>,
    ) -> Result<
        impl Future<Output = Result<EIP1186AccountProofResponse, Self::Error>> + Send,
        Self::Error,
    >
    where
        Self: EthApiSpec,
    {
        Ok(async move {
            // Proof generation is expensive; gate concurrent requests behind a permit.
            let _permit = self
                .acquire_owned()
                .await
                .map_err(RethError::other)
                .map_err(EthApiError::Internal)?;
            let chain_info = self.chain_info().map_err(Self::Error::from_eth_err)?;
            let block_id = block_id.unwrap_or_default();

            // Check whether the distance to the block exceeds the maximum configured window.
            let block_number = self
                .provider()
                .block_number_for_id(block_id)
                .map_err(Self::Error::from_eth_err)?
                .ok_or(EthApiError::HeaderNotFound(block_id))?;
            let max_window = self.max_proof_window();
            if chain_info.best_number.saturating_sub(block_number) > max_window {
                return Err(EthApiError::ExceedsMaxProofWindow.into())
            }

            self.spawn_blocking_io_fut(move |this| async move {
                let state = this.state_at_block_id(block_id).await?;
                let storage_keys = keys.iter().map(|key| key.as_b256()).collect::<Vec<_>>();
                let proof = state
                    .proof(Default::default(), address, &storage_keys)
                    .map_err(Self::Error::from_eth_err)?;
                Ok(proof.into_eip1186_response(keys))
            })
            .await
        })
    }

    /// Returns the account at the given address for the provided block identifier.
    ///
    /// Returns `Ok(None)` if the account does not exist at that block. Like [`Self::get_proof`],
    /// this is limited to blocks within the configured proof window.
    fn get_account(
        &self,
        address: Address,
        block_id: BlockId,
    ) -> impl Future<Output = Result<Option<Account>, Self::Error>> + Send {
        self.spawn_blocking_io_fut(move |this| async move {
            let state = this.state_at_block_id(block_id).await?;
            let account = state.basic_account(&address).map_err(Self::Error::from_eth_err)?;
            let Some(account) = account else { return Ok(None) };

            // Check whether the distance to the block exceeds the maximum configured proof window.
            let chain_info = this.provider().chain_info().map_err(Self::Error::from_eth_err)?;
            let block_number = this
                .provider()
                .block_number_for_id(block_id)
                .map_err(Self::Error::from_eth_err)?
                .ok_or(EthApiError::HeaderNotFound(block_id))?;
            let max_window = this.max_proof_window();
            if chain_info.best_number.saturating_sub(block_number) > max_window {
                return Err(EthApiError::ExceedsMaxProofWindow.into())
            }

            let balance = account.balance;
            let nonce = account.nonce;
            let code_hash = account.bytecode_hash.unwrap_or(KECCAK_EMPTY);

            // Provide a default `HashedStorage` value in order to
            // get the storage root hash of the current state.
            let storage_root = state
                .storage_root(address, Default::default())
                .map_err(Self::Error::from_eth_err)?;

            Ok(Some(Account { balance, nonce, code_hash, storage_root }))
        })
    }

    /// Retrieves the account's balance, nonce, and code for a given address.
    ///
    /// Missing accounts yield default (zero) values rather than an error.
    fn get_account_info(
        &self,
        address: Address,
        block_id: BlockId,
    ) -> impl Future<Output = Result<AccountInfo, Self::Error>> + Send {
        self.spawn_blocking_io_fut(move |this| async move {
            let state = this.state_at_block_id(block_id).await?;
            let account = state
                .basic_account(&address)
                .map_err(Self::Error::from_eth_err)?
                .unwrap_or_default();
            let balance = account.balance;
            let nonce = account.nonce;
            // `KECCAK_EMPTY` means no deployed bytecode; skip the code lookup in that case.
            let code = if account.get_bytecode_hash() == KECCAK_EMPTY {
                Default::default()
            } else {
                state
                    .account_code(&address)
                    .map_err(Self::Error::from_eth_err)?
                    .unwrap_or_default()
                    .original_bytes()
            };
            Ok(AccountInfo { balance, nonce, code })
        })
    }
}
/// Loads state from database.
///
/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods.
pub trait LoadState:
LoadPendingBlock
+ EthApiTypes<
Error: FromEvmError<Self::Evm> + FromEthApiError,
RpcConvert: RpcConvert<Network = Self::NetworkTypes>,
> + RpcNodeCoreExt
{
/// Returns the state at the given block number
fn state_at_hash(&self, block_hash: B256) -> Result<StateProviderBox, Self::Error> {
self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err)
}
/// Returns the state at the given [`BlockId`] enum.
///
/// Note: if not [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this
/// will only return canonical state. See also <https://github.com/paradigmxyz/reth/issues/4515>
fn state_at_block_id(
&self,
at: BlockId,
) -> impl Future<Output = Result<StateProviderBox, Self::Error>> + Send
where
Self: SpawnBlocking,
{
async move {
if at.is_pending() {
if let Ok(Some(state)) = self.local_pending_state().await {
return Ok(state)
}
}
self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err)
}
}
/// Returns the _latest_ state
fn latest_state(&self) -> Result<StateProviderBox, Self::Error> {
self.provider().latest().map_err(Self::Error::from_eth_err)
}
/// Returns the state at the given [`BlockId`] enum or the latest.
///
/// Convenience function to interprets `None` as `BlockId::Number(BlockNumberOrTag::Latest)`
fn state_at_block_id_or_latest(
&self,
block_id: Option<BlockId>,
) -> impl Future<Output = Result<StateProviderBox, Self::Error>> + Send
where
Self: SpawnBlocking,
{
async move {
if let Some(block_id) = block_id {
self.state_at_block_id(block_id).await
} else {
Ok(self.latest_state()?)
}
}
}
/// Returns the revm evm env for the requested [`BlockId`]
///
/// If the [`BlockId`] this will return the [`BlockId`] of the block the env was configured
/// for.
/// If the [`BlockId`] is pending, this will return the "Pending" tag, otherwise this returns
/// the hash of the exact block.
fn evm_env_at(
&self,
at: BlockId,
) -> impl Future<Output = Result<(EvmEnvFor<Self::Evm>, BlockId), Self::Error>> + Send
where
Self: SpawnBlocking,
{
async move {
if at.is_pending() {
let PendingBlockEnv { evm_env, origin } = self.pending_block_env_and_cfg()?;
Ok((evm_env, origin.state_block_id()))
} else {
// Use cached values if there is no pending block
let block_hash = RpcNodeCore::provider(self)
.block_hash_for_id(at)
.map_err(Self::Error::from_eth_err)?
.ok_or(EthApiError::HeaderNotFound(at))?;
let header =
self.cache().get_header(block_hash).await.map_err(Self::Error::from_eth_err)?;
let evm_env = self.evm_config().evm_env(&header);
Ok((evm_env, block_hash.into()))
}
}
}
/// Returns the next available nonce without gaps for the given address
/// Next available nonce is either the on chain nonce of the account or the highest consecutive
/// nonce in the pool + 1
fn next_available_nonce(
&self,
address: Address,
) -> impl Future<Output = Result<u64, Self::Error>> + Send
where
Self: SpawnBlocking,
{
self.spawn_blocking_io(move |this| {
// first fetch the on chain nonce of the account
let mut next_nonce = this
.latest_state()?
.account_nonce(&address)
.map_err(Self::Error::from_eth_err)?
.unwrap_or_default();
// Retrieve the highest consecutive transaction for the sender from the transaction pool
if let Some(highest_tx) =
this.pool().get_highest_consecutive_transaction_by_sender(address, next_nonce)
{
// Return the nonce of the highest consecutive transaction + 1
next_nonce = highest_tx.nonce().checked_add(1).ok_or_else(|| {
Self::Error::from(EthApiError::InvalidTransaction(
RpcInvalidTransactionError::NonceMaxValue,
))
})?;
}
Ok(next_nonce)
})
}
/// Returns the number of transactions sent from an address at the given block identifier.
///
/// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will
/// look up the highest transaction in pool and return the next nonce (highest + 1).
fn transaction_count(
&self,
address: Address,
block_id: Option<BlockId>,
) -> impl Future<Output = Result<U256, Self::Error>> + Send
where
Self: SpawnBlocking,
{
self.spawn_blocking_io_fut(move |this| async move {
// first fetch the on chain nonce of the account
let on_chain_account_nonce = this
.state_at_block_id_or_latest(block_id)
.await?
.account_nonce(&address)
.map_err(Self::Error::from_eth_err)?
.unwrap_or_default();
if block_id == Some(BlockId::pending()) {
// for pending tag we need to find the highest nonce of txn in the pending state.
if let Some(highest_pool_tx) = this
.pool()
.get_highest_consecutive_transaction_by_sender(address, on_chain_account_nonce)
{
{
// and the corresponding txcount is nonce + 1 of the highest tx in the pool
// (on chain nonce is increased after tx)
let next_tx_nonce =
highest_pool_tx.nonce().checked_add(1).ok_or_else(|| {
Self::Error::from(EthApiError::InvalidTransaction(
RpcInvalidTransactionError::NonceMaxValue,
))
})?;
// guard against drifts in the pool
let next_tx_nonce = on_chain_account_nonce.max(next_tx_nonce);
let tx_count = on_chain_account_nonce.max(next_tx_nonce);
return Ok(U256::from(tx_count));
}
}
}
Ok(U256::from(on_chain_account_nonce))
})
}
/// Returns code of given account, at the given identifier.
fn get_code(
&self,
address: Address,
block_id: Option<BlockId>,
) -> impl Future<Output = Result<Bytes, Self::Error>> + Send
where
Self: SpawnBlocking,
{
self.spawn_blocking_io_fut(move |this| async move {
Ok(this
.state_at_block_id_or_latest(block_id)
.await?
.account_code(&address)
.map_err(Self::Error::from_eth_err)?
.unwrap_or_default()
.original_bytes())
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/block.rs | crates/rpc/rpc-eth-api/src/helpers/block.rs | //! Database access for `eth_` block RPC methods. Loads block and receipt data w.r.t. network.
use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking};
use crate::{
node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore,
RpcReceipt,
};
use alloy_consensus::TxReceipt;
use alloy_eips::BlockId;
use alloy_rlp::Encodable;
use alloy_rpc_types_eth::{Block, BlockTransactions, Index};
use futures::Future;
use reth_node_api::BlockBody;
use reth_primitives_traits::{
AlloyBlockHeader, RecoveredBlock, SealedHeader, SignedTransaction, TransactionMeta,
};
use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcHeader};
use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx};
use reth_transaction_pool::{PoolTransaction, TransactionPool};
use std::{borrow::Cow, sync::Arc};
/// Result type of the fetched block receipts.
///
/// `None` indicates that the requested block was not found.
pub type BlockReceiptsResult<N, E> = Result<Option<Vec<RpcReceipt<N>>>, E>;
/// Result type of the fetched block and its receipts.
///
/// `None` indicates that the requested block was not found; both the block and its receipts are
/// shared via `Arc` so cached values can be returned without copying.
pub type BlockAndReceiptsResult<Eth> = Result<
    Option<(
        Arc<RecoveredBlock<<<Eth as RpcNodeCore>::Provider as BlockReader>::Block>>,
        Arc<Vec<ProviderReceipt<<Eth as RpcNodeCore>::Provider>>>,
    )>,
    <Eth as EthApiTypes>::Error,
>;
/// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the
/// `eth_` namespace.
pub trait EthBlocks:
    LoadBlock<RpcConvert: RpcConvert<Primitives = Self::Primitives, Error = Self::Error>>
{
    /// Returns the block header for the given block id.
    fn rpc_block_header(
        &self,
        block_id: BlockId,
    ) -> impl Future<Output = Result<Option<RpcHeader<Self::NetworkTypes>>, Self::Error>> + Send
    where
        Self: FullEthApiTypes,
    {
        // Fetch the rpc block (hashes only, `full = false`) and project out its header.
        async move { Ok(self.rpc_block(block_id, false).await?.map(|block| block.header)) }
    }

    /// Returns the populated rpc block object for the given block id.
    ///
    /// If `full` is true, the block object will contain all transaction objects, otherwise it will
    /// only contain the transaction hashes.
    fn rpc_block(
        &self,
        block_id: BlockId,
        full: bool,
    ) -> impl Future<Output = Result<Option<RpcBlock<Self::NetworkTypes>>, Self::Error>> + Send
    where
        Self: FullEthApiTypes,
    {
        async move {
            let Some(block) = self.recovered_block(block_id).await? else { return Ok(None) };
            let block = block.clone_into_rpc_block(
                full.into(),
                |tx, tx_info| self.tx_resp_builder().fill(tx, tx_info),
                |header, size| self.tx_resp_builder().convert_header(header, size),
            )?;
            Ok(Some(block))
        }
    }

    /// Returns the number transactions in the given block.
    ///
    /// Returns `None` if the block does not exist
    fn block_transaction_count(
        &self,
        block_id: BlockId,
    ) -> impl Future<Output = Result<Option<usize>, Self::Error>> + Send {
        async move {
            if block_id.is_pending() {
                // Pending block can be fetched directly without need for caching
                return Ok(self
                    .provider()
                    .pending_block()
                    .map_err(Self::Error::from_eth_err)?
                    .map(|block| block.body().transaction_count()));
            }

            let block_hash = match self
                .provider()
                .block_hash_for_id(block_id)
                .map_err(Self::Error::from_eth_err)?
            {
                Some(block_hash) => block_hash,
                None => return Ok(None),
            };

            Ok(self
                .cache()
                .get_recovered_block(block_hash)
                .await
                .map_err(Self::Error::from_eth_err)?
                .map(|b| b.body().transaction_count()))
        }
    }

    /// Helper function for `eth_getBlockReceipts`.
    ///
    /// Returns all transaction receipts in block, or `None` if block wasn't found.
    fn block_receipts(
        &self,
        block_id: BlockId,
    ) -> impl Future<Output = BlockReceiptsResult<Self::NetworkTypes, Self::Error>> + Send
    where
        Self: LoadReceipt,
    {
        async move {
            if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? {
                let block_number = block.number();
                let base_fee = block.base_fee_per_gas();
                let block_hash = block.hash();
                let excess_blob_gas = block.excess_blob_gas();
                let timestamp = block.timestamp();

                // Running totals over the block: per-tx gas is derived from consecutive
                // cumulative values, and `next_log_index` tracks the block-wide log offset.
                let mut gas_used = 0;
                let mut next_log_index = 0;

                let inputs = block
                    .transactions_recovered()
                    .zip(receipts.iter())
                    .enumerate()
                    .map(|(idx, (tx, receipt))| {
                        let meta = TransactionMeta {
                            tx_hash: *tx.tx_hash(),
                            index: idx as u64,
                            block_hash,
                            block_number,
                            base_fee,
                            excess_blob_gas,
                            timestamp,
                        };

                        let input = ConvertReceiptInput {
                            receipt: Cow::Borrowed(receipt),
                            tx,
                            // Per-transaction gas = this cumulative value minus the previous one.
                            gas_used: receipt.cumulative_gas_used() - gas_used,
                            next_log_index,
                            meta,
                        };

                        // Advance the running totals for the next transaction.
                        gas_used = receipt.cumulative_gas_used();
                        next_log_index += receipt.logs().len();

                        input
                    })
                    .collect::<Vec<_>>();

                return self.tx_resp_builder().convert_receipts(inputs).map(Some)
            }

            Ok(None)
        }
    }

    /// Helper method that loads a block and all its receipts.
    fn load_block_and_receipts(
        &self,
        block_id: BlockId,
    ) -> impl Future<Output = BlockAndReceiptsResult<Self>> + Send
    where
        Self: LoadReceipt,
        Self::Pool:
            TransactionPool<Transaction: PoolTransaction<Consensus = ProviderTx<Self::Provider>>>,
    {
        async move {
            if block_id.is_pending() {
                // First, try to get the pending block from the provider, in case we already
                // received the actual pending block from the CL.
                if let Some((block, receipts)) = self
                    .provider()
                    .pending_block_and_receipts()
                    .map_err(Self::Error::from_eth_err)?
                {
                    return Ok(Some((Arc::new(block), Arc::new(receipts))));
                }

                // If no pending block from provider, build the pending block locally.
                if let Some((block, receipts)) = self.local_pending_block().await? {
                    return Ok(Some((block, receipts)));
                }
            }

            // Canonical blocks are resolved to a hash and served from the cache.
            if let Some(block_hash) =
                self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)?
            {
                if let Some((block, receipts)) = self
                    .cache()
                    .get_block_and_receipts(block_hash)
                    .await
                    .map_err(Self::Error::from_eth_err)?
                {
                    return Ok(Some((block, receipts)));
                }
            }

            Ok(None)
        }
    }

    /// Returns uncle headers of given block.
    ///
    /// Returns an empty vec if there are none.
    #[expect(clippy::type_complexity)]
    fn ommers(
        &self,
        block_id: BlockId,
    ) -> impl Future<Output = Result<Option<Vec<ProviderHeader<Self::Provider>>>, Self::Error>> + Send
    {
        async move {
            if let Some(block) = self.recovered_block(block_id).await? {
                Ok(block.body().ommers().map(|o| o.to_vec()))
            } else {
                Ok(None)
            }
        }
    }

    /// Returns uncle block at given index in given block.
    ///
    /// Returns `None` if index out of range.
    fn ommer_by_block_and_index(
        &self,
        block_id: BlockId,
        index: Index,
    ) -> impl Future<Output = Result<Option<RpcBlock<Self::NetworkTypes>>, Self::Error>> + Send
    {
        async move {
            let uncles = if block_id.is_pending() {
                // Pending block can be fetched directly without need for caching
                self.provider()
                    .pending_block()
                    .map_err(Self::Error::from_eth_err)?
                    .and_then(|block| block.body().ommers().map(|o| o.to_vec()))
            } else {
                self.recovered_block(block_id)
                    .await?
                    .map(|block| block.body().ommers().map(|o| o.to_vec()).unwrap_or_default())
            }
            .unwrap_or_default();

            uncles
                .into_iter()
                .nth(index.into())
                .map(|header| {
                    // Uncle responses carry only a header; body fields are empty by construction.
                    let block =
                        alloy_consensus::Block::<alloy_consensus::TxEnvelope, _>::uncle(header);
                    let size = block.length();
                    let header = self
                        .tx_resp_builder()
                        .convert_header(SealedHeader::new_unhashed(block.header), size)?;
                    Ok(Block {
                        uncles: vec![],
                        header,
                        transactions: BlockTransactions::Uncle,
                        withdrawals: None,
                    })
                })
                .transpose()
        }
    }
}
/// Loads a block from database.
///
/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods.
pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt {
    /// Returns the block object for the given block id.
    #[expect(clippy::type_complexity)]
    fn recovered_block(
        &self,
        block_id: BlockId,
    ) -> impl Future<
        Output = Result<
            Option<Arc<RecoveredBlock<<Self::Provider as BlockReader>::Block>>>,
            Self::Error,
        >,
    > + Send {
        async move {
            if block_id.is_pending() {
                // Prefer an actual pending block received from the CL; no caching needed.
                let provider_pending =
                    self.provider().pending_block().map_err(Self::Error::from_eth_err)?;
                if let Some(pending_block) = provider_pending {
                    return Ok(Some(Arc::new(pending_block)));
                }
                // Otherwise fall back to the locally built pending block, if any.
                return Ok(self.local_pending_block().await?.map(|(block, _receipts)| block));
            }

            // Resolve the id to a canonical hash; unknown ids yield `None`.
            let Some(block_hash) = self
                .provider()
                .block_hash_for_id(block_id)
                .map_err(Self::Error::from_eth_err)?
            else {
                return Ok(None);
            };

            self.cache().get_recovered_block(block_hash).await.map_err(Self::Error::from_eth_err)
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/receipt.rs | crates/rpc/rpc-eth-api/src/helpers/receipt.rs | //! Loads a receipt from database. Helper trait for `eth_` block and transaction RPC methods, that
//! loads receipt data w.r.t. network.
use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt};
use alloy_consensus::{transaction::TransactionMeta, TxReceipt};
use futures::Future;
use reth_primitives_traits::SignerRecoverable;
use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert};
use reth_rpc_eth_types::{error::FromEthApiError, EthApiError};
use reth_storage_api::{ProviderReceipt, ProviderTx};
use std::borrow::Cow;
/// Assembles transaction receipt data w.r.t to network.
///
/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods.
pub trait LoadReceipt:
    EthApiTypes<
        RpcConvert: RpcConvert<
            Primitives = Self::Primitives,
            Error = Self::Error,
            Network = Self::NetworkTypes,
        >,
        Error: FromEthApiError,
    > + RpcNodeCoreExt
    + Send
    + Sync
{
    /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`.
    fn build_transaction_receipt(
        &self,
        tx: ProviderTx<Self::Provider>,
        meta: TransactionMeta,
        receipt: ProviderReceipt<Self::Provider>,
    ) -> impl Future<Output = Result<RpcReceipt<Self::NetworkTypes>, Self::Error>> + Send {
        async move {
            let block_hash = meta.block_hash;
            // All receipts of the block are needed to derive this tx's own gas usage and the
            // block-wide index of its first log.
            let all_receipts = self
                .cache()
                .get_receipts(block_hash)
                .await
                .map_err(Self::Error::from_eth_err)?
                .ok_or(EthApiError::HeaderNotFound(block_hash.into()))?;

            // Accumulate cumulative gas and log count over all receipts preceding this tx.
            let mut gas_used = 0;
            let mut next_log_index = 0;
            for prior in all_receipts.iter().take(meta.index as usize) {
                gas_used = prior.cumulative_gas_used();
                next_log_index += prior.logs().len();
            }

            let recovered =
                tx.try_into_recovered_unchecked().map_err(Self::Error::from_eth_err)?;
            let input = ConvertReceiptInput {
                tx: recovered.as_recovered_ref(),
                gas_used: receipt.cumulative_gas_used() - gas_used,
                receipt: Cow::Owned(receipt),
                next_log_index,
                meta,
            };

            let mut converted = self.tx_resp_builder().convert_receipts(vec![input])?;
            // Exactly one input was supplied, so exactly one converted receipt comes back.
            Ok(converted.pop().unwrap())
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/mod.rs | crates/rpc/rpc-eth-api/src/helpers/mod.rs | //! Behaviour needed to serve `eth_` RPC requests, divided into general database reads and
//! specific database access.
//!
//! Traits with `Load` prefix, read atomic data from database, e.g. a block or transaction. Any
//! database read done in more than one default `Eth` trait implementation, is defined in a `Load`
//! trait.
//!
//! Traits with `Eth` prefix, compose specific data needed to serve RPC requests in the `eth`
//! namespace. They use `Load` traits as building blocks. [`EthTransactions`] also writes data
//! (submits transactions). Based on the `eth_` request method semantics, request methods are
//! divided into: [`EthTransactions`], [`EthBlocks`], [`EthFees`], [`EthState`] and [`EthCall`].
//! Default implementation of the `Eth` traits, is done w.r.t. L1.
//!
//! [`EthApiServer`](crate::EthApiServer), is implemented for any type that implements
//! all the `Eth` traits, e.g. `reth_rpc::EthApi`.
pub mod block;
pub mod blocking_task;
pub mod call;
pub mod config;
pub mod estimate;
pub mod fee;
pub mod pending_block;
pub mod receipt;
pub mod signer;
pub mod spec;
pub mod state;
pub mod trace;
pub mod transaction;
pub use block::{EthBlocks, LoadBlock};
pub use blocking_task::SpawnBlocking;
pub use call::{Call, EthCall};
pub use fee::{EthFees, LoadFee};
pub use pending_block::LoadPendingBlock;
pub use receipt::LoadReceipt;
pub use signer::{AddDevSigners, EthSigner};
pub use spec::EthApiSpec;
pub use state::{EthState, LoadState};
pub use trace::Trace;
pub use transaction::{EthTransactions, LoadTransaction};
use crate::FullEthApiTypes;
/// Extension trait that bundles traits needed for tracing transactions.
pub trait TraceExt: LoadTransaction + LoadBlock + SpawnBlocking + Trace + Call {}

// Keep the blanket impl's bounds in sync with the trait's supertraits so the implementor set is
// stated explicitly instead of relying on implied bounds. This cannot exclude any previous
// implementor: `SpawnBlocking` is already a supertrait, so every valid impl satisfies it.
impl<T> TraceExt for T where T: LoadTransaction + LoadBlock + SpawnBlocking + Trace + Call {}
/// Helper trait to unify all `eth` rpc server building block traits, for simplicity.
///
/// This trait is automatically implemented for any type that implements all the `Eth` traits.
pub trait FullEthApi:
    FullEthApiTypes
    + EthApiSpec
    + EthTransactions
    + EthBlocks
    + EthState
    + EthCall
    + EthFees
    + Trace
    + LoadReceipt
{
}

// Blanket implementation: any type satisfying all building-block traits is a `FullEthApi`.
impl<T> FullEthApi for T where
    T: FullEthApiTypes
        + EthApiSpec
        + EthTransactions
        + EthBlocks
        + EthState
        + EthCall
        + EthFees
        + Trace
        + LoadReceipt
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/transaction.rs | crates/rpc/rpc-eth-api/src/helpers/transaction.rs | //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t.
//! network.
use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking};
use crate::{
helpers::{estimate::EstimateCall, spec::SignersForRpc},
FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt,
RpcTransaction,
};
use alloy_consensus::{
transaction::{SignerRecoverable, TransactionMeta},
BlockHeader, Transaction,
};
use alloy_dyn_abi::TypedData;
use alloy_eips::{eip2718::Encodable2718, BlockId};
use alloy_network::TransactionBuilder;
use alloy_primitives::{Address, Bytes, TxHash, B256};
use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo};
use futures::{Future, StreamExt};
use reth_chain_state::CanonStateSubscriptions;
use reth_node_api::BlockBody;
use reth_primitives_traits::{RecoveredBlock, SignedTransaction};
use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq};
use reth_rpc_eth_types::{
utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, SignError,
TransactionSource,
};
use reth_storage_api::{
BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider,
TransactionsProvider,
};
use reth_transaction_pool::{
AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool,
};
use std::sync::Arc;
/// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in
/// the `eth_` namespace.
///
/// This includes utilities for transaction tracing, transacting and inspection.
///
/// Async functions that are spawned onto the
/// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool) begin with `spawn_`
///
/// ## Calls
///
/// There are subtle differences between when transacting [`RpcTxReq`]:
///
/// The endpoints `eth_call` and `eth_estimateGas` and `eth_createAccessList` should always
/// __disable__ the base fee check in the [`CfgEnv`](revm::context::CfgEnv).
///
/// The behaviour for tracing endpoints is not consistent across clients.
/// Geth also disables the basefee check for tracing: <https://github.com/ethereum/go-ethereum/blob/bc0b87ca196f92e5af49bd33cc190ef0ec32b197/eth/tracers/api.go#L955-L955>
/// Erigon does not: <https://github.com/ledgerwatch/erigon/blob/aefb97b07d1c4fd32a66097a24eddd8f6ccacae0/turbo/transactions/tracing.go#L209-L209>
///
/// See also <https://github.com/paradigmxyz/reth/issues/6240>
///
/// This implementation follows the behaviour of Geth and disables the basefee check for tracing.
pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
    /// Returns a handle for signing data.
    ///
    /// Signer access in default (L1) trait method implementations.
    fn signers(&self) -> &SignersForRpc<Self::Provider, Self::NetworkTypes>;

    /// Decodes and recovers the transaction and submits it to the pool.
    ///
    /// Returns the hash of the transaction.
    // NOTE(review): `tx` is presumably the EIP-2718 encoded raw transaction bytes — the decode
    // happens in implementations, which are not visible here; confirm against them.
    fn send_raw_transaction(
        &self,
        tx: Bytes,
    ) -> impl Future<Output = Result<B256, Self::Error>> + Send;
    /// Decodes and recovers the transaction and submits it to the pool.
    ///
    /// And awaits the receipt.
    fn send_raw_transaction_sync(
        &self,
        tx: Bytes,
    ) -> impl Future<Output = Result<RpcReceipt<Self::NetworkTypes>, Self::Error>> + Send
    where
        Self: LoadReceipt + 'static,
    {
        let this = self.clone();
        async move {
            // Submit to the pool first; the returned hash identifies the tx below.
            let hash = EthTransactions::send_raw_transaction(&this, tx).await?;
            // Watch canonical chain updates to observe the transaction's inclusion.
            let mut stream = this.provider().canonical_state_stream();
            const TIMEOUT_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(30);
            tokio::time::timeout(TIMEOUT_DURATION, async {
                while let Some(notification) = stream.next().await {
                    let chain = notification.committed();
                    for block in chain.blocks_iter() {
                        if block.body().contains_transaction(&hash) {
                            if let Some(receipt) = this.transaction_receipt(hash).await? {
                                return Ok(receipt);
                            }
                        }
                    }
                }
                // The stream ended without observing the transaction in a committed block.
                Err(Self::Error::from_eth_err(TransactionConfirmationTimeout {
                    hash,
                    duration: TIMEOUT_DURATION,
                }))
            })
            .await
            // The 30s timeout elapsed: surface the same confirmation-timeout error.
            .unwrap_or_else(|_elapsed| {
                Err(Self::Error::from_eth_err(TransactionConfirmationTimeout {
                    hash,
                    duration: TIMEOUT_DURATION,
                }))
            })
        }
    }
    /// Returns the transaction by hash.
    ///
    /// Checks the pool and state.
    ///
    /// Returns `Ok(None)` if no matching transaction was found.
    #[expect(clippy::complexity)]
    fn transaction_by_hash(
        &self,
        hash: B256,
    ) -> impl Future<
        Output = Result<Option<TransactionSource<ProviderTx<Self::Provider>>>, Self::Error>,
    > + Send {
        // Delegates to the `LoadTransaction` supertrait implementation.
        LoadTransaction::transaction_by_hash(self, hash)
    }
/// Get all transactions in the block with the given hash.
///
/// Returns `None` if block does not exist.
#[expect(clippy::type_complexity)]
fn transactions_by_block(
&self,
block: B256,
) -> impl Future<Output = Result<Option<Vec<ProviderTx<Self::Provider>>>, Self::Error>> + Send
{
async move {
self.cache()
.get_recovered_block(block)
.await
.map(|b| b.map(|b| b.body().transactions().to_vec()))
.map_err(Self::Error::from_eth_err)
}
}
    /// Returns the EIP-2718 encoded transaction by hash.
    ///
    /// If this is a pooled EIP-4844 transaction, the blob sidecar is included.
    ///
    /// Checks the pool and state.
    ///
    /// Returns `Ok(None)` if no matching transaction was found.
    fn raw_transaction_by_hash(
        &self,
        hash: B256,
    ) -> impl Future<Output = Result<Option<Bytes>, Self::Error>> + Send {
        async move {
            // Note: this is mostly used to fetch pooled transactions so we check the pool first
            if let Some(tx) =
                self.pool().get_pooled_transaction_element(hash).map(|tx| tx.encoded_2718().into())
            {
                return Ok(Some(tx))
            }

            // Fall back to the database; the read is offloaded to a blocking task.
            self.spawn_blocking_io(move |ref this| {
                Ok(this
                    .provider()
                    .transaction_by_hash(hash)
                    .map_err(Self::Error::from_eth_err)?
                    .map(|tx| tx.encoded_2718().into()))
            })
            .await
        }
    }
/// Returns the _historical_ transaction and the block it was mined in
#[expect(clippy::type_complexity)]
fn historical_transaction_by_hash_at(
&self,
hash: B256,
) -> impl Future<
Output = Result<Option<(TransactionSource<ProviderTx<Self::Provider>>, B256)>, Self::Error>,
> + Send {
async move {
match self.transaction_by_hash_at(hash).await? {
None => Ok(None),
Some((tx, at)) => Ok(at.as_block_hash().map(|hash| (tx, hash))),
}
}
}
/// Returns the transaction receipt for the given hash.
///
/// Returns None if the transaction does not exist or is pending
/// Note: The tx receipt is not available for pending transactions.
fn transaction_receipt(
&self,
hash: B256,
) -> impl Future<Output = Result<Option<RpcReceipt<Self::NetworkTypes>>, Self::Error>> + Send
where
Self: LoadReceipt + 'static,
{
async move {
match self.load_transaction_and_receipt(hash).await? {
Some((tx, meta, receipt)) => {
self.build_transaction_receipt(tx, meta, receipt).await.map(Some)
}
None => Ok(None),
}
}
}
/// Helper method that loads a transaction and its receipt.
#[expect(clippy::complexity)]
fn load_transaction_and_receipt(
&self,
hash: TxHash,
) -> impl Future<
Output = Result<
Option<(ProviderTx<Self::Provider>, TransactionMeta, ProviderReceipt<Self::Provider>)>,
Self::Error,
>,
> + Send
where
Self: 'static,
{
self.spawn_blocking_io(move |this| {
let provider = this.provider();
let (tx, meta) = match provider
.transaction_by_hash_with_meta(hash)
.map_err(Self::Error::from_eth_err)?
{
Some((tx, meta)) => (tx, meta),
None => return Ok(None),
};
let receipt = match provider.receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? {
Some(recpt) => recpt,
None => return Ok(None),
};
Ok(Some((tx, meta, receipt)))
})
}
/// Get transaction by [`BlockId`] and index of transaction within that block.
///
/// Returns `Ok(None)` if the block does not exist, or index is out of range.
fn transaction_by_block_and_tx_index(
    &self,
    block_id: BlockId,
    index: usize,
) -> impl Future<Output = Result<Option<RpcTransaction<Self::NetworkTypes>>, Self::Error>> + Send
where
    Self: LoadBlock,
{
    async move {
        if let Some(block) = self.recovered_block(block_id).await? {
            // Capture the block metadata first; it is shared by the response regardless of
            // which transaction index is requested.
            let block_hash = block.hash();
            let block_number = block.number();
            let base_fee_per_gas = block.base_fee_per_gas();
            // `nth` walks the recovered (sender, tx) pairs; out-of-range indices fall
            // through to `Ok(None)` below.
            if let Some((signer, tx)) = block.transactions_with_sender().nth(index) {
                let tx_info = TransactionInfo {
                    hash: Some(*tx.tx_hash()),
                    block_hash: Some(block_hash),
                    block_number: Some(block_number),
                    base_fee: base_fee_per_gas,
                    index: Some(index as u64),
                };
                return Ok(Some(
                    self.tx_resp_builder().fill(tx.clone().with_signer(*signer), tx_info)?,
                ))
            }
        }
        Ok(None)
    }
}
/// Find a transaction by sender's address and nonce.
///
/// Checks the pool first (when `include_pending` is set), then locates the mined
/// transaction by binary-searching the chain for the block in which the sender's nonce
/// passed `nonce`.
///
/// Returns `Ok(None)` when the sender has deployed code, when the nonce has not been
/// consumed yet, or when no matching transaction is found.
fn get_transaction_by_sender_and_nonce(
    &self,
    sender: Address,
    nonce: u64,
    include_pending: bool,
) -> impl Future<Output = Result<Option<RpcTransaction<Self::NetworkTypes>>, Self::Error>> + Send
where
    Self: LoadBlock + LoadState,
{
    async move {
        // Check the pool first
        if include_pending {
            if let Some(tx) =
                RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
            {
                let transaction = tx.transaction.clone_into_consensus();
                return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?));
            }
        }
        // Check if the sender is a contract
        // If the sender has deployed code, no historical lookup is attempted.
        if !self.get_code(sender, None).await?.is_empty() {
            return Ok(None);
        }
        let highest = self.transaction_count(sender, None).await?.saturating_to::<u64>();
        // If the nonce is higher or equal to the highest nonce, the transaction is pending or
        // not exists.
        if nonce >= highest {
            return Ok(None);
        }
        let Ok(high) = self.provider().best_block_number() else {
            return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into());
        };
        // Perform a binary search over the block range to find the block in which the sender's
        // nonce reached the requested nonce.
        // Invariant: the account nonce is monotonically non-decreasing with block height, so
        // the predicate `nonce-at-block > nonce` partitions the range.
        let num = binary_search::<_, _, Self::Error>(1, high, |mid| async move {
            let mid_nonce =
                self.transaction_count(sender, Some(mid.into())).await?.saturating_to::<u64>();
            Ok(mid_nonce > nonce)
        })
        .await?;
        let block_id = num.into();
        // Scan the located block for the exact (sender, nonce) pair and build the RPC
        // response; a missing block surfaces as `HeaderNotFound`.
        self.recovered_block(block_id)
            .await?
            .and_then(|block| {
                let block_hash = block.hash();
                let block_number = block.number();
                let base_fee_per_gas = block.base_fee_per_gas();
                block
                    .transactions_with_sender()
                    .enumerate()
                    .find(|(_, (signer, tx))| **signer == sender && (*tx).nonce() == nonce)
                    .map(|(index, (signer, tx))| {
                        let tx_info = TransactionInfo {
                            hash: Some(*tx.tx_hash()),
                            block_hash: Some(block_hash),
                            block_number: Some(block_number),
                            base_fee: base_fee_per_gas,
                            index: Some(index as u64),
                        };
                        self.tx_resp_builder().fill(tx.clone().with_signer(*signer), tx_info)
                    })
            })
            .ok_or(EthApiError::HeaderNotFound(block_id))?
            .map(Some)
    }
}
/// Get transaction, as raw bytes, by [`BlockId`] and index of transaction within that block.
///
/// Returns `Ok(None)` if the block does not exist, or index is out of range.
fn raw_transaction_by_block_and_tx_index(
    &self,
    block_id: BlockId,
    index: usize,
) -> impl Future<Output = Result<Option<Bytes>, Self::Error>> + Send
where
    Self: LoadBlock,
{
    async move {
        // Encode the transaction at `index` (if any) as EIP-2718 bytes; both a missing
        // block and an out-of-range index collapse to `None`.
        let encoded = self.recovered_block(block_id).await?.and_then(|block| {
            block.body().transactions().get(index).map(|tx| tx.encoded_2718().into())
        });
        Ok(encoded)
    }
}
/// Signs transaction with a matching signer, if any and submits the transaction to the pool.
/// Returns the hash of the signed transaction.
///
/// Fills in missing request fields before signing, in order: nonce (next available for the
/// sender), chain id, then gas limit (estimated against the pending block).
fn send_transaction(
    &self,
    mut request: RpcTxReq<Self::NetworkTypes>,
) -> impl Future<Output = Result<B256, Self::Error>> + Send
where
    Self: EthApiSpec + LoadBlock + EstimateCall,
{
    async move {
        // A `from` address is mandatory; it selects the signer.
        let from = match request.as_ref().from() {
            Some(from) => from,
            None => return Err(SignError::NoAccount.into_eth_err()),
        };
        // Fail early if no configured signer manages this account.
        if self.find_signer(&from).is_err() {
            return Err(SignError::NoAccount.into_eth_err())
        }
        // set nonce if not already set before
        if request.as_ref().nonce().is_none() {
            let nonce = self.next_available_nonce(from).await?;
            request.as_mut().set_nonce(nonce);
        }
        let chain_id = self.chain_id();
        request.as_mut().set_chain_id(chain_id.to());
        // Gas is estimated against the pending block so the filled request is executable.
        let estimated_gas =
            self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?;
        let gas_limit = estimated_gas;
        request.as_mut().set_gas_limit(gas_limit.to());
        let transaction = self.sign_request(&from, request).await?.with_signer(from);
        // Convert the signed consensus transaction into the pool's transaction type.
        let pool_transaction =
            <<Self as RpcNodeCore>::Pool as TransactionPool>::Transaction::try_from_consensus(
                transaction,
            )
            .map_err(|_| EthApiError::TransactionConversionError)?;
        // submit the transaction to the pool with a `Local` origin
        let AddedTransactionOutcome { hash, .. } = self
            .pool()
            .add_transaction(TransactionOrigin::Local, pool_transaction)
            .await
            .map_err(Self::Error::from_eth_err)?;
        Ok(hash)
    }
}
/// Signs a transaction, with configured signers.
fn sign_request(
&self,
from: &Address,
txn: RpcTxReq<Self::NetworkTypes>,
) -> impl Future<Output = Result<ProviderTx<Self::Provider>, Self::Error>> + Send {
async move {
self.find_signer(from)?
.sign_transaction(txn, from)
.await
.map_err(Self::Error::from_eth_err)
}
}
/// Signs given message. Returns the signature.
fn sign(
&self,
account: Address,
message: Bytes,
) -> impl Future<Output = Result<Bytes, Self::Error>> + Send {
async move {
Ok(self
.find_signer(&account)?
.sign(account, &message)
.await
.map_err(Self::Error::from_eth_err)?
.as_bytes()
.into())
}
}
/// Signs a transaction request using the given account in request
/// Returns the EIP-2718 encoded signed transaction.
fn sign_transaction(
    &self,
    request: RpcTxReq<Self::NetworkTypes>,
) -> impl Future<Output = Result<Bytes, Self::Error>> + Send {
    async move {
        // The `from` field selects the signing account; without it we cannot sign.
        let Some(from) = request.as_ref().from() else {
            return Err(SignError::NoAccount.into_eth_err())
        };
        let signed = self.sign_request(&from, request).await?;
        Ok(signed.encoded_2718().into())
    }
}
/// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait.
fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result<Bytes, Self::Error> {
Ok(self
.find_signer(&account)?
.sign_typed_data(account, data)
.map_err(Self::Error::from_eth_err)?
.as_bytes()
.into())
}
/// Returns the signer for the given account, if found in configured signers.
///
/// The returned signer is a boxed clone, so the read lock on the signer set is released
/// before the caller uses it.
#[expect(clippy::type_complexity)]
fn find_signer(
    &self,
    account: &Address,
) -> Result<
    Box<dyn EthSigner<ProviderTx<Self::Provider>, RpcTxReq<Self::NetworkTypes>> + 'static>,
    Self::Error,
> {
    let signers = self.signers().read();
    for signer in signers.iter() {
        if signer.is_signer_for(account) {
            return Ok(dyn_clone::clone_box(&**signer))
        }
    }
    Err(SignError::NoAccount.into_eth_err())
}
}
/// Loads a transaction from database.
///
/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC
/// methods.
pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt {
    /// Returns the transaction by hash.
    ///
    /// Checks the pool and state.
    ///
    /// Returns `Ok(None)` if no matching transaction was found.
    ///
    /// Note: the database is consulted first (on the blocking IO pool); only on a miss is
    /// the transaction pool checked.
    #[expect(clippy::complexity)]
    fn transaction_by_hash(
        &self,
        hash: B256,
    ) -> impl Future<
        Output = Result<Option<TransactionSource<ProviderTx<Self::Provider>>>, Self::Error>,
    > + Send {
        async move {
            // Try to find the transaction on disk
            let mut resp = self
                .spawn_blocking_io(move |this| {
                    match this
                        .provider()
                        .transaction_by_hash_with_meta(hash)
                        .map_err(Self::Error::from_eth_err)?
                    {
                        None => Ok(None),
                        Some((tx, meta)) => {
                            // Note: we assume this transaction is valid because it was mined
                            // (or is part of the pending block), so sender recovery is done
                            // unchecked. The pre-EIP-2 signature rule is deliberately not
                            // enforced because the transaction could predate EIP-2.
                            let transaction = tx
                                .try_into_recovered_unchecked()
                                .map_err(|_| EthApiError::InvalidTransactionSignature)?;
                            let tx = TransactionSource::Block {
                                transaction,
                                index: meta.index,
                                block_hash: meta.block_hash,
                                block_number: meta.block_number,
                                base_fee: meta.base_fee,
                            };
                            Ok(Some(tx))
                        }
                    }
                })
                .await?;
            if resp.is_none() {
                // tx not found on disk, check pool
                if let Some(tx) =
                    self.pool().get(&hash).map(|tx| tx.transaction.clone().into_consensus())
                {
                    resp = Some(TransactionSource::Pool(tx.into()));
                }
            }
            Ok(resp)
        }
    }
    /// Returns the transaction by including its corresponding [`BlockId`].
    ///
    /// Note: this supports pending transactions
    ///
    /// Pool transactions are tagged with the pending block id; mined transactions carry
    /// the hash of the block they were included in.
    #[expect(clippy::type_complexity)]
    fn transaction_by_hash_at(
        &self,
        transaction_hash: B256,
    ) -> impl Future<
        Output = Result<
            Option<(TransactionSource<ProviderTx<Self::Provider>>, BlockId)>,
            Self::Error,
        >,
    > + Send {
        async move {
            Ok(self.transaction_by_hash(transaction_hash).await?.map(|tx| match tx {
                tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()),
                tx @ TransactionSource::Block { block_hash, .. } => {
                    (tx, BlockId::Hash(block_hash.into()))
                }
            }))
        }
    }
    /// Fetches the transaction and the transaction's block
    ///
    /// Returns `Ok(None)` for unknown and for pending transactions (which have no mined
    /// block yet). The block is served from the cache.
    #[expect(clippy::type_complexity)]
    fn transaction_and_block(
        &self,
        hash: B256,
    ) -> impl Future<
        Output = Result<
            Option<(
                TransactionSource<ProviderTx<Self::Provider>>,
                Arc<RecoveredBlock<ProviderBlock<Self::Provider>>>,
            )>,
            Self::Error,
        >,
    > + Send {
        async move {
            let (transaction, at) = match self.transaction_by_hash_at(hash).await? {
                None => return Ok(None),
                Some(res) => res,
            };
            // Note: this is always either hash or pending
            let block_hash = match at {
                BlockId::Hash(hash) => hash.block_hash,
                _ => return Ok(None),
            };
            let block = self
                .cache()
                .get_recovered_block(block_hash)
                .await
                .map_err(Self::Error::from_eth_err)?;
            Ok(block.map(|block| (transaction, block)))
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-eth-api/src/helpers/spec.rs | crates/rpc/rpc-eth-api/src/helpers/spec.rs | //! Loads chain metadata.
use alloy_primitives::{Address, U256, U64};
use alloy_rpc_types_eth::{Stage, SyncInfo, SyncStatus};
use futures::Future;
use reth_chainspec::{ChainInfo, ChainSpecProvider, EthereumHardforks, Hardforks};
use reth_errors::{RethError, RethResult};
use reth_network_api::NetworkInfo;
use reth_rpc_convert::{RpcTxReq, RpcTypes};
use reth_storage_api::{BlockNumReader, StageCheckpointReader, TransactionsProvider};
use crate::{helpers::EthSigner, RpcNodeCore};
/// `Eth` API trait.
///
/// Defines core functionality of the `eth` API implementation.
#[auto_impl::auto_impl(&, Arc)]
pub trait EthApiSpec:
    RpcNodeCore<
    Provider: ChainSpecProvider<ChainSpec: Hardforks + EthereumHardforks>
                  + BlockNumReader
                  + StageCheckpointReader,
    Network: NetworkInfo,
>
{
    /// The transaction type signers are using.
    type Transaction;
    /// The RPC requests and responses.
    type Rpc: RpcTypes;
    /// Returns the block node is started on.
    fn starting_block(&self) -> U256;
    /// Returns a handle to the signers owned by provider.
    fn signers(&self) -> &SignersForApi<Self>;
    /// Returns the current ethereum protocol version.
    ///
    /// Queried from the network handle; errors from the network layer are wrapped as
    /// [`RethError`].
    fn protocol_version(&self) -> impl Future<Output = RethResult<U64>> + Send {
        async move {
            let status = self.network().network_status().await.map_err(RethError::other)?;
            Ok(U64::from(status.protocol_version))
        }
    }
    /// Returns the chain id
    fn chain_id(&self) -> U64 {
        U64::from(self.network().chain_id())
    }
    /// Returns provider chain info
    fn chain_info(&self) -> RethResult<ChainInfo> {
        Ok(self.provider().chain_info()?)
    }
    /// Returns a list of addresses owned by provider.
    fn accounts(&self) -> Vec<Address> {
        self.signers().read().iter().flat_map(|s| s.accounts()).collect()
    }
    /// Returns `true` if the network is undergoing sync.
    fn is_syncing(&self) -> bool {
        self.network().is_syncing()
    }
    /// Returns the [`SyncStatus`] of the network
    ///
    /// While syncing, reports the current best block and the per-stage checkpoints.
    /// NOTE: `highest_block` mirrors `current_block` — the true network tip is not tracked
    /// here.
    fn sync_status(&self) -> RethResult<SyncStatus> {
        let status = if self.is_syncing() {
            let current_block = U256::from(
                self.provider().chain_info().map(|info| info.best_number).unwrap_or_default(),
            );
            // Stage checkpoints describe pipeline progress; missing checkpoints default to
            // an empty list.
            let stages = self
                .provider()
                .get_all_checkpoints()
                .unwrap_or_default()
                .into_iter()
                .map(|(name, checkpoint)| Stage { name, block: checkpoint.block_number })
                .collect();
            SyncStatus::Info(Box::new(SyncInfo {
                starting_block: self.starting_block(),
                current_block,
                highest_block: current_block,
                warp_chunks_amount: None,
                warp_chunks_processed: None,
                stages: Some(stages),
            }))
        } else {
            SyncStatus::None
        };
        Ok(status)
    }
}
/// A handle to [`EthSigner`]s with its generics set from [`EthApiSpec`].
///
/// The signer list is behind a `parking_lot::RwLock` so it can be shared and mutated
/// across the API handlers.
pub type SignersForApi<Api> = parking_lot::RwLock<
    Vec<Box<dyn EthSigner<<Api as EthApiSpec>::Transaction, RpcTxReq<<Api as EthApiSpec>::Rpc>>>>,
>;
/// A handle to [`EthSigner`]s with its generics set from [`TransactionsProvider`] and
/// [`reth_rpc_convert::RpcTypes`].
pub type SignersForRpc<Provider, Rpc> = parking_lot::RwLock<
    Vec<Box<dyn EthSigner<<Provider as TransactionsProvider>::Transaction, RpcTxReq<Rpc>>>>,
>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-e2e-tests/src/lib.rs | crates/rpc/rpc-e2e-tests/src/lib.rs | //! RPC end-to-end tests including execution-apis compatibility testing.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// RPC compatibility test actions for the e2e test framework
pub mod rpc_compat;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs | crates/rpc/rpc-e2e-tests/src/rpc_compat.rs | //! RPC compatibility test actions for testing RPC methods against execution-apis test data.
use eyre::{eyre, Result};
use futures_util::future::BoxFuture;
use jsonrpsee::core::client::ClientT;
use reth_e2e_test_utils::testsuite::{actions::Action, BlockInfo, Environment};
use reth_node_api::EngineTypes;
use serde_json::Value;
use std::path::Path;
use tracing::{debug, info};
/// Test case from execution-apis .io file format
///
/// Parsed from a `.io` transcript: a `>>` line carries the request and a `<<` line the
/// expected response.
#[derive(Debug, Clone)]
pub struct RpcTestCase {
    /// The test name (filename without .io extension)
    pub name: String,
    /// Request to send (as JSON value)
    pub request: Value,
    /// Expected response (as JSON value)
    pub expected_response: Value,
    /// Whether this test is spec-only (flagged by a `speconly:` comment in the file)
    pub spec_only: bool,
}
/// Action that runs RPC compatibility tests from execution-apis test data
///
/// For each configured method, every `.io` file under
/// `<test_data_path>/<method>/` is parsed and executed against the active node.
#[derive(Debug)]
pub struct RunRpcCompatTests {
    /// RPC methods to test (e.g. `eth_getLogs`)
    pub methods: Vec<String>,
    /// Path to the execution-apis tests directory
    pub test_data_path: String,
    /// Whether to stop on first failure
    pub fail_fast: bool,
}
impl RunRpcCompatTests {
    /// Create a new RPC compatibility test runner
    ///
    /// `fail_fast` defaults to `false`; enable it via [`Self::with_fail_fast`].
    pub fn new(methods: Vec<String>, test_data_path: impl Into<String>) -> Self {
        Self { methods, test_data_path: test_data_path.into(), fail_fast: false }
    }
    /// Set whether to stop on first failure
    pub const fn with_fail_fast(mut self, fail_fast: bool) -> Self {
        self.fail_fast = fail_fast;
        self
    }
    /// Parse a .io test file
    ///
    /// Scans leading `//` comments for a `speconly:` marker, then takes the first `>>` line
    /// as the JSON-RPC request and the first subsequent `<<` line as the expected response.
    /// The returned case has an empty `name`; the caller fills it in.
    fn parse_io_file(content: &str) -> Result<RpcTestCase> {
        let mut lines = content.lines();
        let mut spec_only = false;
        let mut request_line = None;
        let mut response_line = None;
        // Skip comments and look for spec_only marker
        for line in lines.by_ref() {
            let line = line.trim();
            if line.starts_with("//") {
                if line.contains("speconly:") {
                    spec_only = true;
                }
            } else if let Some(stripped) = line.strip_prefix(">>") {
                request_line = Some(stripped.trim());
                break;
            }
        }
        // Look for response
        for line in lines {
            let line = line.trim();
            if let Some(stripped) = line.strip_prefix("<<") {
                response_line = Some(stripped.trim());
                break;
            }
        }
        let request_str =
            request_line.ok_or_else(|| eyre!("No request found in test file (>> marker)"))?;
        let response_str =
            response_line.ok_or_else(|| eyre!("No response found in test file (<< marker)"))?;
        // Parse request
        let request: Value = serde_json::from_str(request_str)
            .map_err(|e| eyre!("Failed to parse request: {}", e))?;
        // Parse response
        let expected_response: Value = serde_json::from_str(response_str)
            .map_err(|e| eyre!("Failed to parse response: {}", e))?;
        Ok(RpcTestCase { name: String::new(), request, expected_response, spec_only })
    }
    /// Compare JSON values with special handling for numbers and errors
    /// Uses iterative approach to avoid stack overflow with deeply nested structures
    ///
    /// Object comparison is one-sided: every key in `expected` must be present in `actual`,
    /// but extra keys in `actual` are ignored.
    fn compare_json_values(actual: &Value, expected: &Value, path: &str) -> Result<()> {
        // Stack to hold work items: (actual, expected, path)
        let mut work_stack = vec![(actual, expected, path.to_string())];
        while let Some((actual, expected, current_path)) = work_stack.pop() {
            match (actual, expected) {
                // Number comparison: handle different representations
                // NOTE(review): both sides are converted to f64, so integers above 2^53
                // lose precision before the epsilon check.
                (Value::Number(a), Value::Number(b)) => {
                    let a_f64 = a.as_f64().ok_or_else(|| eyre!("Invalid number"))?;
                    let b_f64 = b.as_f64().ok_or_else(|| eyre!("Invalid number"))?;
                    // Use a reasonable epsilon for floating point comparison
                    const EPSILON: f64 = 1e-10;
                    if (a_f64 - b_f64).abs() > EPSILON {
                        return Err(eyre!("Number mismatch at {}: {} != {}", current_path, a, b));
                    }
                }
                // Array comparison
                (Value::Array(a), Value::Array(b)) => {
                    if a.len() != b.len() {
                        return Err(eyre!(
                            "Array length mismatch at {}: {} != {}",
                            current_path,
                            a.len(),
                            b.len()
                        ));
                    }
                    // Add array elements to work stack in reverse order
                    // so they are processed in correct order
                    for (i, (av, bv)) in a.iter().zip(b.iter()).enumerate().rev() {
                        work_stack.push((av, bv, format!("{current_path}[{i}]")));
                    }
                }
                // Object comparison
                (Value::Object(a), Value::Object(b)) => {
                    // Check all keys in expected are present in actual
                    for (key, expected_val) in b {
                        if let Some(actual_val) = a.get(key) {
                            work_stack.push((
                                actual_val,
                                expected_val,
                                format!("{current_path}.{key}"),
                            ));
                        } else {
                            return Err(eyre!("Missing key at {}.{}", current_path, key));
                        }
                    }
                }
                // Direct value comparison
                (a, b) => {
                    if a != b {
                        return Err(eyre!("Value mismatch at {}: {:?} != {:?}", current_path, a, b));
                    }
                }
            }
        }
        Ok(())
    }
    /// Execute a single test case
    ///
    /// Sends the request to the active node and compares the outcome against the expected
    /// response: expected results are compared structurally, expected errors only require
    /// that the node also returned an error (codes/messages are not matched).
    async fn execute_test_case<Engine: EngineTypes>(
        &self,
        test_case: &RpcTestCase,
        env: &Environment<Engine>,
    ) -> Result<()> {
        let node_client = &env.node_clients[env.active_node_idx];
        // Extract method and params from request
        let method = test_case
            .request
            .get("method")
            .and_then(|v| v.as_str())
            .ok_or_else(|| eyre!("Request missing method field"))?;
        let params = test_case.request.get("params").cloned().unwrap_or(Value::Array(vec![]));
        // Make the RPC request using jsonrpsee
        // We need to handle the case where the RPC might return an error
        use jsonrpsee::core::params::ArrayParams;
        let response_result: Result<Value, jsonrpsee::core::client::Error> = match params {
            Value::Array(ref arr) => {
                // Use ArrayParams for array parameters
                let mut array_params = ArrayParams::new();
                for param in arr {
                    array_params
                        .insert(param.clone())
                        .map_err(|e| eyre!("Failed to insert param: {}", e))?;
                }
                node_client.rpc.request(method, array_params).await
            }
            _ => {
                // For non-array params, wrap in an array
                let mut array_params = ArrayParams::new();
                array_params.insert(params).map_err(|e| eyre!("Failed to insert param: {}", e))?;
                node_client.rpc.request(method, array_params).await
            }
        };
        // Build actual response object to match execution-apis format
        let actual_response = match response_result {
            Ok(response) => {
                serde_json::json!({
                    "jsonrpc": "2.0",
                    "id": test_case.request.get("id").cloned().unwrap_or(Value::Null),
                    "result": response
                })
            }
            Err(err) => {
                // RPC error - build error response
                // The client error is flattened into a generic -32000 code; the original
                // server-side code is not preserved.
                serde_json::json!({
                    "jsonrpc": "2.0",
                    "id": test_case.request.get("id").cloned().unwrap_or(Value::Null),
                    "error": {
                        "code": -32000, // Generic error code
                        "message": err.to_string()
                    }
                })
            }
        };
        // Compare responses
        let expected_result = test_case.expected_response.get("result");
        let expected_error = test_case.expected_response.get("error");
        let actual_result = actual_response.get("result");
        let actual_error = actual_response.get("error");
        match (expected_result, expected_error) {
            (Some(expected), None) => {
                // Expected success response
                if let Some(actual) = actual_result {
                    Self::compare_json_values(actual, expected, "result")?;
                } else if let Some(error) = actual_error {
                    return Err(eyre!("Expected success response but got error: {}", error));
                } else {
                    return Err(eyre!("Expected success response but got neither result nor error"));
                }
            }
            (None, Some(_)) => {
                // Expected error response - just check that we got an error
                if actual_error.is_none() {
                    return Err(eyre!("Expected error response but got success"));
                }
                debug!("Both responses are errors (expected behavior)");
            }
            _ => {
                return Err(eyre!("Invalid expected response format"));
            }
        }
        Ok(())
    }
}
impl<Engine> Action<Engine> for RunRpcCompatTests
where
    Engine: EngineTypes,
{
    /// Runs every `.io` test under `<test_data_path>/<method>/` for each configured method.
    ///
    /// Parse failures and test failures are logged individually; with `fail_fast` set the
    /// first failure aborts the run, otherwise the action fails at the end if any test
    /// failed.
    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
        Box::pin(async move {
            let mut total_tests = 0;
            let mut passed_tests = 0;
            for method in &self.methods {
                info!("Running RPC compatibility tests for {}", method);
                let method_dir = Path::new(&self.test_data_path).join(method);
                if !method_dir.exists() {
                    return Err(eyre!("Test directory does not exist: {}", method_dir.display()));
                }
                // Read all .io files in the method directory
                let entries = std::fs::read_dir(&method_dir)
                    .map_err(|e| eyre!("Failed to read directory: {}", e))?;
                for entry in entries {
                    let entry = entry?;
                    let path = entry.path();
                    if path.extension().and_then(|s| s.to_str()) == Some("io") {
                        // Test name is the filename without the .io extension.
                        let test_name = path
                            .file_stem()
                            .and_then(|s| s.to_str())
                            .unwrap_or("unknown")
                            .to_string();
                        let content = std::fs::read_to_string(&path)
                            .map_err(|e| eyre!("Failed to read test file: {}", e))?;
                        match Self::parse_io_file(&content) {
                            Ok(mut test_case) => {
                                test_case.name = test_name.clone();
                                total_tests += 1;
                                match self.execute_test_case(&test_case, env).await {
                                    Ok(_) => {
                                        info!("✓ {}/{}: PASS", method, test_name);
                                        passed_tests += 1;
                                    }
                                    Err(e) => {
                                        info!("✗ {}/{}: FAIL - {}", method, test_name, e);
                                        if self.fail_fast {
                                            return Err(eyre!("Test failed (fail-fast enabled)"));
                                        }
                                    }
                                }
                            }
                            Err(e) => {
                                // A parse error counts as a failure but is not added to the
                                // totals (it is reported separately).
                                info!("✗ {}/{}: PARSE ERROR - {}", method, test_name, e);
                                if self.fail_fast {
                                    return Err(e);
                                }
                            }
                        }
                    }
                }
            }
            info!("RPC compatibility test results: {}/{} passed", passed_tests, total_tests);
            if passed_tests < total_tests {
                return Err(eyre!("Some tests failed: {}/{} passed", passed_tests, total_tests));
            }
            Ok(())
        })
    }
}
/// Action to initialize the chain from execution-apis test data
///
/// Applies the forkchoice state from `headfcu.json` to every node in the environment.
#[derive(Debug)]
pub struct InitializeFromExecutionApis {
    /// Path to the base.rlp file (if different from default)
    pub chain_rlp_path: Option<String>,
    /// Path to the headfcu.json file (if different from default)
    pub fcu_json_path: Option<String>,
}
impl Default for InitializeFromExecutionApis {
    /// Defaults to no custom paths, matching [`InitializeFromExecutionApis::new`].
    fn default() -> Self {
        Self { chain_rlp_path: None, fcu_json_path: None }
    }
}
impl InitializeFromExecutionApis {
    /// Create with default paths (assumes execution-apis/tests structure)
    pub const fn new() -> Self {
        Self { chain_rlp_path: None, fcu_json_path: None }
    }
    /// Set custom chain RLP path
    pub fn with_chain_rlp(self, path: impl Into<String>) -> Self {
        Self { chain_rlp_path: Some(path.into()), ..self }
    }
    /// Set custom FCU JSON path
    pub fn with_fcu_json(self, path: impl Into<String>) -> Self {
        Self { fcu_json_path: Some(path.into()), ..self }
    }
}
impl<Engine> Action<Engine> for InitializeFromExecutionApis
where
    Engine: EngineTypes,
{
    /// Loads the forkchoice state from `fcu_json_path` and applies it to every node,
    /// retrying while a node reports `SYNCING`, then records the head block in the
    /// environment state.
    ///
    /// Note: `chain_rlp_path` is not consumed here; chain import happens elsewhere.
    fn execute<'a>(&'a mut self, env: &'a mut Environment<Engine>) -> BoxFuture<'a, Result<()>> {
        Box::pin(async move {
            // Load forkchoice state
            let fcu_path = self
                .fcu_json_path
                .as_ref()
                .map(Path::new)
                .ok_or_else(|| eyre!("FCU JSON path is required"))?;
            let fcu_state = reth_e2e_test_utils::setup_import::load_forkchoice_state(fcu_path)?;
            info!(
                "Applying forkchoice state - head: {}, safe: {}, finalized: {}",
                fcu_state.head_block_hash,
                fcu_state.safe_block_hash,
                fcu_state.finalized_block_hash
            );
            // Apply forkchoice update to each node
            for (idx, client) in env.node_clients.iter().enumerate() {
                debug!("Applying forkchoice update to node {}", idx);
                // Wait for the node to finish syncing imported blocks
                let mut retries = 0;
                const MAX_RETRIES: u32 = 10;
                const RETRY_DELAY_MS: u64 = 500;
                loop {
                    let response =
                        reth_rpc_api::clients::EngineApiClient::<Engine>::fork_choice_updated_v3(
                            &client.engine.http_client(),
                            fcu_state,
                            None,
                        )
                        .await
                        .map_err(|e| eyre!("Failed to update forkchoice on node {}: {}", idx, e))?;
                    match response.payload_status.status {
                        alloy_rpc_types_engine::PayloadStatusEnum::Valid => {
                            debug!("Forkchoice update successful on node {}", idx);
                            break;
                        }
                        alloy_rpc_types_engine::PayloadStatusEnum::Syncing => {
                            // SYNCING is transient: back off and retry up to MAX_RETRIES.
                            if retries >= MAX_RETRIES {
                                return Err(eyre!(
                                    "Node {} still syncing after {} retries",
                                    idx,
                                    MAX_RETRIES
                                ));
                            }
                            debug!("Node {} is syncing, retrying in {}ms...", idx, RETRY_DELAY_MS);
                            tokio::time::sleep(std::time::Duration::from_millis(RETRY_DELAY_MS))
                                .await;
                            retries += 1;
                        }
                        _ => {
                            // INVALID / ACCEPTED etc. are treated as fatal for this action.
                            return Err(eyre!(
                                "Invalid forkchoice state on node {}: {:?}",
                                idx,
                                response.payload_status
                            ));
                        }
                    }
                }
            }
            // Update environment state
            env.active_node_state_mut()?.current_block_info = Some(BlockInfo {
                hash: fcu_state.head_block_hash,
                number: 0, // Will be updated when we fetch the actual block
                timestamp: 0,
            });
            info!("Successfully initialized chain from execution-apis test data");
            Ok(())
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;
    /// The iterative (stack-based) comparison must handle nesting depths that would
    /// overflow the call stack with a recursive implementation.
    #[test]
    fn test_compare_json_values_deeply_nested() {
        // Test that the iterative comparison handles deeply nested structures
        // without stack overflow
        let mut nested = json!({"value": 0});
        let mut expected = json!({"value": 0});
        // Create a deeply nested structure
        for i in 1..1000 {
            nested = json!({"level": i, "nested": nested});
            expected = json!({"level": i, "nested": expected});
        }
        // Should not panic with stack overflow
        RunRpcCompatTests::compare_json_values(&nested, &expected, "root").unwrap();
    }
    /// Arrays compare element-wise and must have equal length.
    #[test]
    fn test_compare_json_values_arrays() {
        // Test array comparison
        let actual = json!([1, 2, 3, 4, 5]);
        let expected = json!([1, 2, 3, 4, 5]);
        RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap();
        // Test array length mismatch
        let actual = json!([1, 2, 3]);
        let expected = json!([1, 2, 3, 4, 5]);
        let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root");
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("Array length mismatch"));
    }
    /// Objects require every expected key to be present in the actual value.
    #[test]
    fn test_compare_json_values_objects() {
        // Test object comparison
        let actual = json!({"a": 1, "b": 2, "c": 3});
        let expected = json!({"a": 1, "b": 2, "c": 3});
        RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap();
        // Test missing key
        let actual = json!({"a": 1, "b": 2});
        let expected = json!({"a": 1, "b": 2, "c": 3});
        let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root");
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("Missing key"));
    }
    /// Numbers compare as f64 within an absolute epsilon of 1e-10.
    #[test]
    fn test_compare_json_values_numbers() {
        // Test number comparison with floating point
        let actual = json!({"value": 1.00000000001});
        let expected = json!({"value": 1.0});
        // Should be equal within epsilon (1e-10)
        RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap();
        // Test significant difference
        let actual = json!({"value": 1.1});
        let expected = json!({"value": 1.0});
        let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root");
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("Number mismatch"));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs | crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs | //! RPC compatibility tests using execution-apis test data
use eyre::Result;
use reth_chainspec::ChainSpec;
use reth_e2e_test_utils::testsuite::{
actions::{MakeCanonical, UpdateBlockInfo},
setup::{NetworkSetup, Setup},
TestBuilder,
};
use reth_node_ethereum::{EthEngineTypes, EthereumNode};
use reth_rpc_e2e_tests::rpc_compat::{InitializeFromExecutionApis, RunRpcCompatTests};
use seismic_alloy_genesis::Genesis;
use std::{env, path::PathBuf, sync::Arc};
use tracing::{debug, info};
/// Test repo-local RPC method compatibility with execution-apis test data
///
/// This test:
/// 1. Initializes a node with chain data from testdata (chain.rlp)
/// 2. Applies the forkchoice state from headfcu.json
/// 3. Runs tests cases in the local repository, some of which are execution-api tests
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Imports blocks with non-flagged storage state root"]
async fn test_local_rpc_tests_compat() -> Result<()> {
    reth_tracing::init_test_tracing();
    // Use local test data
    let test_data_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/rpc-compat");
    assert!(test_data_path.exists(), "Test data path does not exist: {}", test_data_path.display());
    info!("Using test data from: {}", test_data_path.display());
    // Paths to test files
    let chain_rlp_path = test_data_path.join("chain.rlp");
    let fcu_json_path = test_data_path.join("headfcu.json");
    let genesis_path = test_data_path.join("genesis.json");
    // Verify required files exist
    if !chain_rlp_path.exists() {
        return Err(eyre::eyre!("chain.rlp not found at {}", chain_rlp_path.display()));
    }
    if !fcu_json_path.exists() {
        return Err(eyre::eyre!("headfcu.json not found at {}", fcu_json_path.display()));
    }
    if !genesis_path.exists() {
        return Err(eyre::eyre!("genesis.json not found at {}", genesis_path.display()));
    }
    // Load genesis from test data
    let genesis_json = std::fs::read_to_string(&genesis_path)?;
    // Parse the Genesis struct from JSON and convert it to ChainSpec
    // This properly handles all the hardfork configuration from the config section
    let genesis: Genesis = serde_json::from_str(&genesis_json)?;
    let chain_spec: ChainSpec = genesis.into();
    let chain_spec = Arc::new(chain_spec);
    // Create test setup with imported chain
    let setup = Setup::<EthEngineTypes>::default()
        .with_chain_spec(chain_spec)
        .with_network(NetworkSetup::single_node());
    // Build and run the test
    // Order matters: block info is refreshed, the FCU from headfcu.json is applied, the
    // head is made canonical, and only then are the RPC compat cases executed.
    let test = TestBuilder::new()
        .with_setup_and_import(setup, chain_rlp_path)
        .with_action(UpdateBlockInfo::default())
        .with_action(
            InitializeFromExecutionApis::new().with_fcu_json(fcu_json_path.to_string_lossy()),
        )
        .with_action(MakeCanonical::new())
        .with_action(RunRpcCompatTests::new(
            vec!["eth_getLogs".to_string(), "eth_syncing".to_string()],
            test_data_path.to_string_lossy(),
        ));
    test.run::<EthereumNode>().await?;
    Ok(())
}
/// Test RPC method compatibility with execution-apis test data from environment variable
///
/// This test:
/// 1. Reads test data path from `EXECUTION_APIS_TEST_PATH` environment variable
/// 2. Auto-discovers all RPC method directories (starting with `eth_`)
/// 3. Initializes a node with chain data from that directory (chain.rlp)
/// 4. Applies the forkchoice state from headfcu.json
/// 5. Runs all discovered RPC test cases individually (each test file reported separately)
#[tokio::test(flavor = "multi_thread")]
async fn test_execution_apis_compat() -> Result<()> {
    reth_tracing::init_test_tracing();
    // The test is opt-in: without the env var we log and succeed (skip).
    let Ok(raw_path) = env::var("EXECUTION_APIS_TEST_PATH") else {
        info!("SKIPPING: EXECUTION_APIS_TEST_PATH environment variable not set. Please set it to the path of execution-apis/tests directory to run this test.");
        return Ok(());
    };
    let test_data_path = PathBuf::from(raw_path);
    if !test_data_path.exists() {
        return Err(eyre::eyre!("Test data path does not exist: {}", test_data_path.display()));
    }
    info!("Using execution-apis test data from: {}", test_data_path.display());
    // Auto-discover RPC method directories: any subdirectory whose name contains
    // an underscore (i.e. is namespaced, e.g. `eth_getLogs`).
    let mut rpc_methods: Vec<String> = std::fs::read_dir(&test_data_path)
        .map(|entries| {
            entries
                .flatten()
                .filter(|entry| entry.path().is_dir())
                .filter_map(|entry| entry.file_name().to_str().map(str::to_string))
                .filter(|name| name.contains('_'))
                .collect()
        })
        .unwrap_or_default();
    if rpc_methods.is_empty() {
        return Err(eyre::eyre!(
            "No RPC method directories (containing a '_' indicating namespacing) found in {}",
            test_data_path.display()
        ));
    }
    rpc_methods.sort();
    debug!("Found RPC method test directories: {:?}", rpc_methods);
    // Chain configuration files every test data directory must provide.
    let chain_rlp_path = test_data_path.join("chain.rlp");
    let genesis_path = test_data_path.join("genesis.json");
    let fcu_json_path = test_data_path.join("headfcu.json");
    for (path, label) in [
        (&chain_rlp_path, "chain.rlp"),
        (&fcu_json_path, "headfcu.json"),
        (&genesis_path, "genesis.json"),
    ] {
        if !path.exists() {
            return Err(eyre::eyre!("{} not found at {}", label, path.display()));
        }
    }
    // Parse the Genesis struct from JSON and convert it into a ChainSpec; the
    // conversion handles the hardfork configuration from the `config` section.
    let genesis: Genesis = serde_json::from_str(&std::fs::read_to_string(&genesis_path)?)?;
    let chain_spec: ChainSpec = genesis.into();
    let chain_spec = Arc::new(chain_spec);
    // Single-node setup seeded with the imported chain.
    let setup = Setup::<EthEngineTypes>::default()
        .with_chain_spec(chain_spec)
        .with_network(NetworkSetup::single_node());
    // Import the chain, apply the forkchoice state, then run the compat cases
    // for every discovered method (each test file is reported separately).
    TestBuilder::new()
        .with_setup_and_import(setup, chain_rlp_path)
        .with_action(UpdateBlockInfo::default())
        .with_action(
            InitializeFromExecutionApis::new().with_fcu_json(fcu_json_path.to_string_lossy()),
        )
        .with_action(MakeCanonical::new())
        .with_action(RunRpcCompatTests::new(rpc_methods, test_data_path.to_string_lossy()))
        .run::<EthereumNode>()
        .await?;
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/mev.rs | crates/rpc/rpc-api/src/mev.rs | use alloy_rpc_types_mev::{EthBundleHash, MevSendBundle, SimBundleOverrides, SimBundleResponse};
use jsonrpsee::proc_macros::rpc;
/// Mev rpc interface.
///
/// Exposes only the simulation endpoint of the `mev` namespace.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "mev"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "mev"))]
pub trait MevSimApi {
    /// Similar to `mev_sendBundle` but instead of submitting a bundle to the relay, it returns
    /// a simulation result. Only fully matched bundles can be simulated.
    #[method(name = "simBundle")]
    async fn sim_bundle(
        &self,
        bundle: MevSendBundle,
        sim_overrides: SimBundleOverrides,
    ) -> jsonrpsee::core::RpcResult<SimBundleResponse>;
}
/// Mev rpc interface.
///
/// Exposes both the bundle submission and the simulation endpoints of the
/// `mev` namespace.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "mev"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "mev"))]
pub trait MevFullApi {
    /// Submitting bundles to the relay. It takes in a bundle and provides a bundle hash as a
    /// return value.
    #[method(name = "sendBundle")]
    async fn send_bundle(
        &self,
        request: MevSendBundle,
    ) -> jsonrpsee::core::RpcResult<EthBundleHash>;
    /// Similar to `mev_sendBundle` but instead of submitting a bundle to the relay, it returns
    /// a simulation result. Only fully matched bundles can be simulated.
    #[method(name = "simBundle")]
    async fn sim_bundle(
        &self,
        bundle: MevSendBundle,
        sim_overrides: SimBundleOverrides,
    ) -> jsonrpsee::core::RpcResult<SimBundleResponse>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/miner.rs | crates/rpc/rpc-api/src/miner.rs | use alloy_primitives::{Bytes, U128};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Miner namespace rpc interface that can control miner/builder settings
///
/// All handlers are synchronous (`fn`, not `async fn`).
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "miner"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "miner"))]
pub trait MinerApi {
    /// Sets the extra data string that is included when this miner mines a block.
    ///
    /// Returns an error if the extra data is too long.
    #[method(name = "setExtra")]
    fn set_extra(&self, record: Bytes) -> RpcResult<bool>;
    /// Sets the minimum accepted gas price for the miner.
    // NOTE(review): unit of `gas_price` (presumably wei, per geth convention) is not
    // visible here — confirm against the implementation.
    #[method(name = "setGasPrice")]
    fn set_gas_price(&self, gas_price: U128) -> RpcResult<bool>;
    /// Sets the gaslimit to target towards during mining.
    #[method(name = "setGasLimit")]
    fn set_gas_limit(&self, gas_limit: U128) -> RpcResult<bool>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/engine.rs | crates/rpc/rpc-api/src/engine.rs | //! Server traits for the engine API
//!
//! This contains the `engine_` namespace and the subset of the `eth_` namespace that is exposed to
//! the consensus client.
use alloy_eips::{
eip4844::{BlobAndProofV1, BlobAndProofV2},
eip7685::RequestsOrHash,
BlockId, BlockNumberOrTag,
};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64};
use alloy_rpc_types_engine::{
ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1,
ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus,
};
use alloy_rpc_types_eth::{
state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus,
};
use alloy_serde::JsonStorageKey;
use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule};
use reth_engine_primitives::EngineTypes;
/// Helper trait for the engine api server.
///
/// This type-erases the concrete [`jsonrpsee`] server implementation and only returns the
/// [`RpcModule`] that contains all the endpoints of the server.
pub trait IntoEngineApiRpcModule {
    /// Consumes the type and returns all the methods and subscriptions defined in the trait and
    /// returns them as a single [`RpcModule`] with unit (`()`) context.
    fn into_rpc_module(self) -> RpcModule<()>;
}
// NOTE: We can't use associated types in the `EngineApi` trait because of jsonrpsee, so we use a
// generic here. It would be nice if the rpc macro would understand which types need to have serde.
// By default, if the trait has a generic, the rpc macro will add e.g. `Engine: DeserializeOwned` to
// the trait bounds, which is not what we want, because `Types` is not used directly in any of the
// trait methods. Instead, we have to add the bounds manually. This would be disastrous if we had
// more than one associated type used in the trait methods.
/// The `engine_` namespace exposed to the consensus layer, generic over the
/// payload/attribute types of an [`EngineTypes`] implementation.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "engine"), server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "engine", client_bounds(Engine::PayloadAttributes: jsonrpsee::core::Serialize + Clone), server_bounds(Engine::PayloadAttributes: jsonrpsee::core::DeserializeOwned)))]
pub trait EngineApi<Engine: EngineTypes> {
    /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_newpayloadv1>
    /// Caution: This should not accept the `withdrawals` field
    #[method(name = "newPayloadV1")]
    async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult<PayloadStatus>;
    /// See also <https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/shanghai.md#engine_newpayloadv2>
    #[method(name = "newPayloadV2")]
    async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult<PayloadStatus>;
    /// Post Cancun payload handler
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_newpayloadv3>
    #[method(name = "newPayloadV3")]
    async fn new_payload_v3(
        &self,
        payload: ExecutionPayloadV3,
        versioned_hashes: Vec<B256>,
        parent_beacon_block_root: B256,
    ) -> RpcResult<PayloadStatus>;
    /// Post Prague payload handler
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_newpayloadv4>
    #[method(name = "newPayloadV4")]
    async fn new_payload_v4(
        &self,
        payload: ExecutionPayloadV3,
        versioned_hashes: Vec<B256>,
        parent_beacon_block_root: B256,
        execution_requests: RequestsOrHash,
    ) -> RpcResult<PayloadStatus>;
    /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_forkchoiceupdatedv1>
    ///
    /// Caution: This should not accept the `withdrawals` field in the payload attributes.
    #[method(name = "forkchoiceUpdatedV1")]
    async fn fork_choice_updated_v1(
        &self,
        fork_choice_state: ForkchoiceState,
        payload_attributes: Option<Engine::PayloadAttributes>,
    ) -> RpcResult<ForkchoiceUpdated>;
    /// Post Shanghai forkchoice update handler
    ///
    /// This is the same as `forkchoiceUpdatedV1`, but expects an additional `withdrawals` field in
    /// the `payloadAttributes`, if payload attributes are provided.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/shanghai.md#engine_forkchoiceupdatedv2>
    ///
    /// Caution: This should not accept the `parentBeaconBlockRoot` field in the payload
    /// attributes.
    #[method(name = "forkchoiceUpdatedV2")]
    async fn fork_choice_updated_v2(
        &self,
        fork_choice_state: ForkchoiceState,
        payload_attributes: Option<Engine::PayloadAttributes>,
    ) -> RpcResult<ForkchoiceUpdated>;
    /// Post Cancun forkchoice update handler
    ///
    /// This is the same as `forkchoiceUpdatedV2`, but expects an additional
    /// `parentBeaconBlockRoot` field in the `payloadAttributes`, if payload attributes
    /// are provided.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_forkchoiceupdatedv3>
    #[method(name = "forkchoiceUpdatedV3")]
    async fn fork_choice_updated_v3(
        &self,
        fork_choice_state: ForkchoiceState,
        payload_attributes: Option<Engine::PayloadAttributes>,
    ) -> RpcResult<ForkchoiceUpdated>;
    /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_getpayloadv1>
    ///
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call.
    ///
    /// Caution: This should not return the `withdrawals` field
    ///
    /// Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    #[method(name = "getPayloadV1")]
    async fn get_payload_v1(
        &self,
        payload_id: PayloadId,
    ) -> RpcResult<Engine::ExecutionPayloadEnvelopeV1>;
    /// See also <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/shanghai.md#engine_getpayloadv2>
    ///
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call. Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    #[method(name = "getPayloadV2")]
    async fn get_payload_v2(
        &self,
        payload_id: PayloadId,
    ) -> RpcResult<Engine::ExecutionPayloadEnvelopeV2>;
    /// Post Cancun payload handler which also returns a blobs bundle.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#engine_getpayloadv3>
    ///
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call. Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    #[method(name = "getPayloadV3")]
    async fn get_payload_v3(
        &self,
        payload_id: PayloadId,
    ) -> RpcResult<Engine::ExecutionPayloadEnvelopeV3>;
    /// Post Prague payload handler.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_getpayloadv4>
    ///
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call. Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    #[method(name = "getPayloadV4")]
    async fn get_payload_v4(
        &self,
        payload_id: PayloadId,
    ) -> RpcResult<Engine::ExecutionPayloadEnvelopeV4>;
    /// Post Osaka payload handler.
    ///
    /// See also <https://github.com/ethereum/execution-apis/blob/15399c2e2f16a5f800bf3f285640357e2c245ad9/src/engine/osaka.md#engine_getpayloadv5>.
    ///
    /// Returns the most recent version of the payload that is available in the corresponding
    /// payload build process at the time of receiving this call. Note:
    /// > Provider software MAY stop the corresponding build process after serving this call.
    #[method(name = "getPayloadV5")]
    async fn get_payload_v5(
        &self,
        payload_id: PayloadId,
    ) -> RpcResult<Engine::ExecutionPayloadEnvelopeV5>;
    /// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/shanghai.md#engine_getpayloadbodiesbyhashv1>
    #[method(name = "getPayloadBodiesByHashV1")]
    async fn get_payload_bodies_by_hash_v1(
        &self,
        block_hashes: Vec<BlockHash>,
    ) -> RpcResult<ExecutionPayloadBodiesV1>;
    /// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/shanghai.md#engine_getpayloadbodiesbyrangev1>
    ///
    /// Returns the execution payload bodies by the range starting at `start`, containing `count`
    /// blocks.
    ///
    /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus
    /// layer p2p specification, meaning the input should be treated as untrusted or potentially
    /// adversarial.
    ///
    /// Implementers should take care when acting on the input to this method, specifically
    /// ensuring that the range is limited properly, and that the range boundaries are computed
    /// correctly and without panics.
    #[method(name = "getPayloadBodiesByRangeV1")]
    async fn get_payload_bodies_by_range_v1(
        &self,
        start: U64,
        count: U64,
    ) -> RpcResult<ExecutionPayloadBodiesV1>;
    /// This function will return the [`ClientVersionV1`] object.
    /// See also:
    /// <https://github.com/ethereum/execution-apis/blob/03911ffc053b8b806123f1fc237184b0092a485a/src/engine/identification.md#engine_getclientversionv1>
    ///
    ///
    /// - When connected to a single execution client, the consensus client **MUST** receive an
    ///   array with a single `ClientVersionV1` object.
    /// - When connected to multiple execution clients via a multiplexer, the multiplexer **MUST**
    ///   concatenate the responses from each execution client into a single,
    ///   flat array before returning the response to the consensus client.
    #[method(name = "getClientVersionV1")]
    async fn get_client_version_v1(
        &self,
        client_version: ClientVersionV1,
    ) -> RpcResult<Vec<ClientVersionV1>>;
    /// See also <https://github.com/ethereum/execution-apis/blob/6452a6b194d7db269bf1dbd087a267251d3cc7f8/src/engine/common.md#capabilities>
    #[method(name = "exchangeCapabilities")]
    async fn exchange_capabilities(&self, capabilities: Vec<String>) -> RpcResult<Vec<String>>;
    /// Fetch blobs for the consensus layer from the blob store.
    ///
    /// Unavailable blobs are returned as `None` entries in the result vector.
    #[method(name = "getBlobsV1")]
    async fn get_blobs_v1(
        &self,
        versioned_hashes: Vec<B256>,
    ) -> RpcResult<Vec<Option<BlobAndProofV1>>>;
    /// Fetch blobs for the consensus layer from the blob store.
    ///
    /// Returns a response only if blobs and proofs are present for _all_ of the versioned hashes;
    /// client software MUST return null in case of any missing or older-version blobs.
    #[method(name = "getBlobsV2")]
    async fn get_blobs_v2(
        &self,
        versioned_hashes: Vec<B256>,
    ) -> RpcResult<Option<Vec<BlobAndProofV2>>>;
}
/// A subset of the ETH rpc interface: <https://ethereum.github.io/execution-apis/api-documentation>
///
/// This also includes additional eth functions required by optimism.
///
/// Specifically for the engine auth server: <https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md#underlying-protocol>
///
/// Generic over the RPC representations of the transaction request (`TxReq`),
/// block (`B`) and receipt (`R`).
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
pub trait EngineEthApi<TxReq: RpcObject, B: RpcObject, R: RpcObject> {
    /// Returns an object with data about the sync status or false.
    #[method(name = "syncing")]
    fn syncing(&self) -> RpcResult<SyncStatus>;
    /// Returns the chain ID of the current network.
    #[method(name = "chainId")]
    async fn chain_id(&self) -> RpcResult<Option<U64>>;
    /// Returns the number of most recent block.
    #[method(name = "blockNumber")]
    fn block_number(&self) -> RpcResult<U256>;
    /// Executes a new message call immediately without creating a transaction on the block chain.
    #[method(name = "call")]
    async fn call(
        &self,
        request: TxReq,
        block_id: Option<BlockId>,
        state_overrides: Option<StateOverride>,
        block_overrides: Option<Box<BlockOverrides>>,
    ) -> RpcResult<Bytes>;
    /// Returns code at a given address at given block number.
    #[method(name = "getCode")]
    async fn get_code(&self, address: Address, block_id: Option<BlockId>) -> RpcResult<Bytes>;
    /// Returns information about a block by hash.
    #[method(name = "getBlockByHash")]
    async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult<Option<B>>;
    /// Returns information about a block by number.
    #[method(name = "getBlockByNumber")]
    async fn block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult<Option<B>>;
    /// Returns all transaction receipts for a given block.
    #[method(name = "getBlockReceipts")]
    async fn block_receipts(&self, block_id: BlockId) -> RpcResult<Option<Vec<R>>>;
    /// Sends signed transaction, returning its hash.
    #[method(name = "sendRawTransaction")]
    async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult<B256>;
    /// Returns the receipt of a transaction by transaction hash.
    #[method(name = "getTransactionReceipt")]
    async fn transaction_receipt(&self, hash: B256) -> RpcResult<Option<R>>;
    /// Returns logs matching given filter object.
    #[method(name = "getLogs")]
    async fn logs(&self, filter: Filter) -> RpcResult<Vec<Log>>;
    /// Returns the account and storage values of the specified account including the Merkle-proof.
    /// This call can be used to verify that the data you are pulling from is not tampered with.
    #[method(name = "getProof")]
    async fn get_proof(
        &self,
        address: Address,
        keys: Vec<JsonStorageKey>,
        block_number: Option<BlockId>,
    ) -> RpcResult<EIP1186AccountProofResponse>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/lib.rs | crates/rpc/rpc-api/src/lib.rs | //! Reth RPC interface definitions
//!
//! Provides all RPC interfaces.
//!
//! ## Feature Flags
//!
//! - `client`: Enables JSON-RPC client support.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod admin;
mod anvil;
mod debug;
mod engine;
mod hardhat;
mod mev;
mod miner;
mod net;
mod otterscan;
mod reth;
mod rpc;
mod trace;
mod txpool;
mod validation;
mod web3;
/// re-export of all server traits
pub use servers::*;
/// Aggregates all server traits.
pub mod servers {
    pub use crate::{
        admin::AdminApiServer,
        debug::{DebugApiServer, DebugExecutionWitnessApiServer},
        engine::{EngineApiServer, EngineEthApiServer, IntoEngineApiRpcModule},
        mev::{MevFullApiServer, MevSimApiServer},
        miner::MinerApiServer,
        net::NetApiServer,
        otterscan::OtterscanServer,
        reth::RethApiServer,
        rpc::RpcApiServer,
        trace::TraceApiServer,
        txpool::TxPoolApiServer,
        validation::BlockSubmissionValidationApiServer,
        web3::Web3ApiServer,
    };
    // The eth API crate is also re-exported under the `eth` alias alongside its
    // individual server traits.
    pub use reth_rpc_eth_api::{
        self as eth, EthApiServer, EthBundleApiServer, EthCallBundleApiServer, EthFilterApiServer,
        EthPubSubApiServer, L2EthApiExtServer,
    };
}
/// re-export of all client traits
#[cfg(feature = "client")]
pub use clients::*;
/// Aggregates all client traits.
#[cfg(feature = "client")]
pub mod clients {
    pub use crate::{
        admin::AdminApiClient,
        anvil::AnvilApiClient,
        debug::{DebugApiClient, DebugExecutionWitnessApiClient},
        engine::{EngineApiClient, EngineEthApiClient},
        hardhat::HardhatApiClient,
        mev::{MevFullApiClient, MevSimApiClient},
        miner::MinerApiClient,
        net::NetApiClient,
        otterscan::OtterscanClient,
        reth::RethApiClient,
        // NOTE(review): this re-exports the *server* trait from the client
        // aggregation module — confirm whether `rpc::RpcApiClient` was intended.
        rpc::RpcApiServer,
        trace::TraceApiClient,
        txpool::TxPoolApiClient,
        validation::BlockSubmissionValidationApiClient,
        web3::Web3ApiClient,
    };
    // NOTE(review): `L2EthApiExtServer` is likewise a server trait; the client
    // feature also generates `L2EthApiExtClient` — verify which one belongs here.
    pub use reth_rpc_eth_api::{
        EthApiClient, EthBundleApiClient, EthCallBundleApiClient, EthFilterApiClient,
        L2EthApiExtServer,
    };
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/validation.rs | crates/rpc/rpc-api/src/validation.rs | //! API for block submission validation.
use alloy_rpc_types_beacon::relay::{
BuilderBlockValidationRequest, BuilderBlockValidationRequestV2,
BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4,
BuilderBlockValidationRequestV5,
};
use jsonrpsee::proc_macros::rpc;
/// Block validation rpc interface.
///
/// Exposes the `flashbots_validateBuilderSubmissionV1` … `V5` endpoints, one per
/// versioned request type.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "flashbots"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "flashbots"))]
pub trait BlockSubmissionValidationApi {
    /// Validates a builder block submission given as a [`BuilderBlockValidationRequest`].
    #[method(name = "validateBuilderSubmissionV1")]
    async fn validate_builder_submission_v1(
        &self,
        request: BuilderBlockValidationRequest,
    ) -> jsonrpsee::core::RpcResult<()>;
    /// Validates a builder block submission given as a [`BuilderBlockValidationRequestV2`].
    #[method(name = "validateBuilderSubmissionV2")]
    async fn validate_builder_submission_v2(
        &self,
        request: BuilderBlockValidationRequestV2,
    ) -> jsonrpsee::core::RpcResult<()>;
    /// Validates a builder block submission given as a [`BuilderBlockValidationRequestV3`].
    #[method(name = "validateBuilderSubmissionV3")]
    async fn validate_builder_submission_v3(
        &self,
        request: BuilderBlockValidationRequestV3,
    ) -> jsonrpsee::core::RpcResult<()>;
    /// Validates a builder block submission given as a [`BuilderBlockValidationRequestV4`].
    #[method(name = "validateBuilderSubmissionV4")]
    async fn validate_builder_submission_v4(
        &self,
        request: BuilderBlockValidationRequestV4,
    ) -> jsonrpsee::core::RpcResult<()>;
    /// Validates a builder block submission given as a [`BuilderBlockValidationRequestV5`].
    #[method(name = "validateBuilderSubmissionV5")]
    async fn validate_builder_submission_v5(
        &self,
        request: BuilderBlockValidationRequestV5,
    ) -> jsonrpsee::core::RpcResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/trace.rs | crates/rpc/rpc-api/src/trace.rs | use alloy_eips::BlockId;
use alloy_primitives::{map::HashSet, Bytes, B256};
use alloy_rpc_types_eth::{state::StateOverride, BlockOverrides, Index};
use alloy_rpc_types_trace::{
filter::TraceFilter,
opcode::{BlockOpcodeGas, TransactionOpcodeGas},
parity::*,
};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Ethereum trace API
///
/// Parity-style `trace_` namespace, generic over the transaction request type
/// `TxReq` used by `trace_call` and `trace_callMany`.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "trace"))]
pub trait TraceApi<TxReq> {
    /// Executes the given call and returns a number of possible traces for it.
    #[method(name = "call")]
    async fn trace_call(
        &self,
        call: TxReq,
        trace_types: HashSet<TraceType>,
        block_id: Option<BlockId>,
        state_overrides: Option<StateOverride>,
        block_overrides: Option<Box<BlockOverrides>>,
    ) -> RpcResult<TraceResults>;
    /// Performs multiple call traces on top of the same block. i.e. transaction n will be executed
    /// on top of a pending block with all n-1 transactions applied (traced) first. Allows to trace
    /// dependent transactions.
    #[method(name = "callMany")]
    async fn trace_call_many(
        &self,
        calls: Vec<(TxReq, HashSet<TraceType>)>,
        block_id: Option<BlockId>,
    ) -> RpcResult<Vec<TraceResults>>;
    /// Traces a call to `eth_sendRawTransaction` without making the call, returning the traces.
    ///
    /// Expects a raw transaction data
    #[method(name = "rawTransaction")]
    async fn trace_raw_transaction(
        &self,
        data: Bytes,
        trace_types: HashSet<TraceType>,
        block_id: Option<BlockId>,
    ) -> RpcResult<TraceResults>;
    /// Replays all transactions in a block returning the requested traces for each transaction.
    #[method(name = "replayBlockTransactions")]
    async fn replay_block_transactions(
        &self,
        block_id: BlockId,
        trace_types: HashSet<TraceType>,
    ) -> RpcResult<Option<Vec<TraceResultsWithTransactionHash>>>;
    /// Replays a transaction, returning the traces.
    #[method(name = "replayTransaction")]
    async fn replay_transaction(
        &self,
        transaction: B256,
        trace_types: HashSet<TraceType>,
    ) -> RpcResult<TraceResults>;
    /// Returns traces created at given block.
    #[method(name = "block")]
    async fn trace_block(
        &self,
        block_id: BlockId,
    ) -> RpcResult<Option<Vec<LocalizedTransactionTrace>>>;
    /// Returns traces matching given filter.
    ///
    /// This is similar to `eth_getLogs` but for traces.
    #[method(name = "filter")]
    async fn trace_filter(&self, filter: TraceFilter) -> RpcResult<Vec<LocalizedTransactionTrace>>;
    /// Returns transaction trace at given index.
    ///
    /// `indices` represent the index positions of the traces.
    ///
    /// Note: This expects a list of indices but only one is supported since this function returns a
    /// single [`LocalizedTransactionTrace`].
    #[method(name = "get")]
    async fn trace_get(
        &self,
        hash: B256,
        indices: Vec<Index>,
    ) -> RpcResult<Option<LocalizedTransactionTrace>>;
    /// Returns all traces of given transaction.
    #[method(name = "transaction")]
    async fn trace_transaction(
        &self,
        hash: B256,
    ) -> RpcResult<Option<Vec<LocalizedTransactionTrace>>>;
    /// Returns all opcodes with their count and combined gas usage for the given transaction in no
    /// particular order.
    #[method(name = "transactionOpcodeGas")]
    async fn trace_transaction_opcode_gas(
        &self,
        tx_hash: B256,
    ) -> RpcResult<Option<TransactionOpcodeGas>>;
    /// Returns the opcodes of all transactions in the given block.
    ///
    /// This is the same as `trace_transactionOpcodeGas` but for all transactions in a block.
    #[method(name = "blockOpcodeGas")]
    async fn trace_block_opcode_gas(&self, block_id: BlockId) -> RpcResult<Option<BlockOpcodeGas>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/web3.rs | crates/rpc/rpc-api/src/web3.rs | use alloy_primitives::{Bytes, B256};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Web3 rpc interface.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "web3"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "web3"))]
pub trait Web3Api {
    /// Returns current client version.
    #[method(name = "clientVersion")]
    async fn client_version(&self) -> RpcResult<String>;
    /// Returns sha3 of the given data.
    ///
    /// Per the `web3_sha3` spec this is the Keccak-256 hash (not the standardized SHA3-256).
    #[method(name = "sha3")]
    fn sha3(&self, input: Bytes) -> RpcResult<B256>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/debug.rs | crates/rpc/rpc-api/src/debug.rs | use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_genesis::ChainConfig;
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Address, Bytes, B256};
use alloy_rpc_types_debug::ExecutionWitness;
use alloy_rpc_types_eth::{Block, Bundle, StateContext};
use alloy_rpc_types_trace::geth::{
BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult,
};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use reth_trie_common::{updates::TrieUpdates, HashedPostState};
/// Debug rpc interface.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))]
pub trait DebugApi<TxReq: RpcObject> {
/// Returns an RLP-encoded header.
#[method(name = "getRawHeader")]
async fn raw_header(&self, block_id: BlockId) -> RpcResult<Bytes>;
/// Returns an RLP-encoded block.
#[method(name = "getRawBlock")]
async fn raw_block(&self, block_id: BlockId) -> RpcResult<Bytes>;
/// Returns a EIP-2718 binary-encoded transaction.
///
/// If this is a pooled EIP-4844 transaction, the blob sidecar is included.
#[method(name = "getRawTransaction")]
async fn raw_transaction(&self, hash: B256) -> RpcResult<Option<Bytes>>;
/// Returns an array of EIP-2718 binary-encoded transactions for the given [`BlockId`].
#[method(name = "getRawTransactions")]
async fn raw_transactions(&self, block_id: BlockId) -> RpcResult<Vec<Bytes>>;
/// Returns an array of EIP-2718 binary-encoded receipts.
#[method(name = "getRawReceipts")]
async fn raw_receipts(&self, block_id: BlockId) -> RpcResult<Vec<Bytes>>;
/// Returns an array of recent bad blocks that the client has seen on the network.
#[method(name = "getBadBlocks")]
async fn bad_blocks(&self) -> RpcResult<Vec<Block>>;
/// Returns the structured logs created during the execution of EVM between two blocks
/// (excluding start) as a JSON object.
#[method(name = "traceChain")]
async fn debug_trace_chain(
&self,
start_exclusive: BlockNumberOrTag,
end_inclusive: BlockNumberOrTag,
) -> RpcResult<Vec<BlockTraceResult>>;
/// The `debug_traceBlock` method will return a full stack trace of all invoked opcodes of all
/// transaction that were included in this block.
///
/// This expects an rlp encoded block
///
/// Note, the parent of this block must be present, or it will fail. For the second parameter
/// see [`GethDebugTracingOptions`] reference.
#[method(name = "traceBlock")]
async fn debug_trace_block(
&self,
rlp_block: Bytes,
opts: Option<GethDebugTracingOptions>,
) -> RpcResult<Vec<TraceResult>>;
/// Similar to `debug_traceBlock`, `debug_traceBlockByHash` accepts a block hash and will replay
/// the block that is already present in the database. For the second parameter see
/// [`GethDebugTracingOptions`].
#[method(name = "traceBlockByHash")]
async fn debug_trace_block_by_hash(
&self,
block: B256,
opts: Option<GethDebugTracingOptions>,
) -> RpcResult<Vec<TraceResult>>;
/// Similar to `debug_traceBlockByHash`, `debug_traceBlockByNumber` accepts a block number
/// [`BlockNumberOrTag`] and will replay the block that is already present in the database.
/// For the second parameter see [`GethDebugTracingOptions`].
#[method(name = "traceBlockByNumber")]
async fn debug_trace_block_by_number(
&self,
block: BlockNumberOrTag,
opts: Option<GethDebugTracingOptions>,
) -> RpcResult<Vec<TraceResult>>;
/// The `debug_traceTransaction` debugging method will attempt to run the transaction in the
/// exact same manner as it was executed on the network. It will replay any transaction that
/// may have been executed prior to this one before it will finally attempt to execute the
/// transaction that corresponds to the given hash.
#[method(name = "traceTransaction")]
async fn debug_trace_transaction(
&self,
tx_hash: B256,
opts: Option<GethDebugTracingOptions>,
) -> RpcResult<GethTrace>;
/// The `debug_traceCall` method lets you run an `eth_call` within the context of the given
/// block execution using the final state of parent block as the base.
///
/// The first argument (just as in `eth_call`) is a transaction request.
/// The block can optionally be specified either by hash or by number as
/// the second argument.
/// The trace can be configured similar to `debug_traceTransaction`,
/// see [`GethDebugTracingOptions`]. The method returns the same output as
/// `debug_traceTransaction`.
#[method(name = "traceCall")]
async fn debug_trace_call(
&self,
request: TxReq,
block_id: Option<BlockId>,
opts: Option<GethDebugTracingCallOptions>,
) -> RpcResult<GethTrace>;
/// The `debug_traceCallMany` method lets you run an `eth_callMany` within the context of the
/// given block execution using the final state of parent block as the base followed by n
/// transactions.
///
/// The first argument is a list of bundles. Each bundle can overwrite the block headers. This
/// will affect all transaction in that bundle.
/// `BlockNumber` and `transaction_index` are optional. `Transaction_index`
/// specifies the number of tx in the block to replay and -1 means all transactions should be
/// replayed.
/// The trace can be configured similar to `debug_traceTransaction`.
/// State override apply to all bundles.
///
/// This methods is similar to many `eth_callMany`, hence this returns nested lists of traces.
/// Where the length of the outer list is the number of bundles and the length of the inner list
/// (`Vec<GethTrace>`) is the number of transactions in the bundle.
#[method(name = "traceCallMany")]
async fn debug_trace_call_many(
&self,
bundles: Vec<Bundle<TxReq>>,
state_context: Option<StateContext>,
opts: Option<GethDebugTracingCallOptions>,
) -> RpcResult<Vec<Vec<GethTrace>>>;
/// The `debug_executionWitness` method allows for re-execution of a block with the purpose of
/// generating an execution witness. The witness comprises of a map of all hashed trie nodes
/// to their preimages that were required during the execution of the block, including during
/// state root recomputation.
///
/// The first argument is the block number or tag.
#[method(name = "executionWitness")]
async fn debug_execution_witness(&self, block: BlockNumberOrTag)
-> RpcResult<ExecutionWitness>;
/// The `debug_executionWitnessByBlockHash` method allows for re-execution of a block with the
/// purpose of generating an execution witness. The witness comprises of a map of all hashed
/// trie nodes to their preimages that were required during the execution of the block,
/// including during state root recomputation.
///
/// The first argument is the block hash.
#[method(name = "executionWitnessByBlockHash")]
async fn debug_execution_witness_by_block_hash(
&self,
hash: B256,
) -> RpcResult<ExecutionWitness>;
/// Sets the logging backtrace location. When a backtrace location is set and a log message is
/// emitted at that location, the stack of the goroutine executing the log statement will
/// be printed to stderr.
#[method(name = "backtraceAt")]
async fn debug_backtrace_at(&self, location: &str) -> RpcResult<()>;
/// Enumerates all accounts at a given block with paging capability. `maxResults` are returned
/// in the page and the items have keys that come after the `start` key (hashed address).
///
/// If incompletes is false, then accounts for which the key preimage (i.e: the address) doesn't
/// exist in db are skipped. NB: geth by default does not store preimages.
#[method(name = "accountRange")]
async fn debug_account_range(
&self,
block_number: BlockNumberOrTag,
start: Bytes,
max_results: u64,
nocode: bool,
nostorage: bool,
incompletes: bool,
) -> RpcResult<()>;
/// Turns on block profiling for the given duration and writes profile data to disk. It uses a
/// profile rate of 1 for most accurate information. If a different rate is desired, set the
/// rate and write the profile manually using `debug_writeBlockProfile`.
#[method(name = "blockProfile")]
async fn debug_block_profile(&self, file: String, seconds: u64) -> RpcResult<()>;
/// Flattens the entire key-value database into a single level, removing all unused slots and
/// merging all keys.
#[method(name = "chaindbCompact")]
async fn debug_chaindb_compact(&self) -> RpcResult<()>;
/// Returns the current chain config.
#[method(name = "chainConfig")]
async fn debug_chain_config(&self) -> RpcResult<ChainConfig>;
/// Returns leveldb properties of the key-value database.
#[method(name = "chaindbProperty")]
async fn debug_chaindb_property(&self, property: String) -> RpcResult<()>;
/// Returns the code associated with a given hash at the specified block ID.
/// If no block ID is provided, it defaults to the latest block.
#[method(name = "codeByHash")]
async fn debug_code_by_hash(
&self,
hash: B256,
block_id: Option<BlockId>,
) -> RpcResult<Option<Bytes>>;
/// Turns on CPU profiling for the given duration and writes profile data to disk.
#[method(name = "cpuProfile")]
async fn debug_cpu_profile(&self, file: String, seconds: u64) -> RpcResult<()>;
/// Retrieves an ancient binary blob from the freezer. The freezer is a collection of
/// append-only immutable files. The first argument `kind` specifies which table to look up data
/// from. The list of all table kinds are as follows:
#[method(name = "dbAncient")]
async fn debug_db_ancient(&self, kind: String, number: u64) -> RpcResult<()>;
/// Returns the number of ancient items in the ancient store.
#[method(name = "dbAncients")]
async fn debug_db_ancients(&self) -> RpcResult<()>;
/// Returns the raw value of a key stored in the database.
#[method(name = "dbGet")]
async fn debug_db_get(&self, key: String) -> RpcResult<()>;
/// Retrieves the state that corresponds to the block number and returns a list of accounts
/// (including storage and code).
#[method(name = "dumpBlock")]
async fn debug_dump_block(&self, number: BlockId) -> RpcResult<()>;
/// Forces garbage collection.
#[method(name = "freeOSMemory")]
async fn debug_free_os_memory(&self) -> RpcResult<()>;
/// Forces a temporary client freeze, normally when the server is overloaded.
#[method(name = "freezeClient")]
async fn debug_freeze_client(&self, node: String) -> RpcResult<()>;
/// Returns garbage collection statistics.
#[method(name = "gcStats")]
async fn debug_gc_stats(&self) -> RpcResult<()>;
/// Returns the first number where the node has accessible state on disk. This is the
/// post-state of that block and the pre-state of the next block. The (from, to) parameters
/// are the sequence of blocks to search, which can go either forwards or backwards.
///
/// Note: to get the last state pass in the range of blocks in reverse, i.e. (last, first).
#[method(name = "getAccessibleState")]
async fn debug_get_accessible_state(
&self,
from: BlockNumberOrTag,
to: BlockNumberOrTag,
) -> RpcResult<()>;
/// Returns all accounts that have changed between the two blocks specified. A change is defined
/// as a difference in nonce, balance, code hash, or storage hash. With one parameter, returns
/// the list of accounts modified in the specified block.
#[method(name = "getModifiedAccountsByHash")]
async fn debug_get_modified_accounts_by_hash(
&self,
start_hash: B256,
end_hash: B256,
) -> RpcResult<()>;
/// Returns all accounts that have changed between the two blocks specified. A change is defined
/// as a difference in nonce, balance, code hash or storage hash.
#[method(name = "getModifiedAccountsByNumber")]
async fn debug_get_modified_accounts_by_number(
&self,
start_number: u64,
end_number: u64,
) -> RpcResult<()>;
/// Turns on Go runtime tracing for the given duration and writes trace data to disk.
#[method(name = "goTrace")]
async fn debug_go_trace(&self, file: String, seconds: u64) -> RpcResult<()>;
/// Executes a block (bad- or canon- or side-), and returns a list of intermediate roots: the
/// stateroot after each transaction.
#[method(name = "intermediateRoots")]
async fn debug_intermediate_roots(
&self,
block_hash: B256,
opts: Option<GethDebugTracingCallOptions>,
) -> RpcResult<()>;
/// Returns detailed runtime memory statistics.
#[method(name = "memStats")]
async fn debug_mem_stats(&self) -> RpcResult<()>;
/// Turns on mutex profiling for `nsec` seconds and writes profile data to file. It uses a
/// profile rate of 1 for most accurate information. If a different rate is desired, set the
/// rate and write the profile manually.
#[method(name = "mutexProfile")]
async fn debug_mutex_profile(&self, file: String, nsec: u64) -> RpcResult<()>;
/// Returns the preimage for a sha3 hash, if known.
#[method(name = "preimage")]
async fn debug_preimage(&self, hash: B256) -> RpcResult<()>;
/// Retrieves a block and returns its pretty printed form.
#[method(name = "printBlock")]
async fn debug_print_block(&self, number: u64) -> RpcResult<()>;
/// Fetches and retrieves the seed hash of the block by number.
#[method(name = "seedHash")]
async fn debug_seed_hash(&self, number: u64) -> RpcResult<B256>;
/// Sets the rate (in samples/sec) of goroutine block profile data collection. A non-zero rate
/// enables block profiling, setting it to zero stops the profile. Collected profile data can be
/// written using `debug_writeBlockProfile`.
#[method(name = "setBlockProfileRate")]
async fn debug_set_block_profile_rate(&self, rate: u64) -> RpcResult<()>;
/// Sets the garbage collection target percentage. A negative value disables garbage collection.
#[method(name = "setGCPercent")]
async fn debug_set_gc_percent(&self, v: i32) -> RpcResult<()>;
/// Sets the current head of the local chain by block number. Note, this is a destructive action
/// and may severely damage your chain. Use with extreme caution.
#[method(name = "setHead")]
async fn debug_set_head(&self, number: u64) -> RpcResult<()>;
/// Sets the rate of mutex profiling.
#[method(name = "setMutexProfileFraction")]
async fn debug_set_mutex_profile_fraction(&self, rate: i32) -> RpcResult<()>;
/// Configures how often in-memory state tries are persisted to disk. The interval needs to be
/// in a format parsable by a time.Duration. Note that the interval is not wall-clock time.
/// Rather it is accumulated block processing time after which the state should be flushed.
#[method(name = "setTrieFlushInterval")]
async fn debug_set_trie_flush_interval(&self, interval: String) -> RpcResult<()>;
/// Returns a printed representation of the stacks of all goroutines.
#[method(name = "stacks")]
async fn debug_stacks(&self) -> RpcResult<()>;
/// Used to obtain info about a block.
#[method(name = "standardTraceBadBlockToFile")]
async fn debug_standard_trace_bad_block_to_file(
&self,
block: BlockNumberOrTag,
opts: Option<GethDebugTracingCallOptions>,
) -> RpcResult<()>;
/// This method is similar to `debug_standardTraceBlockToFile`, but can be used to obtain info
/// about a block which has been rejected as invalid (for some reason).
#[method(name = "standardTraceBlockToFile")]
async fn debug_standard_trace_block_to_file(
&self,
block: BlockNumberOrTag,
opts: Option<GethDebugTracingCallOptions>,
) -> RpcResult<()>;
/// Turns on CPU profiling indefinitely, writing to the given file.
#[method(name = "startCPUProfile")]
async fn debug_start_cpu_profile(&self, file: String) -> RpcResult<()>;
/// Starts writing a Go runtime trace to the given file.
#[method(name = "startGoTrace")]
async fn debug_start_go_trace(&self, file: String) -> RpcResult<()>;
/// Returns the state root of the `HashedPostState` on top of the state for the given block with
/// trie updates.
#[method(name = "stateRootWithUpdates")]
async fn debug_state_root_with_updates(
&self,
hashed_state: HashedPostState,
block_id: Option<BlockId>,
) -> RpcResult<(B256, TrieUpdates)>;
/// Stops an ongoing CPU profile.
#[method(name = "stopCPUProfile")]
async fn debug_stop_cpu_profile(&self) -> RpcResult<()>;
/// Stops writing the Go runtime trace.
#[method(name = "stopGoTrace")]
async fn debug_stop_go_trace(&self) -> RpcResult<()>;
/// Returns the storage at the given block height and transaction index. The result can be
/// paged by providing a `maxResult` to cap the number of storage slots returned as well as
/// specifying the offset via `keyStart` (hash of storage key).
#[method(name = "storageRangeAt")]
async fn debug_storage_range_at(
&self,
block_hash: B256,
tx_idx: usize,
contract_address: Address,
key_start: B256,
max_result: u64,
) -> RpcResult<()>;
/// Returns the structured logs created during the execution of EVM against a block pulled
/// from the pool of bad ones and returns them as a JSON object. For the second parameter see
/// `TraceConfig` reference.
#[method(name = "traceBadBlock")]
async fn debug_trace_bad_block(
&self,
block_hash: B256,
opts: Option<GethDebugTracingCallOptions>,
) -> RpcResult<()>;
/// Sets the logging verbosity ceiling. Log messages with level up to and including the given
/// level will be printed.
#[method(name = "verbosity")]
async fn debug_verbosity(&self, level: usize) -> RpcResult<()>;
/// Sets the logging verbosity pattern.
#[method(name = "vmodule")]
async fn debug_vmodule(&self, pattern: String) -> RpcResult<()>;
/// Writes a goroutine blocking profile to the given file.
#[method(name = "writeBlockProfile")]
async fn debug_write_block_profile(&self, file: String) -> RpcResult<()>;
/// Writes an allocation profile to the given file.
#[method(name = "writeMemProfile")]
async fn debug_write_mem_profile(&self, file: String) -> RpcResult<()>;
/// Writes a goroutine blocking profile to the given file.
#[method(name = "writeMutexProfile")]
async fn debug_write_mutex_profile(&self, file: String) -> RpcResult<()>;
}
/// An extension to the `debug_` namespace that provides additional methods for retrieving
/// witnesses.
///
/// This is separate from the regular `debug_` api, because this depends on the network specific
/// params. For optimism this will expect the optimism specific payload attributes
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))]
pub trait DebugExecutionWitnessApi<Attributes> {
/// The `debug_executePayload` method allows for re-execution of a group of transactions with
/// the purpose of generating an execution witness. The witness comprises of a map of all
/// hashed trie nodes to their preimages that were required during the execution of the block,
/// including during state root recomputation.
///
/// The first argument is the parent block hash. The second argument is the payload
/// attributes for the new block.
#[method(name = "executePayload")]
async fn execute_payload(
&self,
parent_block_hash: B256,
attributes: Attributes,
) -> RpcResult<ExecutionWitness>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/admin.rs | crates/rpc/rpc-api/src/admin.rs | use alloy_rpc_types_admin::{NodeInfo, PeerInfo};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use reth_network_peers::{AnyNode, NodeRecord};
/// Admin namespace rpc interface that gives access to several non-standard RPC methods.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "admin"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "admin"))]
pub trait AdminApi {
/// Adds the given node record to the peerset.
#[method(name = "addPeer")]
fn add_peer(&self, record: NodeRecord) -> RpcResult<bool>;
/// Disconnects from a remote node if the connection exists.
///
/// Returns true if the peer was successfully removed.
#[method(name = "removePeer")]
fn remove_peer(&self, record: AnyNode) -> RpcResult<bool>;
/// Adds the given node record to the trusted peerset.
#[method(name = "addTrustedPeer")]
fn add_trusted_peer(&self, record: AnyNode) -> RpcResult<bool>;
/// Removes a remote node from the trusted peer set, but it does not disconnect it
/// automatically.
///
/// Returns true if the peer was successfully removed.
#[method(name = "removeTrustedPeer")]
fn remove_trusted_peer(&self, record: AnyNode) -> RpcResult<bool>;
/// The peers administrative property can be queried for all the information known about the
/// connected remote nodes at the networking granularity. These include general information
/// about the nodes themselves as participants of the devp2p P2P overlay protocol, as well as
/// specialized information added by each of the running application protocols
#[method(name = "peers")]
async fn peers(&self) -> RpcResult<Vec<PeerInfo>>;
/// Creates an RPC subscription which serves events received from the network.
#[subscription(
name = "peerEvents",
unsubscribe = "peerEvents_unsubscribe",
item = String
)]
async fn subscribe_peer_events(&self) -> jsonrpsee::core::SubscriptionResult;
/// Returns the ENR of the node.
#[method(name = "nodeInfo")]
async fn node_info(&self) -> RpcResult<NodeInfo>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/rpc.rs | crates/rpc/rpc-api/src/rpc.rs | use alloy_rpc_types::RpcModules;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// RPC namespace, used to find the versions of all rpc modules
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "rpc"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "rpc"))]
pub trait RpcApi {
/// Lists enabled APIs and the version of each.
#[method(name = "modules")]
fn rpc_modules(&self) -> RpcResult<RpcModules>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/anvil.rs | crates/rpc/rpc-api/src/anvil.rs | use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_rpc_types_anvil::{Forking, Metadata, MineOptions, NodeInfo};
use alloy_rpc_types_eth::Block;
/// Anvil rpc interface.
/// https://book.getfoundry.sh/reference/anvil/#custom-methods
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "anvil"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "anvil"))]
pub trait AnvilApi {
/// Sends transactions impersonating specific account and contract addresses.
#[method(name = "impersonateAccount")]
async fn anvil_impersonate_account(&self, address: Address) -> RpcResult<()>;
/// Stops impersonating an account if previously set with `anvil_impersonateAccount`.
#[method(name = "stopImpersonatingAccount")]
async fn anvil_stop_impersonating_account(&self, address: Address) -> RpcResult<()>;
/// If set to true will make every account impersonated.
#[method(name = "autoImpersonateAccount")]
async fn anvil_auto_impersonate_account(&self, enabled: bool) -> RpcResult<()>;
/// Returns `true` if auto mining is enabled, and `false`.
#[method(name = "getAutomine")]
async fn anvil_get_automine(&self) -> RpcResult<bool>;
/// Mines a series of blocks.
#[method(name = "mine")]
async fn anvil_mine(&self, blocks: Option<U256>, interval: Option<U256>) -> RpcResult<()>;
/// Enables or disables, based on the single boolean argument, the automatic mining of new
/// blocks with each new transaction submitted to the network.
#[method(name = "setAutomine")]
async fn anvil_set_automine(&self, enabled: bool) -> RpcResult<()>;
/// Sets the mining behavior to interval with the given interval (seconds).
#[method(name = "setIntervalMining")]
async fn anvil_set_interval_mining(&self, interval: u64) -> RpcResult<()>;
/// Removes transactions from the pool.
#[method(name = "anvil_dropTransaction")]
async fn anvil_drop_transaction(&self, tx_hash: B256) -> RpcResult<Option<B256>>;
/// Resets the fork to a fresh forked state, and optionally update the fork config.
///
/// If `forking` is `None` then this will disable forking entirely.
#[method(name = "reset")]
async fn anvil_reset(&self, fork: Option<Forking>) -> RpcResult<()>;
/// Sets the backend rpc url.
#[method(name = "setRpcUrl")]
async fn anvil_set_rpc_url(&self, url: String) -> RpcResult<()>;
/// Modifies the balance of an account.
#[method(name = "setBalance")]
async fn anvil_set_balance(&self, address: Address, balance: U256) -> RpcResult<()>;
/// Sets the code of a contract.
#[method(name = "setCode")]
async fn anvil_set_code(&self, address: Address, code: Bytes) -> RpcResult<()>;
/// Sets the nonce of an address.
#[method(name = "setNonce")]
async fn anvil_set_nonce(&self, address: Address, nonce: U256) -> RpcResult<()>;
/// Writes a single slot of the account's storage.
#[method(name = "setStorageAt")]
async fn anvil_set_storage_at(
&self,
address: Address,
slot: U256,
value: B256,
) -> RpcResult<bool>;
/// Sets the coinbase address.
#[method(name = "setCoinbase")]
async fn anvil_set_coinbase(&self, address: Address) -> RpcResult<()>;
/// Sets the chain id.
#[method(name = "setChainId")]
async fn anvil_set_chain_id(&self, chain_id: u64) -> RpcResult<()>;
/// Enables or disable logging.
#[method(name = "setLoggingEnabled")]
async fn anvil_set_logging_enabled(&self, enabled: bool) -> RpcResult<()>;
/// Sets the minimum gas price for the node.
#[method(name = "setMinGasPrice")]
async fn anvil_set_min_gas_price(&self, gas_price: U256) -> RpcResult<()>;
/// Sets the base fee of the next block.
#[method(name = "setNextBlockBaseFeePerGas")]
async fn anvil_set_next_block_base_fee_per_gas(&self, base_fee: U256) -> RpcResult<()>;
/// Sets the minimum gas price for the node.
#[method(name = "setTime")]
async fn anvil_set_time(&self, timestamp: u64) -> RpcResult<u64>;
/// Creates a buffer that represents all state on the chain, which can be loaded to separate
/// process by calling `anvil_loadState`.
#[method(name = "dumpState")]
async fn anvil_dump_state(&self) -> RpcResult<Bytes>;
/// Append chain state buffer to current chain. Will overwrite any conflicting addresses or
/// storage.
#[method(name = "loadState")]
async fn anvil_load_state(&self, state: Bytes) -> RpcResult<bool>;
/// Retrieves the Anvil node configuration params.
#[method(name = "nodeInfo")]
async fn anvil_node_info(&self) -> RpcResult<NodeInfo>;
/// Retrieves metadata about the Anvil instance.
#[method(name = "metadata")]
async fn anvil_metadata(&self) -> RpcResult<Metadata>;
/// Snapshot the state of the blockchain at the current block.
#[method(name = "snapshot")]
async fn anvil_snapshot(&self) -> RpcResult<U256>;
/// Revert the state of the blockchain to a previous snapshot.
/// Takes a single parameter, which is the snapshot id to revert to.
#[method(name = "revert")]
async fn anvil_revert(&self, id: U256) -> RpcResult<bool>;
/// Jump forward in time by the given amount of time, in seconds.
#[method(name = "increaseTime")]
async fn anvil_increase_time(&self, seconds: U256) -> RpcResult<i64>;
/// Similar to `evm_increaseTime` but takes the exact timestamp that you want in the next block.
#[method(name = "setNextBlockTimestamp")]
async fn anvil_set_next_block_timestamp(&self, seconds: u64) -> RpcResult<()>;
/// Sets the next block gas limit.
#[method(name = "setBlockGasLimit")]
async fn anvil_set_block_gas_limit(&self, gas_limit: U256) -> RpcResult<bool>;
/// Sets an interval for the block timestamp.
#[method(name = "setBlockTimestampInterval")]
async fn anvil_set_block_timestamp_interval(&self, seconds: u64) -> RpcResult<()>;
/// Sets an interval for the block timestamp.
#[method(name = "removeBlockTimestampInterval")]
async fn anvil_remove_block_timestamp_interval(&self) -> RpcResult<bool>;
/// Mine blocks, instantly and return the mined blocks.
///
/// This will mine the blocks regardless of the configured mining mode.
///
/// **Note**: This behaves exactly as `evm_mine` but returns different output, for
/// compatibility reasons, this is a separate call since `evm_mine` is not an anvil original.
/// and `ganache` may change the `0x0` placeholder.
#[method(name = "mine_detailed")] // This method requires using `snake_case`.
async fn anvil_mine_detailed(&self, opts: Option<MineOptions>) -> RpcResult<Vec<Block>>;
/// Turn on call traces for transactions that are returned to the user when they execute a
/// transaction (instead of just txhash/receipt).
#[method(name = "enableTraces")]
async fn anvil_enable_traces(&self) -> RpcResult<()>;
/// Removes all transactions for that address from the transaction pool.
#[method(name = "removePoolTransactions")]
async fn anvil_remove_pool_transactions(&self, address: Address) -> RpcResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/txpool.rs | crates/rpc/rpc-api/src/txpool.rs | use alloy_json_rpc::RpcObject;
use alloy_primitives::Address;
use alloy_rpc_types_txpool::{TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolStatus};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Txpool rpc interface.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "txpool"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "txpool"))]
pub trait TxPoolApi<T: RpcObject> {
/// Returns the number of transactions currently pending for inclusion in the next block(s), as
/// well as the ones that are being scheduled for future execution only.
///
/// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status) for more details
#[method(name = "status")]
async fn txpool_status(&self) -> RpcResult<TxpoolStatus>;
/// Returns a summary of all the transactions currently pending for inclusion in the next
/// block(s), as well as the ones that are being scheduled for future execution only.
///
/// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect) for more details
#[method(name = "inspect")]
async fn txpool_inspect(&self) -> RpcResult<TxpoolInspect>;
/// Retrieves the transactions contained within the txpool, returning pending as well as queued
/// transactions of this address, grouped by nonce.
///
/// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_contentFrom) for more details
#[method(name = "contentFrom")]
async fn txpool_content_from(&self, from: Address) -> RpcResult<TxpoolContentFrom<T>>;
/// Returns the details of all transactions currently pending for inclusion in the next
/// block(s), as well as the ones that are being scheduled for future execution only.
///
/// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details
#[method(name = "content")]
async fn txpool_content(&self) -> RpcResult<TxpoolContent<T>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/otterscan.rs | crates/rpc/rpc-api/src/otterscan.rs | use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockId};
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Address, Bytes, TxHash, B256};
use alloy_rpc_types_trace::otterscan::{
BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry,
TransactionsWithReceipts,
};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Otterscan rpc interface.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "ots"))]
pub trait Otterscan<T: RpcObject, H: RpcObject> {
/// Get the block header by block number, required by otterscan.
/// Otterscan currently requires this endpoint, used as:
///
/// 1. check if the node is Erigon or not
/// 2. get block header instead of the full block
///
/// Ref: <https://github.com/otterscan/otterscan/blob/071d8c55202badf01804f6f8d53ef9311d4a9e47/src/useProvider.ts#L71>
#[method(name = "getHeaderByNumber", aliases = ["erigon_getHeaderByNumber"])]
async fn get_header_by_number(
&self,
block_number: LenientBlockNumberOrTag,
) -> RpcResult<Option<H>>;
/// Check if a certain address contains a deployed code.
#[method(name = "hasCode")]
async fn has_code(&self, address: Address, block_id: Option<BlockId>) -> RpcResult<bool>;
/// Very simple API versioning scheme. Every time we add a new capability, the number is
/// incremented. This allows for Otterscan to check if the node contains all API it
/// needs.
#[method(name = "getApiLevel")]
async fn get_api_level(&self) -> RpcResult<u64>;
/// Return the internal ETH transfers inside a transaction.
#[method(name = "getInternalOperations")]
async fn get_internal_operations(&self, tx_hash: TxHash) -> RpcResult<Vec<InternalOperation>>;
/// Given a transaction hash, returns its raw revert reason.
#[method(name = "getTransactionError")]
async fn get_transaction_error(&self, tx_hash: TxHash) -> RpcResult<Option<Bytes>>;
/// Extract all variations of calls, contract creation and self-destructs and returns a call
/// tree.
#[method(name = "traceTransaction")]
async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult<Option<Vec<TraceEntry>>>;
/// Tailor-made and expanded version of `eth_getBlockByNumber` for block details page in
/// Otterscan.
#[method(name = "getBlockDetails")]
async fn get_block_details(
&self,
block_number: LenientBlockNumberOrTag,
) -> RpcResult<BlockDetails<H>>;
/// Tailor-made and expanded version of `eth_getBlockByHash` for block details page in
/// Otterscan.
#[method(name = "getBlockDetailsByHash")]
async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult<BlockDetails<H>>;
/// Get paginated transactions for a certain block. Also remove some verbose fields like logs.
#[method(name = "getBlockTransactions")]
async fn get_block_transactions(
&self,
block_number: LenientBlockNumberOrTag,
page_number: usize,
page_size: usize,
) -> RpcResult<OtsBlockTransactions<T, H>>;
/// Gets paginated inbound/outbound transaction calls for a certain address.
#[method(name = "searchTransactionsBefore")]
async fn search_transactions_before(
&self,
address: Address,
block_number: LenientBlockNumberOrTag,
page_size: usize,
) -> RpcResult<TransactionsWithReceipts>;
/// Gets paginated inbound/outbound transaction calls for a certain address.
#[method(name = "searchTransactionsAfter")]
async fn search_transactions_after(
&self,
address: Address,
block_number: LenientBlockNumberOrTag,
page_size: usize,
) -> RpcResult<TransactionsWithReceipts>;
/// Gets the transaction hash for a certain sender address, given its nonce.
#[method(name = "getTransactionBySenderAndNonce")]
async fn get_transaction_by_sender_and_nonce(
&self,
sender: Address,
nonce: u64,
) -> RpcResult<Option<TxHash>>;
/// Gets the transaction hash and the address who created a contract.
#[method(name = "getContractCreator")]
async fn get_contract_creator(&self, address: Address) -> RpcResult<Option<ContractCreator>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/reth.rs | crates/rpc/rpc-api/src/reth.rs | use alloy_eips::BlockId;
use alloy_primitives::{Address, U256};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
use std::collections::HashMap;
// Required for the subscription attribute below
use reth_chain_state as _;
/// Reth API namespace for reth-specific methods
///
/// The `rpc` macro prefixes every method with the `reth` namespace, so methods are
/// exposed as e.g. `reth_getBalanceChangesInBlock`.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "reth"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "reth"))]
pub trait RethApi {
    /// Returns all ETH balance changes in a block
    ///
    /// The result is keyed by account address; the meaning of the value (delta vs.
    /// post-state balance) is defined by the server implementation.
    #[method(name = "getBalanceChangesInBlock")]
    async fn reth_get_balance_changes_in_block(
        &self,
        block_id: BlockId,
    ) -> RpcResult<HashMap<Address, U256>>;

    /// Subscribe to json `ChainNotifications`
    #[subscription(
        name = "subscribeChainNotifications",
        unsubscribe = "unsubscribeChainNotifications",
        item = reth_chain_state::CanonStateNotification
    )]
    async fn reth_subscribe_chain_notifications(&self) -> jsonrpsee::core::SubscriptionResult;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/net.rs | crates/rpc/rpc-api/src/net.rs | use alloy_primitives::U64;
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Net rpc interface.
///
/// Methods are exposed under the `net_` prefix (e.g. `net_version`).
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "net"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "net"))]
pub trait NetApi {
    /// Returns the network ID.
    #[method(name = "version")]
    fn version(&self) -> RpcResult<String>;

    /// Returns number of peers connected to node.
    #[method(name = "peerCount")]
    fn peer_count(&self) -> RpcResult<U64>;

    /// Returns true if client is actively listening for network connections.
    /// Otherwise false.
    #[method(name = "listening")]
    fn is_listening(&self) -> RpcResult<bool>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-api/src/hardhat.rs | crates/rpc/rpc-api/src/hardhat.rs | use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_rpc_types_anvil::{Forking, Metadata};
use jsonrpsee::{core::RpcResult, proc_macros::rpc};
/// Hardhat rpc interface.
///
/// See <https://hardhat.org/hardhat-network/docs/reference#hardhat-network-methods>
///
/// The `rpc` macro prefixes every method with the `hardhat` namespace, so each
/// `#[method(name = "...")]` must use the bare camelCase name without the prefix.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "hardhat"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "hardhat"))]
pub trait HardhatApi {
    /// Removes the given transaction from the mempool, if it exists.
    ///
    /// Returns `true` if successful, otherwise `false`.
    // Fixed: previously `name = "hardhat_dropTransaction"`, which together with the
    // namespace prefix would have exposed `hardhat_hardhat_dropTransaction`.
    #[method(name = "dropTransaction")]
    async fn hardhat_drop_transaction(&self, tx_hash: B256) -> RpcResult<bool>;

    /// Allows Hardhat Network to sign transactions as the given address.
    #[method(name = "impersonateAccount")]
    async fn hardhat_impersonate_account(&self, address: Address) -> RpcResult<()>;

    /// Returns `true` if automatic mining is enabled, and `false` otherwise.
    #[method(name = "getAutomine")]
    async fn hardhat_get_automine(&self) -> RpcResult<bool>;

    /// Returns an object with metadata about the instance of the Hardhat network.
    #[method(name = "metadata")]
    async fn hardhat_metadata(&self) -> RpcResult<Metadata>;

    /// Mines a specified number of blocks at a given interval.
    #[method(name = "mine")]
    async fn hardhat_mine(&self, blocks: Option<U256>, interval: Option<U256>) -> RpcResult<()>;

    /// Resets back to a fresh forked state, fork from another block number or disable forking.
    #[method(name = "reset")]
    async fn hardhat_reset(&self, fork: Option<Forking>) -> RpcResult<()>;

    /// Sets the balance for the given address.
    #[method(name = "setBalance")]
    async fn hardhat_set_balance(&self, address: Address, balance: U256) -> RpcResult<()>;

    /// Modifies the bytecode stored at an account's address.
    #[method(name = "setCode")]
    async fn hardhat_set_code(&self, address: Address, code: Bytes) -> RpcResult<()>;

    /// Sets the coinbase address to be used in new blocks.
    #[method(name = "setCoinbase")]
    async fn hardhat_set_coinbase(&self, address: Address) -> RpcResult<()>;

    /// Enables or disables logging.
    #[method(name = "setLoggingEnabled")]
    async fn hardhat_set_logging_enabled(&self, enabled: bool) -> RpcResult<()>;

    /// Changes the minimum gas price accepted by the network (in wei).
    #[method(name = "setMinGasPrice")]
    async fn hardhat_set_min_gas_price(&self, gas_price: U256) -> RpcResult<()>;

    /// Sets the base fee of the next block.
    #[method(name = "setNextBlockBaseFeePerGas")]
    async fn hardhat_set_next_block_base_fee_per_gas(
        &self,
        base_fee_per_gas: U256,
    ) -> RpcResult<()>;

    /// Sets the `PREVRANDAO` value of the next block.
    #[method(name = "setPrevRandao")]
    async fn hardhat_set_prev_randao(&self, prev_randao: B256) -> RpcResult<()>;

    /// Modifies an account's nonce by overwriting it.
    #[method(name = "setNonce")]
    async fn hardhat_set_nonce(&self, address: Address, nonce: U256) -> RpcResult<()>;

    /// Writes a single position of an account's storage.
    #[method(name = "setStorageAt")]
    async fn hardhat_set_storage_at(
        &self,
        address: Address,
        slot: U256,
        value: B256,
    ) -> RpcResult<()>;

    /// Stops impersonating the given address.
    #[method(name = "stopImpersonatingAccount")]
    async fn hardhat_stop_impersonating_account(&self, address: Address) -> RpcResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-convert/src/lib.rs | crates/rpc/rpc-convert/src/lib.rs | //! Reth compatibility and utils for RPC types
//!
//! This crate various helper functions to convert between reth primitive types and rpc types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
// Conversion helpers between reth primitive types and RPC types.
pub mod block;
mod fees;
pub mod receipt;
mod rpc;
pub mod transaction;

// Flattened re-exports so downstream crates can use the converters directly.
pub use block::TryFromBlockResponse;
pub use fees::{CallFees, CallFeesError};
pub use receipt::TryFromReceiptResponse;
pub use rpc::*;
pub use transaction::{
    EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError,
    TryFromTransactionResponse, TryIntoSimTx, TxInfoMapper,
};

// Optimism-specific conversions, only compiled with the `op` feature.
#[cfg(feature = "op")]
pub use transaction::op::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-convert/src/block.rs | crates/rpc/rpc-convert/src/block.rs | //! Conversion traits for block responses to primitive block types.
use alloy_network::Network;
use std::convert::Infallible;
/// Trait for converting network block responses to primitive block types.
///
/// `N` selects the source network whose [`Network::BlockResponse`] is consumed.
pub trait TryFromBlockResponse<N: Network> {
    /// The error type returned if the conversion fails.
    type Error: core::error::Error + Send + Sync + Unpin;

    /// Converts a network block response to a primitive block type.
    ///
    /// # Returns
    ///
    /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails.
    fn from_block_response(block_response: N::BlockResponse) -> Result<Self, Self::Error>
    where
        Self: Sized;
}
/// Infallible conversion for any network whose block response converts directly
/// into an [`alloy_consensus::Block`].
impl<N: Network, T> TryFromBlockResponse<N> for alloy_consensus::Block<T>
where
    N::BlockResponse: Into<Self>,
{
    type Error = Infallible;

    fn from_block_response(block_response: N::BlockResponse) -> Result<Self, Self::Error> {
        // The `Into` bound guarantees this can never fail.
        let block = block_response.into();
        Ok(block)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::{Block, TxEnvelope};
    use alloy_network::Ethereum;
    use alloy_rpc_types_eth::BlockTransactions;

    /// An empty RPC block (full transaction bodies, no entries) converts without error.
    #[test]
    fn test_try_from_block_response() {
        let rpc_block: alloy_rpc_types_eth::Block =
            alloy_rpc_types_eth::Block::new(Default::default(), BlockTransactions::Full(vec![]));
        let result =
            <Block<TxEnvelope> as TryFromBlockResponse<Ethereum>>::from_block_response(rpc_block);
        assert!(result.is_ok());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-convert/src/receipt.rs | crates/rpc/rpc-convert/src/receipt.rs | //! Conversion traits for receipt responses to primitive receipt types.
use alloy_network::Network;
use std::convert::Infallible;
/// Trait for converting network receipt responses to primitive receipt types.
///
/// `N` selects the source network whose [`Network::ReceiptResponse`] is consumed.
pub trait TryFromReceiptResponse<N: Network> {
    /// The error type returned if the conversion fails.
    type Error: core::error::Error + Send + Sync + Unpin;

    /// Converts a network receipt response to a primitive receipt type.
    ///
    /// # Returns
    ///
    /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails.
    fn from_receipt_response(receipt_response: N::ReceiptResponse) -> Result<Self, Self::Error>
    where
        Self: Sized;
}
/// Ethereum receipt conversion: unwraps the RPC envelope and converts the inner
/// consensus receipt into the reth primitive type.
impl TryFromReceiptResponse<alloy_network::Ethereum> for reth_ethereum_primitives::Receipt {
    type Error = Infallible;

    fn from_receipt_response(
        receipt_response: alloy_rpc_types_eth::TransactionReceipt,
    ) -> Result<Self, Self::Error> {
        // Strip the RPC wrapper first, then convert the consensus envelope.
        let envelope = receipt_response.into_inner();
        Ok(envelope.into())
    }
}
/// Optimism receipt conversion: `inner.inner` drops the RPC wrapper and the
/// `l1_block_info` field, keeping only the consensus receipt with logs mapped to
/// primitive types.
#[cfg(feature = "op")]
impl TryFromReceiptResponse<op_alloy_network::Optimism> for reth_optimism_primitives::OpReceipt {
    type Error = Infallible;

    fn from_receipt_response(
        receipt_response: op_alloy_rpc_types::OpTransactionReceipt,
    ) -> Result<Self, Self::Error> {
        Ok(receipt_response.inner.inner.map_logs(Into::into).into())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::ReceiptEnvelope;
    use alloy_network::Ethereum;
    use reth_ethereum_primitives::Receipt;

    /// A default EIP-1559 RPC receipt converts infallibly into a primitive receipt.
    #[test]
    fn test_try_from_receipt_response() {
        let rpc_receipt = alloy_rpc_types_eth::TransactionReceipt {
            inner: ReceiptEnvelope::Eip1559(Default::default()),
            transaction_hash: Default::default(),
            transaction_index: None,
            block_hash: None,
            block_number: None,
            gas_used: 0,
            effective_gas_price: 0,
            blob_gas_used: None,
            blob_gas_price: None,
            from: Default::default(),
            to: None,
            contract_address: None,
        };
        let result =
            <Receipt as TryFromReceiptResponse<Ethereum>>::from_receipt_response(rpc_receipt);
        assert!(result.is_ok());
    }

    /// Same as above for the Optimism receipt wrapper (feature-gated).
    #[cfg(feature = "op")]
    #[test]
    fn test_try_from_receipt_response_optimism() {
        use op_alloy_consensus::OpReceiptEnvelope;
        use op_alloy_network::Optimism;
        use op_alloy_rpc_types::OpTransactionReceipt;
        use reth_optimism_primitives::OpReceipt;

        let op_receipt = OpTransactionReceipt {
            inner: alloy_rpc_types_eth::TransactionReceipt {
                inner: OpReceiptEnvelope::Eip1559(Default::default()),
                transaction_hash: Default::default(),
                transaction_index: None,
                block_hash: None,
                block_number: None,
                gas_used: 0,
                effective_gas_price: 0,
                blob_gas_used: None,
                blob_gas_price: None,
                from: Default::default(),
                to: None,
                contract_address: None,
            },
            l1_block_info: Default::default(),
        };
        let result =
            <OpReceipt as TryFromReceiptResponse<Optimism>>::from_receipt_response(op_receipt);
        assert!(result.is_ok());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-convert/src/rpc.rs | crates/rpc/rpc-convert/src/rpc.rs | use std::{fmt::Debug, future::Future};
use alloy_consensus::{EthereumTxEnvelope, SignableTransaction, TxEip4844};
use alloy_json_rpc::RpcObject;
use alloy_network::{
primitives::HeaderResponse, Network, ReceiptResponse, TransactionResponse, TxSigner,
};
use alloy_primitives::Signature;
use alloy_rpc_types_eth::TransactionRequest;
/// RPC types used by the `eth_` RPC API.
///
/// This is a subset of [`Network`] trait with only RPC response types kept.
pub trait RpcTypes: Send + Sync + Clone + Unpin + Debug + 'static {
    /// Header response type.
    type Header: RpcObject + HeaderResponse;
    /// Receipt response type.
    type Receipt: RpcObject + ReceiptResponse;
    /// Transaction response type.
    type TransactionResponse: RpcObject + TransactionResponse;
    /// Transaction request type.
    type TransactionRequest: RpcObject + AsRef<TransactionRequest> + AsMut<TransactionRequest>;
}
/// Blanket impl: any [`Network`] whose request type can be viewed as an Ethereum
/// [`TransactionRequest`] automatically provides [`RpcTypes`].
impl<T> RpcTypes for T
where
    T: Network<TransactionRequest: AsRef<TransactionRequest> + AsMut<TransactionRequest>> + Unpin,
{
    type Header = T::HeaderResponse;
    type Receipt = T::ReceiptResponse;
    type TransactionResponse = T::TransactionResponse;
    type TransactionRequest = T::TransactionRequest;
}
/// Adapter for network specific transaction response.
pub type RpcTransaction<T> = <T as RpcTypes>::TransactionResponse;

/// Adapter for network specific receipt response.
pub type RpcReceipt<T> = <T as RpcTypes>::Receipt;

/// Adapter for network specific header response.
pub type RpcHeader<T> = <T as RpcTypes>::Header;

/// Adapter for network specific block type, parameterized by the network's
/// transaction and header response types.
pub type RpcBlock<T> = alloy_rpc_types_eth::Block<RpcTransaction<T>, RpcHeader<T>>;

/// Adapter for network specific transaction request.
pub type RpcTxReq<T> = <T as RpcTypes>::TransactionRequest;
/// Error for [`SignableTxRequest`] trait.
#[derive(Debug, thiserror::Error)]
pub enum SignTxRequestError {
    /// The transaction request is invalid.
    ///
    /// Returned when a typed transaction cannot be built from the request.
    #[error("invalid transaction request")]
    InvalidTransactionRequest,
    /// The signer is not supported.
    #[error(transparent)]
    SignerNotSupported(#[from] alloy_signer::Error),
}
/// An abstraction over transaction requests that can be signed.
///
/// `T` is the signed transaction envelope produced on success.
pub trait SignableTxRequest<T>: Send + Sync + 'static {
    /// Attempts to build a transaction request and sign it with the given signer.
    fn try_build_and_sign(
        self,
        signer: impl TxSigner<Signature> + Send,
    ) -> impl Future<Output = Result<T, SignTxRequestError>> + Send;
}
/// Signs an Ethereum [`TransactionRequest`] into a standard envelope.
impl SignableTxRequest<EthereumTxEnvelope<TxEip4844>> for TransactionRequest {
    async fn try_build_and_sign(
        self,
        signer: impl TxSigner<Signature> + Send,
    ) -> Result<EthereumTxEnvelope<TxEip4844>, SignTxRequestError> {
        // A request that is missing required fields cannot become a typed transaction.
        let mut typed_tx = match self.build_typed_tx() {
            Ok(tx) => tx,
            Err(_) => return Err(SignTxRequestError::InvalidTransactionRequest),
        };
        let signature = signer.sign_transaction(&mut typed_tx).await?;
        Ok(typed_tx.into_signed(signature).into())
    }
}
/// Signs an Optimism transaction request into an [`op_alloy_consensus::OpTxEnvelope`].
#[cfg(feature = "op")]
impl SignableTxRequest<op_alloy_consensus::OpTxEnvelope>
    for op_alloy_rpc_types::OpTransactionRequest
{
    async fn try_build_and_sign(
        self,
        signer: impl TxSigner<Signature> + Send,
    ) -> Result<op_alloy_consensus::OpTxEnvelope, SignTxRequestError> {
        let mut tx =
            self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?;
        let signature = signer.sign_transaction(&mut tx).await?;
        // sanity check: deposit transactions must never result from a signed user
        // request, so reject them here.
        if tx.is_deposit() {
            return Err(SignTxRequestError::InvalidTransactionRequest);
        }
        Ok(tx.into_signed(signature).into())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-convert/src/transaction.rs | crates/rpc/rpc-convert/src/transaction.rs | //! Compatibility functions for rpc `Transaction` type.
use crate::{
fees::{CallFees, CallFeesError},
RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes,
};
use alloy_consensus::{
error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844,
};
use alloy_network::Network;
use alloy_primitives::{Address, TxKind, U256};
use alloy_rpc_types_eth::{
request::{TransactionInputError, TransactionRequest},
Transaction, TransactionInfo,
};
use core::error;
use reth_evm::{
revm::context_interface::{either::Either, Block},
ConfigureEvm, SpecFor, TxEnvFor,
};
use reth_primitives_traits::{
HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, TransactionMeta, TxTy,
};
use revm_context::{BlockEnv, CfgEnv, TxEnv};
use std::{borrow::Cow, convert::Infallible, error::Error, fmt::Debug, marker::PhantomData};
use thiserror::Error;
/// Input for [`RpcConvert::convert_receipts`].
///
/// Bundles one primitive receipt together with its transaction and block-level
/// context needed to build the RPC representation.
#[derive(Debug, Clone)]
pub struct ConvertReceiptInput<'a, N: NodePrimitives> {
    /// Primitive receipt.
    pub receipt: Cow<'a, N::Receipt>,
    /// Transaction the receipt corresponds to.
    pub tx: Recovered<&'a N::SignedTx>,
    /// Gas used by the transaction.
    pub gas_used: u64,
    /// Number of logs emitted before this transaction.
    pub next_log_index: usize,
    /// Metadata for the transaction.
    pub meta: TransactionMeta,
}
/// A type that knows how to convert primitive receipts to RPC representations.
pub trait ReceiptConverter<N: NodePrimitives>: Debug + 'static {
    /// RPC representation.
    type RpcReceipt;

    /// Error that may occur during conversion.
    type Error;

    /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all
    /// receipts are from the same block.
    fn convert_receipts(
        &self,
        receipts: Vec<ConvertReceiptInput<'_, N>>,
    ) -> Result<Vec<Self::RpcReceipt>, Self::Error>;
}
/// A type that knows how to convert a consensus header into an RPC header.
pub trait HeaderConverter<Consensus, Rpc>: Debug + Send + Sync + Unpin + Clone + 'static {
    /// Converts a consensus header into an RPC header.
    ///
    /// `block_size` is the size of the enclosing block, surfaced in the RPC header.
    fn convert_header(&self, header: SealedHeader<Consensus>, block_size: usize) -> Rpc;
}
/// Default implementation of [`HeaderConverter`] that uses [`FromConsensusHeader`] to convert
/// headers.
impl<Consensus, Rpc> HeaderConverter<Consensus, Rpc> for ()
where
    Rpc: FromConsensusHeader<Consensus>,
{
    fn convert_header(&self, header: SealedHeader<Consensus>, block_size: usize) -> Rpc {
        Rpc::from_consensus_header(header, block_size)
    }
}
/// Conversion trait for obtaining RPC header from a consensus header.
pub trait FromConsensusHeader<T> {
    /// Takes a consensus header and converts it into `self`.
    fn from_consensus_header(header: SealedHeader<T>, block_size: usize) -> Self;
}
impl<T: Sealable> FromConsensusHeader<T> for alloy_rpc_types_eth::Header<T> {
    fn from_consensus_header(header: SealedHeader<T>, block_size: usize) -> Self {
        // Total difficulty is not known at this layer, hence `None`.
        Self::from_consensus(header.into(), None, Some(U256::from(block_size)))
    }
}
/// Responsible for the conversions from and into RPC requests and responses.
///
/// The JSON-RPC schema and the Node primitives are configurable using the [`RpcConvert::Network`]
/// and [`RpcConvert::Primitives`] associated types respectively.
///
/// A generic implementation [`RpcConverter`] should be preferred over a manual implementation. As
/// long as its trait bound requirements are met, the implementation is created automatically and
/// can be used in RPC method handlers for all the conversions.
pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug + 'static {
    /// Associated lower layer consensus types to convert from and into types of [`Self::Network`].
    type Primitives: NodePrimitives;

    /// Associated upper layer JSON-RPC API network requests and responses to convert from and into
    /// types of [`Self::Primitives`].
    type Network: RpcTypes + Send + Sync + Unpin + Clone + Debug;

    /// A set of variables for executing a transaction.
    type TxEnv;

    /// An associated RPC conversion error.
    type Error: error::Error + Into<jsonrpsee_types::ErrorObject<'static>>;

    /// The EVM specification identifier.
    type Spec;

    /// Wrapper for `fill()` with default `TransactionInfo`.
    ///
    /// Create a new rpc transaction result for a _pending_ signed transaction, setting block
    /// environment related fields to `None`.
    fn fill_pending(
        &self,
        tx: Recovered<TxTy<Self::Primitives>>,
    ) -> Result<RpcTransaction<Self::Network>, Self::Error> {
        self.fill(tx, TransactionInfo::default())
    }

    /// Create a new rpc transaction result for a mined transaction, using the given block hash,
    /// number, and tx index fields to populate the corresponding fields in the rpc result.
    ///
    /// The block hash, number, and tx index fields should be from the original block where the
    /// transaction was mined.
    fn fill(
        &self,
        tx: Recovered<TxTy<Self::Primitives>>,
        tx_info: TransactionInfo,
    ) -> Result<RpcTransaction<Self::Network>, Self::Error>;

    /// Builds a fake transaction from a transaction request for inclusion into block built in
    /// `eth_simulateV1`.
    fn build_simulate_v1_transaction(
        &self,
        request: RpcTxReq<Self::Network>,
    ) -> Result<TxTy<Self::Primitives>, Self::Error>;

    /// Creates a transaction environment for execution based on `request` with corresponding
    /// `cfg_env` and `block_env`.
    fn tx_env(
        &self,
        request: RpcTxReq<Self::Network>,
        cfg_env: &CfgEnv<Self::Spec>,
        block_env: &BlockEnv,
    ) -> Result<Self::TxEnv, Self::Error>;

    /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all
    /// receipts are from the same block.
    fn convert_receipts(
        &self,
        receipts: Vec<ConvertReceiptInput<'_, Self::Primitives>>,
    ) -> Result<Vec<RpcReceipt<Self::Network>>, Self::Error>;

    /// Converts a primitive header to an RPC header.
    fn convert_header(
        &self,
        header: SealedHeaderFor<Self::Primitives>,
        block_size: usize,
    ) -> Result<RpcHeader<Self::Network>, Self::Error>;
}
/// Converts `self` into `T`. The opposite of [`FromConsensusTx`].
///
/// Should create an RPC transaction response object based on a consensus transaction, its signer
/// [`Address`] and an additional context [`IntoRpcTx::TxInfo`].
///
/// Avoid implementing [`IntoRpcTx`] and use [`FromConsensusTx`] instead. Implementing it
/// automatically provides an implementation of [`IntoRpcTx`] thanks to the blanket implementation
/// in this crate.
///
/// Prefer using [`IntoRpcTx`] over [`FromConsensusTx`] when specifying trait bounds on a generic
/// function to ensure that types that only implement [`IntoRpcTx`] can be used as well.
pub trait IntoRpcTx<T> {
    /// An additional context, usually [`TransactionInfo`] in a wrapper that carries some
    /// implementation specific extra information.
    type TxInfo;

    /// Performs the conversion consuming `self` with `signer` and `tx_info`. See [`IntoRpcTx`]
    /// for details.
    fn into_rpc_tx(self, signer: Address, tx_info: Self::TxInfo) -> T;
}
/// Converts `T` into `self`. It is reciprocal of [`IntoRpcTx`].
///
/// Should create an RPC transaction response object based on a consensus transaction, its signer
/// [`Address`] and an additional context [`FromConsensusTx::TxInfo`].
///
/// Prefer implementing [`FromConsensusTx`] over [`IntoRpcTx`] because it automatically provides an
/// implementation of [`IntoRpcTx`] thanks to the blanket implementation in this crate.
///
/// Prefer using [`IntoRpcTx`] over using [`FromConsensusTx`] when specifying trait bounds on a
/// generic function. This way, types that directly implement [`IntoRpcTx`] can be used as arguments
/// as well.
pub trait FromConsensusTx<T> {
    /// An additional context, usually [`TransactionInfo`] in a wrapper that carries some
    /// implementation specific extra information.
    type TxInfo;

    /// Performs the conversion consuming `tx` with `signer` and `tx_info`. See [`FromConsensusTx`]
    /// for details.
    fn from_consensus_tx(tx: T, signer: Address, tx_info: Self::TxInfo) -> Self;
}
impl<TxIn: alloy_consensus::Transaction, T: alloy_consensus::Transaction + From<TxIn>>
    FromConsensusTx<TxIn> for Transaction<T>
{
    type TxInfo = TransactionInfo;

    fn from_consensus_tx(tx: TxIn, signer: Address, tx_info: Self::TxInfo) -> Self {
        // `new_unchecked`: `signer` is taken at face value as the already-recovered
        // sender; no signature recovery is re-done here.
        Self::from_transaction(Recovered::new_unchecked(tx.into(), signer), tx_info)
    }
}
/// Blanket impl: every [`FromConsensusTx`] target automatically provides [`IntoRpcTx`].
impl<ConsensusTx, RpcTx> IntoRpcTx<RpcTx> for ConsensusTx
where
    ConsensusTx: alloy_consensus::Transaction,
    RpcTx: FromConsensusTx<Self>,
{
    type TxInfo = RpcTx::TxInfo;

    fn into_rpc_tx(self, signer: Address, tx_info: Self::TxInfo) -> RpcTx {
        RpcTx::from_consensus_tx(self, signer, tx_info)
    }
}
/// Converts `Tx` into `RpcTx`
///
/// Where:
/// * `Tx` is a transaction from the consensus layer.
/// * `RpcTx` is a transaction response object of the RPC API
///
/// The conversion function is accompanied by `signer`'s address and `tx_info` providing extra
/// context about a transaction in a block.
///
/// The `RpcTxConverter` has two blanket implementations:
/// * `()` assuming `Tx` implements [`IntoRpcTx`] and is used as default for [`RpcConverter`].
/// * `Fn(Tx, Address, TxInfo) -> RpcTx` and can be applied using
///   [`RpcConverter::with_rpc_tx_converter`].
///
/// One should prefer to implement [`IntoRpcTx`] for `Tx` to get the `RpcTxConverter` implementation
/// for free, thanks to the blanket implementation, unless the conversion requires more context. For
/// example, some configuration parameters or access handles to database, network, etc.
pub trait RpcTxConverter<Tx, RpcTx, TxInfo>: Clone + Debug + Unpin + Send + Sync + 'static {
    /// An associated error that can happen during the conversion.
    type Err;

    /// Performs the conversion of `tx` from `Tx` into `RpcTx`.
    ///
    /// See [`RpcTxConverter`] for more information.
    fn convert_rpc_tx(&self, tx: Tx, signer: Address, tx_info: TxInfo) -> Result<RpcTx, Self::Err>;
}
impl<Tx, RpcTx> RpcTxConverter<Tx, RpcTx, Tx::TxInfo> for ()
where
Tx: IntoRpcTx<RpcTx>,
{
type Err = Infallible;
fn convert_rpc_tx(
&self,
tx: Tx,
signer: Address,
tx_info: Tx::TxInfo,
) -> Result<RpcTx, Self::Err> {
Ok(tx.into_rpc_tx(signer, tx_info))
}
}
/// Closure-based converter: any matching `Fn(Tx, Address, TxInfo)` works directly.
impl<Tx, RpcTx, F, TxInfo, E> RpcTxConverter<Tx, RpcTx, TxInfo> for F
where
    F: Fn(Tx, Address, TxInfo) -> Result<RpcTx, E> + Clone + Debug + Unpin + Send + Sync + 'static,
{
    type Err = E;

    fn convert_rpc_tx(&self, tx: Tx, signer: Address, tx_info: TxInfo) -> Result<RpcTx, Self::Err> {
        self(tx, signer, tx_info)
    }
}
/// Converts `TxReq` into `SimTx`.
///
/// Where:
/// * `TxReq` is a transaction request received from an RPC API
/// * `SimTx` is the corresponding consensus layer transaction for execution simulation
///
/// The `SimTxConverter` has two blanket implementations:
/// * `()` assuming `TxReq` implements [`TryIntoSimTx`] and is used as default for [`RpcConverter`].
/// * `Fn(TxReq) -> Result<SimTx, ValueError<TxReq>>` and can be applied using
///   [`RpcConverter::with_sim_tx_converter`].
///
/// One should prefer to implement [`TryIntoSimTx`] for `TxReq` to get the `SimTxConverter`
/// implementation for free, thanks to the blanket implementation, unless the conversion requires
/// more context. For example, some configuration parameters or access handles to database, network,
/// etc.
pub trait SimTxConverter<TxReq, SimTx>: Clone + Debug + Unpin + Send + Sync + 'static {
    /// An associated error that can occur during the conversion.
    type Err: Error;

    /// Performs the conversion from `tx_req` into `SimTx`.
    ///
    /// See [`SimTxConverter`] for more information.
    fn convert_sim_tx(&self, tx_req: TxReq) -> Result<SimTx, Self::Err>;
}
impl<TxReq, SimTx> SimTxConverter<TxReq, SimTx> for ()
where
TxReq: TryIntoSimTx<SimTx> + Debug,
{
type Err = ValueError<TxReq>;
fn convert_sim_tx(&self, tx_req: TxReq) -> Result<SimTx, Self::Err> {
tx_req.try_into_sim_tx()
}
}
/// Closure-based converter: any matching `Fn(TxReq)` works directly.
impl<TxReq, SimTx, F, E> SimTxConverter<TxReq, SimTx> for F
where
    TxReq: Debug,
    E: Error,
    F: Fn(TxReq) -> Result<SimTx, E> + Clone + Debug + Unpin + Send + Sync + 'static,
{
    type Err = E;

    fn convert_sim_tx(&self, tx_req: TxReq) -> Result<SimTx, Self::Err> {
        self(tx_req)
    }
}
/// Converts `self` into `T`.
///
/// Should create a fake transaction for simulation using [`TransactionRequest`].
pub trait TryIntoSimTx<T>
where
    Self: Sized,
{
    /// Performs the conversion.
    ///
    /// Should return a signed typed transaction envelope for the [`eth_simulateV1`] endpoint with a
    /// dummy signature or an error if [required fields] are missing.
    ///
    /// [`eth_simulateV1`]: <https://github.com/ethereum/execution-apis/pull/484>
    /// [required fields]: TransactionRequest::buildable_type
    fn try_into_sim_tx(self) -> Result<T, ValueError<Self>>;
}
/// Adds extra context to [`TransactionInfo`].
pub trait TxInfoMapper<T> {
    /// An associated output type that carries [`TransactionInfo`] with some extra context.
    type Out;

    /// An associated error that can occur during the mapping.
    type Err;

    /// Performs the conversion.
    fn try_map(&self, tx: &T, tx_info: TransactionInfo) -> Result<Self::Out, Self::Err>;
}
/// Identity mapping: passes the [`TransactionInfo`] through unchanged.
impl<T> TxInfoMapper<T> for () {
    type Out = TransactionInfo;
    type Err = Infallible;

    fn try_map(&self, _tx: &T, tx_info: TransactionInfo) -> Result<Self::Out, Self::Err> {
        Ok(tx_info)
    }
}
impl TryIntoSimTx<EthereumTxEnvelope<TxEip4844>> for TransactionRequest {
    fn try_into_sim_tx(self) -> Result<EthereumTxEnvelope<TxEip4844>, ValueError<Self>> {
        // Builds a typed transaction with a dummy signature, per the trait contract.
        Self::build_typed_simulate_transaction(self)
    }
}
/// Converts `TxReq` into `TxEnv`.
///
/// Where:
/// * `TxReq` is a transaction request received from an RPC API
/// * `TxEnv` is the corresponding transaction environment for execution
///
/// The `TxEnvConverter` has two blanket implementations:
/// * `()` assuming `TxReq` implements [`TryIntoTxEnv`] and is used as default for [`RpcConverter`].
/// * `Fn(TxReq, &CfgEnv<Spec>, &BlockEnv) -> Result<TxEnv, E>` and can be applied using
///   [`RpcConverter::with_tx_env_converter`].
///
/// One should prefer to implement [`TryIntoTxEnv`] for `TxReq` to get the `TxEnvConverter`
/// implementation for free, thanks to the blanket implementation, unless the conversion requires
/// more context. For example, some configuration parameters or access handles to database, network,
/// etc.
pub trait TxEnvConverter<TxReq, TxEnv, Spec>:
    Debug + Send + Sync + Unpin + Clone + 'static
{
    /// An associated error that can occur during conversion.
    type Error;

    /// Converts a rpc transaction request into a transaction environment.
    ///
    /// See [`TxEnvConverter`] for more information.
    fn convert_tx_env(
        &self,
        tx_req: TxReq,
        cfg_env: &CfgEnv<Spec>,
        block_env: &BlockEnv,
    ) -> Result<TxEnv, Self::Error>;
}
/// Default converter: forwards to the request's own [`TryIntoTxEnv`] implementation.
impl<TxReq, TxEnv, Spec> TxEnvConverter<TxReq, TxEnv, Spec> for ()
where
    TxReq: TryIntoTxEnv<TxEnv>,
{
    type Error = TxReq::Err;

    fn convert_tx_env(
        &self,
        tx_req: TxReq,
        cfg_env: &CfgEnv<Spec>,
        block_env: &BlockEnv,
    ) -> Result<TxEnv, Self::Error> {
        tx_req.try_into_tx_env(cfg_env, block_env)
    }
}
/// Converts rpc transaction requests into transaction environment using a closure.
impl<F, TxReq, TxEnv, E, Spec> TxEnvConverter<TxReq, TxEnv, Spec> for F
where
    F: Fn(TxReq, &CfgEnv<Spec>, &BlockEnv) -> Result<TxEnv, E>
        + Debug
        + Send
        + Sync
        + Unpin
        + Clone
        + 'static,
    TxReq: Clone,
    E: error::Error + Send + Sync + 'static,
{
    type Error = E;

    fn convert_tx_env(
        &self,
        tx_req: TxReq,
        cfg_env: &CfgEnv<Spec>,
        block_env: &BlockEnv,
    ) -> Result<TxEnv, Self::Error> {
        self(tx_req, cfg_env, block_env)
    }
}
/// Converts `self` into `T`.
///
/// Should create an executable transaction environment using [`TransactionRequest`].
pub trait TryIntoTxEnv<T> {
    /// An associated error that can occur during the conversion.
    type Err;

    /// Performs the conversion.
    fn try_into_tx_env<Spec>(
        self,
        cfg_env: &CfgEnv<Spec>,
        block_env: &BlockEnv,
    ) -> Result<T, Self::Err>;
}
/// An Ethereum specific transaction environment error that can occur during conversion from
/// [`TransactionRequest`].
#[derive(Debug, Error)]
pub enum EthTxEnvError {
    /// Error while decoding or validating transaction request fees.
    #[error(transparent)]
    CallFees(#[from] CallFeesError),
    /// Both data and input fields are set and not equal.
    #[error(transparent)]
    Input(#[from] TransactionInputError),
}
impl TryIntoTxEnv<TxEnv> for TransactionRequest {
    type Err = EthTxEnvError;

    fn try_into_tx_env<Spec>(
        self,
        cfg_env: &CfgEnv<Spec>,
        block_env: &BlockEnv,
    ) -> Result<TxEnv, Self::Err> {
        // Ensure that if versioned hashes are set, they're not empty
        if self.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) {
            return Err(CallFeesError::BlobTransactionMissingBlobHashes.into())
        }

        // Lowest transaction type that can represent the populated request fields.
        let tx_type = self.minimal_tx_type() as u8;

        // Destructure exhaustively so new request fields cause a compile error here.
        let Self {
            from,
            to,
            gas_price,
            max_fee_per_gas,
            max_priority_fee_per_gas,
            gas,
            value,
            input,
            nonce,
            access_list,
            chain_id,
            blob_versioned_hashes,
            max_fee_per_blob_gas,
            authorization_list,
            transaction_type: _,
            sidecar: _,
        } = self;

        // Validate/normalize the fee fields against the block's basefee and blob gas price.
        let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } =
            CallFees::ensure_fees(
                gas_price.map(U256::from),
                max_fee_per_gas.map(U256::from),
                max_priority_fee_per_gas.map(U256::from),
                U256::from(block_env.basefee),
                blob_versioned_hashes.as_deref(),
                max_fee_per_blob_gas.map(U256::from),
                block_env.blob_gasprice().map(U256::from),
            )?;

        let gas_limit = gas.unwrap_or(
            // Use maximum allowed gas limit. The reason for this
            // is that both Erigon and Geth use pre-configured gas cap even if
            // it's possible to derive the gas limit from the block:
            // <https://github.com/ledgerwatch/erigon/blob/eae2d9a79cb70dbe30b3a6b79c436872e4605458/cmd/rpcdaemon/commands/trace_adhoc.go#L956
            // https://github.com/ledgerwatch/erigon/blob/eae2d9a79cb70dbe30b3a6b79c436872e4605458/eth/ethconfig/config.go#L94>
            block_env.gas_limit,
        );

        // Missing fields fall back to call-style defaults.
        let chain_id = chain_id.unwrap_or(cfg_env.chain_id);
        let caller = from.unwrap_or_default();
        let nonce = nonce.unwrap_or_default();

        let env = TxEnv {
            tx_type,
            gas_limit,
            nonce,
            caller,
            gas_price: gas_price.saturating_to(),
            gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()),
            // No `to` means contract creation.
            kind: to.unwrap_or(TxKind::Create),
            value: value.unwrap_or_default(),
            // Errors if both `data` and `input` are set but differ.
            data: input.try_into_unique_input().map_err(EthTxEnvError::from)?.unwrap_or_default(),
            chain_id: Some(chain_id),
            access_list: access_list.unwrap_or_default(),
            // EIP-4844 fields
            blob_hashes: blob_versioned_hashes.unwrap_or_default(),
            max_fee_per_blob_gas: max_fee_per_blob_gas
                .map(|v| v.saturating_to())
                .unwrap_or_default(),
            // EIP-7702 fields
            authorization_list: authorization_list
                .unwrap_or_default()
                .into_iter()
                .map(Either::Left)
                .collect(),
        };

        Ok(env)
    }
}
/// Conversion into transaction RPC response failed.
///
/// Carries the source error rendered as a string, so converter-specific error types can be
/// surfaced through a single concrete wrapper.
#[derive(Debug, Clone, Error)]
#[error("Failed to convert transaction into RPC response: {0}")]
pub struct TransactionConversionError(String);
/// Generic RPC response object converter for `Evm` and network `Network`.
///
/// The main purpose of this struct is to provide an implementation of [`RpcConvert`] for generic
/// associated types. This struct can then be used for conversions in RPC method handlers.
///
/// An [`RpcConvert`] implementation is generated if the following traits are implemented for the
/// network and EVM associated primitives:
/// * [`FromConsensusTx`]: from signed transaction into RPC response object.
/// * [`TryIntoSimTx`]: from RPC transaction request into a simulated transaction.
/// * [`TryIntoTxEnv`] or [`TxEnvConverter`]: from RPC transaction request into an executable
///   transaction.
/// * [`TxInfoMapper`]: from [`TransactionInfo`] into [`FromConsensusTx::TxInfo`]. Should be
///   implemented for a dedicated struct that is assigned to `Map`. If [`FromConsensusTx::TxInfo`]
///   is [`TransactionInfo`] then `()` can be used as `Map` which trivially passes over the input
///   object.
#[derive(Debug)]
pub struct RpcConverter<
    Network,
    Evm,
    Receipt,
    Header = (),
    Map = (),
    SimTx = (),
    RpcTx = (),
    TxEnv = (),
> {
    // Type-level marker for the RPC network types; carries no data.
    network: PhantomData<Network>,
    // Type-level marker for the EVM configuration; carries no data.
    evm: PhantomData<Evm>,
    // Converts consensus receipts into RPC receipts (see `convert_receipts`).
    receipt_converter: Receipt,
    // Converts consensus headers into RPC headers (see `convert_header`).
    header_converter: Header,
    // Maps generic `TransactionInfo` into the converter-specific info type (see `fill`).
    mapper: Map,
    // Converts RPC requests into executable transaction environments (see `tx_env`).
    tx_env_converter: TxEnv,
    // Converts RPC requests into transactions for simulation (see
    // `build_simulate_v1_transaction`).
    sim_tx_converter: SimTx,
    // Converts consensus transactions into RPC response objects (see `fill`).
    rpc_tx_converter: RpcTx,
}
impl<Network, Evm, Receipt> RpcConverter<Network, Evm, Receipt> {
    /// Creates a new [`RpcConverter`] with the given `receipt_converter`.
    ///
    /// All other converters start as the unit type and can be swapped via the `with_*` builder
    /// methods.
    pub const fn new(receipt_converter: Receipt) -> Self {
        Self {
            network: PhantomData,
            evm: PhantomData,
            receipt_converter,
            header_converter: (),
            mapper: (),
            tx_env_converter: (),
            sim_tx_converter: (),
            rpc_tx_converter: (),
        }
    }
}
impl<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv>
    RpcConverter<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv>
{
    /// Converts the network type
    ///
    /// NOTE: every builder below destructures and rebuilds the struct field-by-field because
    /// the generic parameter changes the concrete return type, so struct-update syntax
    /// (`..self`) cannot be used.
    pub fn with_network<N>(
        self,
    ) -> RpcConverter<N, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv> {
        let Self {
            receipt_converter,
            header_converter,
            mapper,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
            ..
        } = self;
        RpcConverter {
            receipt_converter,
            header_converter,
            mapper,
            network: Default::default(),
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        }
    }

    /// Converts the transaction environment type.
    pub fn with_tx_env_converter<TxEnvNew>(
        self,
        tx_env_converter: TxEnvNew,
    ) -> RpcConverter<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnvNew> {
        let Self {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter: _,
            ..
        } = self;
        RpcConverter {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        }
    }

    /// Configures the header converter.
    pub fn with_header_converter<HeaderNew>(
        self,
        header_converter: HeaderNew,
    ) -> RpcConverter<Network, Evm, Receipt, HeaderNew, Map, SimTx, RpcTx, TxEnv> {
        let Self {
            receipt_converter,
            header_converter: _,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        } = self;
        RpcConverter {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        }
    }

    /// Configures the mapper.
    pub fn with_mapper<MapNew>(
        self,
        mapper: MapNew,
    ) -> RpcConverter<Network, Evm, Receipt, Header, MapNew, SimTx, RpcTx, TxEnv> {
        let Self {
            receipt_converter,
            header_converter,
            mapper: _,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        } = self;
        RpcConverter {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        }
    }

    /// Swaps the simulate transaction converter with `sim_tx_converter`.
    pub fn with_sim_tx_converter<SimTxNew>(
        self,
        sim_tx_converter: SimTxNew,
    ) -> RpcConverter<Network, Evm, Receipt, Header, Map, SimTxNew, RpcTx, TxEnv> {
        let Self {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            rpc_tx_converter,
            tx_env_converter,
            ..
        } = self;
        RpcConverter {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        }
    }

    /// Swaps the RPC transaction converter with `rpc_tx_converter`.
    pub fn with_rpc_tx_converter<RpcTxNew>(
        self,
        rpc_tx_converter: RpcTxNew,
    ) -> RpcConverter<Network, Evm, Receipt, Header, Map, SimTx, RpcTxNew, TxEnv> {
        let Self {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            tx_env_converter,
            ..
        } = self;
        RpcConverter {
            receipt_converter,
            header_converter,
            mapper,
            network,
            evm,
            sim_tx_converter,
            rpc_tx_converter,
            tx_env_converter,
        }
    }
}
impl<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv> Default
for RpcConverter<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv>
where
Receipt: Default,
Header: Default,
Map: Default,
SimTx: Default,
RpcTx: Default,
TxEnv: Default,
{
fn default() -> Self {
Self {
network: Default::default(),
evm: Default::default(),
receipt_converter: Default::default(),
header_converter: Default::default(),
mapper: Default::default(),
sim_tx_converter: Default::default(),
rpc_tx_converter: Default::default(),
tx_env_converter: Default::default(),
}
}
}
impl<
Network,
Evm,
Receipt: Clone,
Header: Clone,
Map: Clone,
SimTx: Clone,
RpcTx: Clone,
TxEnv: Clone,
> Clone for RpcConverter<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv>
{
fn clone(&self) -> Self {
Self {
network: Default::default(),
evm: Default::default(),
receipt_converter: self.receipt_converter.clone(),
header_converter: self.header_converter.clone(),
mapper: self.mapper.clone(),
sim_tx_converter: self.sim_tx_converter.clone(),
rpc_tx_converter: self.rpc_tx_converter.clone(),
tx_env_converter: self.tx_env_converter.clone(),
}
}
}
impl<N, Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv> RpcConvert
    for RpcConverter<Network, Evm, Receipt, Header, Map, SimTx, RpcTx, TxEnv>
where
    N: NodePrimitives,
    Network: RpcTypes + Send + Sync + Unpin + Clone + Debug,
    Evm: ConfigureEvm<Primitives = N> + 'static,
    // The receipt converter's error doubles as the error type of the whole converter, so it
    // must absorb every sub-converter's error type.
    Receipt: ReceiptConverter<
            N,
            RpcReceipt = RpcReceipt<Network>,
            Error: From<TransactionConversionError>
                       + From<TxEnv::Error>
                       + From<<Map as TxInfoMapper<TxTy<N>>>::Err>
                       + From<RpcTx::Err>
                       + Error
                       + Unpin
                       + Sync
                       + Send
                       + Into<jsonrpsee_types::ErrorObject<'static>>,
        > + Send
        + Sync
        + Unpin
        + Clone
        + Debug,
    Header: HeaderConverter<HeaderTy<N>, RpcHeader<Network>>,
    Map: TxInfoMapper<TxTy<N>> + Clone + Debug + Unpin + Send + Sync + 'static,
    SimTx: SimTxConverter<RpcTxReq<Network>, TxTy<N>>,
    RpcTx:
        RpcTxConverter<TxTy<N>, Network::TransactionResponse, <Map as TxInfoMapper<TxTy<N>>>::Out>,
    TxEnv: TxEnvConverter<RpcTxReq<Network>, TxEnvFor<Evm>, SpecFor<Evm>>,
{
    type Primitives = N;
    type Network = Network;
    type TxEnv = TxEnvFor<Evm>;
    type Error = Receipt::Error;
    type Spec = SpecFor<Evm>;

    // Converts a recovered consensus transaction into the RPC response object.
    fn fill(
        &self,
        tx: Recovered<TxTy<N>>,
        tx_info: TransactionInfo,
    ) -> Result<Network::TransactionResponse, Self::Error> {
        let (tx, signer) = tx.into_parts();
        // Map the generic tx info into the converter-specific info type first.
        let tx_info = self.mapper.try_map(&tx, tx_info)?;
        Ok(self.rpc_tx_converter.convert_rpc_tx(tx, signer, tx_info)?)
    }

    // Builds a consensus transaction for `eth_simulateV1` from an RPC request.
    fn build_simulate_v1_transaction(
        &self,
        request: RpcTxReq<Network>,
    ) -> Result<TxTy<N>, Self::Error> {
        // The converter-specific error is stringified into the shared wrapper type.
        Ok(self
            .sim_tx_converter
            .convert_sim_tx(request)
            .map_err(|e| TransactionConversionError(e.to_string()))?)
    }

    // Converts an RPC request into an executable transaction environment.
    fn tx_env(
        &self,
        request: RpcTxReq<Network>,
        cfg_env: &CfgEnv<SpecFor<Evm>>,
        block_env: &BlockEnv,
    ) -> Result<Self::TxEnv, Self::Error> {
        self.tx_env_converter.convert_tx_env(request, cfg_env, block_env).map_err(Into::into)
    }

    // Batch-converts consensus receipts into RPC receipts.
    fn convert_receipts(
        &self,
        receipts: Vec<ConvertReceiptInput<'_, Self::Primitives>>,
    ) -> Result<Vec<RpcReceipt<Self::Network>>, Self::Error> {
        self.receipt_converter.convert_receipts(receipts)
    }

    // Converts a sealed consensus header into an RPC header.
    fn convert_header(
        &self,
        header: SealedHeaderFor<Self::Primitives>,
        block_size: usize,
    ) -> Result<RpcHeader<Self::Network>, Self::Error> {
        Ok(self.header_converter.convert_header(header, block_size))
    }
}
/// Optimism specific RPC transaction compatibility implementations.
#[cfg(feature = "op")]
pub mod op {
use super::*;
use alloy_consensus::SignableTransaction;
use alloy_primitives::{Address, Bytes, Signature};
use op_alloy_consensus::{
transaction::{OpDepositInfo, OpTransactionInfo},
OpTxEnvelope,
};
use op_alloy_rpc_types::OpTransactionRequest;
use op_revm::OpTransaction;
use reth_optimism_primitives::DepositReceipt;
use reth_primitives_traits::SignedTransaction;
use reth_storage_api::{errors::ProviderError, ReceiptProvider};
/// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is
/// a deposit.
pub fn try_into_op_tx_info<Tx, T>(
provider: &T,
tx: &Tx,
tx_info: TransactionInfo,
) -> Result<OpTransactionInfo, ProviderError>
where
Tx: op_alloy_consensus::OpTransaction + SignedTransaction,
T: ReceiptProvider<Receipt: DepositReceipt>,
{
let deposit_meta = if tx.is_deposit() {
provider.receipt_by_hash(*tx.tx_hash())?.and_then(|receipt| {
receipt.as_deposit_receipt().map(|receipt| OpDepositInfo {
deposit_receipt_version: receipt.deposit_receipt_version,
deposit_nonce: receipt.deposit_nonce,
})
})
} else {
None
}
.unwrap_or_default();
Ok(OpTransactionInfo::new(tx_info, deposit_meta))
}
impl<T: op_alloy_consensus::OpTransaction + alloy_consensus::Transaction> FromConsensusTx<T>
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-convert/src/fees.rs | crates/rpc/rpc-convert/src/fees.rs | use alloy_primitives::{B256, U256};
use std::cmp::min;
use thiserror::Error;
/// Helper type for representing the fees of a `TransactionRequest`.
///
/// Produced by [`CallFees::ensure_fees`], which validates the raw request fields.
#[derive(Debug)]
pub struct CallFees {
    /// EIP-1559 priority fee
    pub max_priority_fee_per_gas: Option<U256>,
    /// Unified gas price setting
    ///
    /// Will be the configured `basefee` if unset in the request
    ///
    /// `gasPrice` for legacy,
    /// `maxFeePerGas` for EIP-1559
    pub gas_price: U256,
    /// Max Fee per Blob gas for EIP-4844 transactions
    pub max_fee_per_blob_gas: Option<U256>,
}
impl CallFees {
    /// Ensures the fields of a `TransactionRequest` are not conflicting.
    ///
    /// # EIP-4844 transactions
    ///
    /// Blob transactions have an additional fee parameter `maxFeePerBlobGas`.
    /// If the `maxFeePerBlobGas` or `blobVersionedHashes` are set we treat it as an EIP-4844
    /// transaction.
    ///
    /// Note: Due to the `Default` impl of [`BlockEnv`] (Some(0)) this assumes the `block_blob_fee`
    /// is always `Some`
    ///
    /// ## Notable design decisions
    ///
    /// For compatibility reasons, this contains several exceptions when fee values are validated:
    /// - If both `maxFeePerGas` and `maxPriorityFeePerGas` are set to `0` they are treated as
    ///   missing values, bypassing fee checks wrt. `baseFeePerGas`.
    ///
    /// This mirrors geth's behaviour when transaction requests are executed: <https://github.com/ethereum/go-ethereum/blob/380688c636a654becc8f114438c2a5d93d2db032/core/state_transition.go#L306-L306>
    ///
    /// [`BlockEnv`]: revm_context::BlockEnv
    pub fn ensure_fees(
        call_gas_price: Option<U256>,
        call_max_fee: Option<U256>,
        call_priority_fee: Option<U256>,
        block_base_fee: U256,
        blob_versioned_hashes: Option<&[B256]>,
        max_fee_per_blob_gas: Option<U256>,
        block_blob_fee: Option<U256>,
    ) -> Result<Self, CallFeesError> {
        /// Get the effective gas price of a transaction as specified in EIP-1559 with relevant
        /// checks.
        fn get_effective_gas_price(
            max_fee_per_gas: Option<U256>,
            max_priority_fee_per_gas: Option<U256>,
            block_base_fee: U256,
        ) -> Result<U256, CallFeesError> {
            match max_fee_per_gas {
                Some(max_fee) => {
                    let max_priority_fee_per_gas = max_priority_fee_per_gas.unwrap_or(U256::ZERO);

                    // only enforce the fee cap if provided input is not zero
                    if !(max_fee.is_zero() && max_priority_fee_per_gas.is_zero()) &&
                        max_fee < block_base_fee
                    {
                        // `base_fee_per_gas` is greater than the `max_fee_per_gas`
                        return Err(CallFeesError::FeeCapTooLow)
                    }
                    if max_fee < max_priority_fee_per_gas {
                        return Err(
                            // `max_priority_fee_per_gas` is greater than the `max_fee_per_gas`
                            CallFeesError::TipAboveFeeCap,
                        )
                    }
                    // effective price is min(cap, base + tip)
                    // ref <https://github.com/ethereum/go-ethereum/blob/0dd173a727dd2d2409b8e401b22e85d20c25b71f/internal/ethapi/transaction_args.go#L446-L446>
                    Ok(min(
                        max_fee,
                        block_base_fee
                            .checked_add(max_priority_fee_per_gas)
                            .ok_or(CallFeesError::TipVeryHigh)?,
                    ))
                }
                // no cap given: price is base fee plus the (possibly zero) tip
                None => Ok(block_base_fee
                    .checked_add(max_priority_fee_per_gas.unwrap_or(U256::ZERO))
                    .ok_or(CallFeesError::TipVeryHigh)?),
            }
        }

        // a request is considered a blob transaction if it carries at least one versioned hash
        let has_blob_hashes =
            blob_versioned_hashes.as_ref().map(|blobs| !blobs.is_empty()).unwrap_or(false);

        match (call_gas_price, call_max_fee, call_priority_fee, max_fee_per_blob_gas) {
            (gas_price, None, None, None) => {
                // either legacy transaction or no fee fields are specified
                // when no fields are specified, set gas price to zero
                let gas_price = gas_price.unwrap_or(U256::ZERO);
                Ok(Self {
                    gas_price,
                    max_priority_fee_per_gas: None,
                    // blob fee falls back to the block's blob fee when hashes are present
                    max_fee_per_blob_gas: has_blob_hashes.then_some(block_blob_fee).flatten(),
                })
            }
            (None, max_fee_per_gas, max_priority_fee_per_gas, None) => {
                // request for eip-1559 transaction
                let effective_gas_price = get_effective_gas_price(
                    max_fee_per_gas,
                    max_priority_fee_per_gas,
                    block_base_fee,
                )?;
                let max_fee_per_blob_gas = has_blob_hashes.then_some(block_blob_fee).flatten();

                Ok(Self {
                    gas_price: effective_gas_price,
                    max_priority_fee_per_gas,
                    max_fee_per_blob_gas,
                })
            }
            (None, max_fee_per_gas, max_priority_fee_per_gas, Some(max_fee_per_blob_gas)) => {
                // request for eip-4844 transaction
                let effective_gas_price = get_effective_gas_price(
                    max_fee_per_gas,
                    max_priority_fee_per_gas,
                    block_base_fee,
                )?;
                // Ensure blob_hashes are present
                if !has_blob_hashes {
                    // Blob transaction but no blob hashes
                    return Err(CallFeesError::BlobTransactionMissingBlobHashes)
                }

                Ok(Self {
                    gas_price: effective_gas_price,
                    max_priority_fee_per_gas,
                    max_fee_per_blob_gas: Some(max_fee_per_blob_gas),
                })
            }
            _ => {
                // this fallback covers incompatible combinations of fields
                // (e.g. `gasPrice` combined with EIP-1559 or blob fee fields)
                Err(CallFeesError::ConflictingFeeFieldsInRequest)
            }
        }
    }
}
/// Error coming from decoding and validating transaction request fees.
#[derive(Debug, Error)]
pub enum CallFeesError {
    /// Thrown when a call or transaction request (`eth_call`, `eth_estimateGas`,
    /// `eth_sendTransaction`) contains conflicting fields (legacy, EIP-1559)
    #[error("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")]
    ConflictingFeeFieldsInRequest,
    /// Thrown post London if the transaction's fee is less than the base fee of the block
    #[error("max fee per gas less than block base fee")]
    FeeCapTooLow,
    /// Thrown to ensure no one is able to specify a transaction with a tip higher than the total
    /// fee cap.
    #[error("max priority fee per gas higher than max fee per gas")]
    TipAboveFeeCap,
    /// A sanity error to avoid huge numbers specified in the tip field (i.e. `base + tip`
    /// overflowing `U256`).
    #[error("max priority fee per gas higher than 2^256-1")]
    TipVeryHigh,
    /// Blob transaction has no versioned hashes
    #[error("blob transaction missing blob hashes")]
    BlobTransactionMissingBlobHashes,
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::GWEI_TO_WEI;

    /// Shorthand for a gwei-denominated `U256`.
    fn gwei(amount: u64) -> U256 {
        U256::from(amount * GWEI_TO_WEI)
    }

    #[test]
    fn test_ensure_0_fallback() {
        // No fee fields at all: the gas price defaults to zero.
        let fees =
            CallFees::ensure_fees(None, None, None, U256::from(99), None, None, Some(U256::ZERO))
                .unwrap();
        assert!(fees.gas_price.is_zero());
    }

    #[test]
    fn test_ensure_max_fee_0_exception() {
        // A zero max fee bypasses the base-fee check (geth compatibility).
        let fees =
            CallFees::ensure_fees(None, Some(U256::ZERO), None, U256::from(99), None, None, None)
                .unwrap();
        assert!(fees.gas_price.is_zero());
    }

    #[test]
    fn test_blob_fees() {
        // Without blob hashes the blob fee must stay unset.
        let fees =
            CallFees::ensure_fees(None, None, None, U256::from(99), None, None, Some(U256::ZERO))
                .unwrap();
        assert!(fees.gas_price.is_zero());
        assert_eq!(fees.max_fee_per_blob_gas, None);

        // With a blob hash present the block's blob fee is picked up.
        let hashes = [B256::from(U256::ZERO)];
        let fees = CallFees::ensure_fees(
            None,
            None,
            None,
            U256::from(99),
            Some(&hashes),
            None,
            Some(U256::from(99)),
        )
        .unwrap();
        assert!(fees.gas_price.is_zero());
        assert_eq!(fees.max_fee_per_blob_gas, Some(U256::from(99)));
    }

    #[test]
    fn test_eip_1559_fees() {
        let base_fee = gwei(15);

        // base + tip >= cap: effective price is capped at the max fee.
        let fees = CallFees::ensure_fees(
            None,
            Some(gwei(25)),
            Some(gwei(15)),
            base_fee,
            None,
            None,
            Some(U256::ZERO),
        )
        .unwrap();
        assert_eq!(fees.gas_price, gwei(25));

        // base + tip < cap: effective price is base + tip.
        let fees = CallFees::ensure_fees(
            None,
            Some(gwei(25)),
            Some(gwei(5)),
            base_fee,
            None,
            None,
            Some(U256::ZERO),
        )
        .unwrap();
        assert_eq!(fees.gas_price, gwei(20));

        // cap equal to tip is allowed; effective price is the cap.
        let fees = CallFees::ensure_fees(
            None,
            Some(gwei(30)),
            Some(gwei(30)),
            base_fee,
            None,
            None,
            Some(U256::ZERO),
        )
        .unwrap();
        assert_eq!(fees.gas_price, gwei(30));

        // Tip above the cap is rejected.
        let result = CallFees::ensure_fees(
            None,
            Some(gwei(30)),
            Some(gwei(31)),
            base_fee,
            None,
            None,
            Some(U256::ZERO),
        );
        assert!(result.is_err());

        // Cap below the block base fee is rejected.
        let result = CallFees::ensure_fees(
            None,
            Some(gwei(5)),
            Some(gwei(1)),
            base_fee,
            None,
            None,
            Some(U256::ZERO),
        );
        assert!(result.is_err());

        // base + tip overflowing U256 is rejected.
        let result = CallFees::ensure_fees(
            None,
            Some(U256::MAX),
            Some(U256::MAX),
            gwei(5),
            None,
            None,
            Some(U256::ZERO),
        );
        assert!(result.is_err());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/config.rs | crates/rpc/rpc-builder/src/config.rs | use jsonrpsee::server::ServerConfigBuilder;
use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path};
use reth_rpc::ValidationApiConfig;
use reth_rpc_eth_types::{EthConfig, EthStateCacheConfig, GasPriceOracleConfig};
use reth_rpc_layer::{JwtError, JwtSecret};
use reth_rpc_server_types::RpcModuleSelection;
use std::{net::SocketAddr, path::PathBuf};
use tower::layer::util::Identity;
use tracing::{debug, warn};
use crate::{
auth::AuthServerConfig, error::RpcError, IpcServerBuilder, RpcModuleConfig, RpcServerConfig,
TransportRpcModuleConfig,
};
/// A trait that provides a configured RPC server.
///
/// This provides all basic config values for the RPC server and is implemented by the
/// [`RpcServerArgs`] type.
pub trait RethRpcServerConfig {
    /// Returns whether ipc is enabled.
    fn is_ipc_enabled(&self) -> bool;

    /// Returns the path to the target ipc socket if enabled.
    fn ipc_path(&self) -> &str;

    /// The configured ethereum RPC settings.
    fn eth_config(&self) -> EthConfig;

    /// The configured flashbots validation API settings.
    fn flashbots_config(&self) -> ValidationApiConfig;

    /// Returns state cache configuration.
    fn state_cache_config(&self) -> EthStateCacheConfig;

    /// Returns the max request size in bytes.
    fn rpc_max_request_size_bytes(&self) -> u32;

    /// Returns the max response size in bytes.
    fn rpc_max_response_size_bytes(&self) -> u32;

    /// Extracts the gas price oracle config from the args.
    fn gas_price_oracle_config(&self) -> GasPriceOracleConfig;

    /// Creates the [`TransportRpcModuleConfig`] from cli args.
    ///
    /// This sets all the api modules, and configures additional settings like gas price oracle
    /// settings in the [`TransportRpcModuleConfig`].
    fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig;

    /// Returns the default server config for http/ws
    fn http_ws_server_builder(&self) -> ServerConfigBuilder;

    /// Returns the default ipc server builder
    fn ipc_server_builder(&self) -> IpcServerBuilder<Identity, Identity>;

    /// Creates the [`RpcServerConfig`] from cli args.
    fn rpc_server_config(&self) -> RpcServerConfig;

    /// Creates the [`AuthServerConfig`] from cli args.
    fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result<AuthServerConfig, RpcError>;

    /// The execution layer and consensus layer clients SHOULD accept a configuration parameter:
    /// jwt-secret, which designates a file containing the hex-encoded 256 bit secret key to be used
    /// for verifying/generating JWT tokens.
    ///
    /// If such a parameter is given, but the file cannot be read, or does not contain a hex-encoded
    /// key of 256 bits, the client SHOULD treat this as an error.
    ///
    /// If such a parameter is not given, the client SHOULD generate such a token, valid for the
    /// duration of the execution, and SHOULD store the hex-encoded secret as a jwt.hex file on
    /// the filesystem. This file can then be used to provision the counterpart client.
    ///
    /// The `default_jwt_path` provided as an argument will be used as the default location for the
    /// jwt secret in case the `auth_jwtsecret` argument is not provided.
    fn auth_jwt_secret(&self, default_jwt_path: PathBuf) -> Result<JwtSecret, JwtError>;

    /// Returns the configured jwt secret key for the regular rpc servers, if any.
    ///
    /// Note: this is not used for the auth server (engine API).
    fn rpc_secret_key(&self) -> Option<JwtSecret>;
}
impl RethRpcServerConfig for RpcServerArgs {
    fn is_ipc_enabled(&self) -> bool {
        // By default IPC is enabled therefore it is enabled if the `ipcdisable` is false.
        !self.ipcdisable
    }

    fn ipc_path(&self) -> &str {
        self.ipcpath.as_str()
    }

    fn eth_config(&self) -> EthConfig {
        // Start from defaults and layer all CLI-provided limits/settings on top.
        EthConfig::default()
            .max_tracing_requests(self.rpc_max_tracing_requests)
            .max_trace_filter_blocks(self.rpc_max_trace_filter_blocks)
            // `unwrap_or_max` maps an unset (or zero) CLI value to "unlimited".
            .max_blocks_per_filter(self.rpc_max_blocks_per_filter.unwrap_or_max())
            .max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize)
            .eth_proof_window(self.rpc_eth_proof_window)
            .rpc_gas_cap(self.rpc_gas_cap)
            .rpc_max_simulate_blocks(self.rpc_max_simulate_blocks)
            .state_cache(self.state_cache_config())
            .gpo_config(self.gas_price_oracle_config())
            .proof_permits(self.rpc_proof_permits)
            .pending_block_kind(self.rpc_pending_block)
            .raw_tx_forwarder(self.rpc_forwarder.clone())
    }

    fn flashbots_config(&self) -> ValidationApiConfig {
        ValidationApiConfig {
            disallow: self.builder_disallow.clone().unwrap_or_default(),
            // Validation uses the same window as eth proofs.
            validation_window: self.rpc_eth_proof_window,
        }
    }

    fn state_cache_config(&self) -> EthStateCacheConfig {
        EthStateCacheConfig {
            max_blocks: self.rpc_state_cache.max_blocks,
            max_receipts: self.rpc_state_cache.max_receipts,
            max_headers: self.rpc_state_cache.max_headers,
            max_concurrent_db_requests: self.rpc_state_cache.max_concurrent_db_requests,
        }
    }

    fn rpc_max_request_size_bytes(&self) -> u32 {
        // The CLI value is expressed in megabytes; convert to bytes without overflow.
        self.rpc_max_request_size.get().saturating_mul(1024 * 1024)
    }

    fn rpc_max_response_size_bytes(&self) -> u32 {
        // The CLI value is expressed in megabytes; convert to bytes without overflow.
        self.rpc_max_response_size.get().saturating_mul(1024 * 1024)
    }

    fn gas_price_oracle_config(&self) -> GasPriceOracleConfig {
        self.gas_price_oracle.gas_price_oracle_config()
    }

    fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig {
        let mut config = TransportRpcModuleConfig::default()
            .with_config(RpcModuleConfig::new(self.eth_config(), self.flashbots_config()));

        // For http/ws, an explicit `--…api` selection wins; otherwise the standard modules are
        // exposed.
        if self.http {
            config = config.with_http(
                self.http_api
                    .clone()
                    .unwrap_or_else(|| RpcModuleSelection::standard_modules().into()),
            );
        }

        if self.ws {
            config = config.with_ws(
                self.ws_api
                    .clone()
                    .unwrap_or_else(|| RpcModuleSelection::standard_modules().into()),
            );
        }

        if self.is_ipc_enabled() {
            config = config.with_ipc(RpcModuleSelection::default_ipc_modules());
        }

        config
    }

    fn http_ws_server_builder(&self) -> ServerConfigBuilder {
        ServerConfigBuilder::new()
            .max_connections(self.rpc_max_connections.get())
            .max_request_body_size(self.rpc_max_request_size_bytes())
            .max_response_body_size(self.rpc_max_response_size_bytes())
            .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get())
    }

    fn ipc_server_builder(&self) -> IpcServerBuilder<Identity, Identity> {
        IpcServerBuilder::default()
            .max_subscriptions_per_connection(self.rpc_max_subscriptions_per_connection.get())
            .max_request_body_size(self.rpc_max_request_size_bytes())
            .max_response_body_size(self.rpc_max_response_size_bytes())
            .max_connections(self.rpc_max_connections.get())
            .set_ipc_socket_permissions(self.ipc_socket_permissions.clone())
    }

    fn rpc_server_config(&self) -> RpcServerConfig {
        let mut config = RpcServerConfig::default().with_jwt_secret(self.rpc_secret_key());

        // Warn about a likely misconfiguration: module selection given but transport disabled.
        if self.http_api.is_some() && !self.http {
            warn!(
                target: "reth::cli",
                "The --http.api flag is set but --http is not enabled. HTTP RPC API will not be exposed."
            );
        }

        if self.http {
            let socket_address = SocketAddr::new(self.http_addr, self.http_port);
            config = config
                .with_http_address(socket_address)
                .with_http(self.http_ws_server_builder())
                .with_http_cors(self.http_corsdomain.clone())
                .with_http_disable_compression(self.http_disable_compression)
                .with_ws_cors(self.ws_allowed_origins.clone());
        }

        if self.ws {
            let socket_address = SocketAddr::new(self.ws_addr, self.ws_port);
            config = config.with_ws_address(socket_address).with_ws(self.http_ws_server_builder());
        }

        if self.is_ipc_enabled() {
            config =
                config.with_ipc(self.ipc_server_builder()).with_ipc_endpoint(self.ipcpath.clone());
        }

        config
    }

    fn auth_server_config(&self, jwt_secret: JwtSecret) -> Result<AuthServerConfig, RpcError> {
        let address = SocketAddr::new(self.auth_addr, self.auth_port);

        let mut builder = AuthServerConfig::builder(jwt_secret).socket_addr(address);
        if self.auth_ipc {
            builder = builder
                .ipc_endpoint(self.auth_ipc_path.clone())
                .with_ipc_config(self.ipc_server_builder());
        }
        Ok(builder.build())
    }

    fn auth_jwt_secret(&self, default_jwt_path: PathBuf) -> Result<JwtSecret, JwtError> {
        match self.auth_jwtsecret.as_ref() {
            Some(fpath) => {
                debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file")
                ;
                JwtSecret::from_file(fpath)
            }
            // No explicit path: read the default file or create a fresh secret there.
            None => get_or_create_jwt_secret_from_path(&default_jwt_path),
        }
    }

    fn rpc_secret_key(&self) -> Option<JwtSecret> {
        self.rpc_jwtsecret
    }
}
#[cfg(test)]
mod tests {
    use clap::{Args, Parser};
    use reth_node_core::args::RpcServerArgs;
    use reth_rpc_eth_types::RPC_DEFAULT_GAS_CAP;
    use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
    use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};

    use crate::config::RethRpcServerConfig;

    /// A helper type to parse Args more easily
    #[derive(Parser)]
    struct CommandParser<T: Args> {
        #[command(flatten)]
        args: T,
    }

    /// Parses `argv` into [`RpcServerArgs`], panicking on invalid input.
    fn parse_args(argv: &[&str]) -> RpcServerArgs {
        CommandParser::<RpcServerArgs>::parse_from(argv).args
    }

    /// Parses `--http.api <http_api>` with http+ws enabled and checks that the HTTP selection is
    /// `eth,admin,debug` while WS falls back to the standard modules.
    fn assert_http_selection(http_api: &str) {
        let config = parse_args(&["reth", "--http.api", http_api, "--http", "--ws"])
            .transport_rpc_module_config();
        let expected = [RethRpcModule::Eth, RethRpcModule::Admin, RethRpcModule::Debug];
        assert_eq!(config.http().cloned().unwrap().into_selection(), expected.into());
        assert_eq!(
            config.ws().cloned().unwrap().into_selection(),
            RpcModuleSelection::standard_modules()
        );
    }

    #[test]
    fn test_rpc_gas_cap() {
        // Default gas cap applies when nothing is passed.
        let config = parse_args(&["reth"]).eth_config();
        assert_eq!(config.rpc_gas_cap, u64::from(RPC_DEFAULT_GAS_CAP));

        // An explicit cap is honored.
        let config = parse_args(&["reth", "--rpc.gascap", "1000"]).eth_config();
        assert_eq!(config.rpc_gas_cap, 1000);

        // A zero gas cap is rejected at argument-parsing time.
        let parsed = CommandParser::<RpcServerArgs>::try_parse_from(["reth", "--rpc.gascap", "0"]);
        assert!(parsed.is_err());
    }

    #[test]
    fn test_transport_rpc_module_config() {
        assert_http_selection("eth,admin,debug");
    }

    #[test]
    fn test_transport_rpc_module_trim_config() {
        // Surrounding whitespace in the module list is ignored.
        assert_http_selection(" eth, admin, debug");
    }

    #[test]
    fn test_unique_rpc_modules() {
        // Duplicate module names collapse into a unique selection.
        assert_http_selection(" eth, admin, debug, eth,admin");
    }

    #[test]
    fn test_rpc_server_config() {
        let config = parse_args(&[
            "reth",
            "--http.api",
            "eth,admin,debug",
            "--http",
            "--ws",
            "--ws.addr",
            "127.0.0.1",
            "--ws.port",
            "8888",
        ])
        .rpc_server_config();

        // HTTP stays on the default port; WS picks up the explicit address/port.
        assert_eq!(
            config.http_address().unwrap(),
            SocketAddr::V4(SocketAddrV4::new(
                Ipv4Addr::LOCALHOST,
                constants::DEFAULT_HTTP_RPC_PORT
            ))
        );
        assert_eq!(
            config.ws_address().unwrap(),
            SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8888))
        );
        assert_eq!(config.ipc_endpoint().unwrap(), constants::DEFAULT_IPC_ENDPOINT);
    }

    #[test]
    fn test_zero_filter_limits() {
        let config = parse_args(&[
            "reth",
            "--rpc-max-blocks-per-filter",
            "0",
            "--rpc-max-logs-per-response",
            "0",
        ])
        .eth_config()
        .filter_config();

        // Zero is interpreted as "unlimited".
        assert_eq!(config.max_blocks_per_filter, Some(u64::MAX));
        assert_eq!(config.max_logs_per_response, Some(usize::MAX));
    }

    #[test]
    fn test_custom_filter_limits() {
        let config = parse_args(&[
            "reth",
            "--rpc-max-blocks-per-filter",
            "100",
            "--rpc-max-logs-per-response",
            "200",
        ])
        .eth_config()
        .filter_config();

        assert_eq!(config.max_blocks_per_filter, Some(100));
        assert_eq!(config.max_logs_per_response, Some(200));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/lib.rs | crates/rpc/rpc-builder/src/lib.rs | //! Configure reth RPC.
//!
//! This crate contains several builder and config types that allow to configure the selection of
//! [`RethRpcModule`] specific to transports (ws, http, ipc).
//!
//! The [`RpcModuleBuilder`] is the main entrypoint for configuring all reth modules. It takes
//! instances of components required to start the servers, such as provider impls, network and
//! transaction pool. [`RpcModuleBuilder::build`] returns a [`TransportRpcModules`] which contains
//! the transport specific config (what APIs are available via this transport).
//!
//! The [`RpcServerConfig`] is used to assemble and start the http server, ws server, ipc servers,
//! it requires the [`TransportRpcModules`] so it can start the servers with the configured modules.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics};
use alloy_network::{Ethereum, IntoWallet};
use alloy_provider::{fillers::RecommendedFillers, Provider, ProviderBuilder};
use core::marker::PhantomData;
use error::{ConflictingModules, RpcError, ServerKind};
use http::{header::AUTHORIZATION, HeaderMap};
use jsonrpsee::{
core::RegisterMethodError,
server::{middleware::rpc::RpcServiceBuilder, AlreadyStoppedError, IdProvider, ServerHandle},
Methods, RpcModule,
};
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_consensus::{ConsensusError, FullConsensus};
use reth_evm::ConfigureEvm;
use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers};
use reth_primitives_traits::NodePrimitives;
use reth_rpc::{
AdminApi, DebugApi, EngineEthApi, EthApi, EthApiBuilder, EthBundle, MinerApi, NetApi,
OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api,
};
use reth_rpc_api::servers::*;
use reth_rpc_eth_api::{
helpers::{
pending_block::PendingEnvBuilder, Call, EthApiSpec, EthTransactions, LoadPendingBlock,
TraceExt,
},
node::RpcNodeCoreAdapter,
EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader,
RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq,
};
use reth_rpc_eth_types::{receipt::EthReceiptConverter, EthConfig, EthSubscriptionIdProvider};
use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret};
use reth_storage_api::{
AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, ProviderBlock,
StateProviderFactory,
};
use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor};
use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fmt::Debug,
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tower_http::cors::CorsLayer;
pub use cors::CorsDomainError;
// re-export for convenience
pub use jsonrpsee::server::ServerBuilder;
use jsonrpsee::server::ServerConfigBuilder;
pub use reth_ipc::server::{
Builder as IpcServerBuilder, RpcServiceBuilder as IpcRpcServiceBuilder,
};
pub use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
pub use tower::layer::util::{Identity, Stack};
/// Auth server utilities.
pub mod auth;
/// RPC server utilities.
pub mod config;
/// Utils for installing Rpc middleware
pub mod middleware;
/// Cors utilities.
mod cors;
/// Rpc error utilities.
pub mod error;
/// Eth utils
pub mod eth;
pub use eth::EthHandlers;
// Rpc server metrics
mod metrics;
use crate::middleware::RethRpcMiddleware;
pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService};
use reth_chain_state::CanonStateSubscriptions;
use reth_rpc::eth::sim_bundle::EthSimBundle;
// Rpc rate limiter
pub mod rate_limiter;
/// A builder type to configure the RPC module: See [`RpcModule`]
///
/// This is the main entrypoint and the easiest way to configure an RPC server.
/// A builder type to configure the RPC module: See [`RpcModule`]
///
/// This is the main entrypoint and the easiest way to configure an RPC server.
#[derive(Debug, Clone)]
pub struct RpcModuleBuilder<N, Provider, Pool, Network, EvmConfig, Consensus> {
/// The Provider type to use when creating all rpc handlers
provider: Provider,
/// The Pool type to use when creating all rpc handlers
pool: Pool,
/// The Network type to use when creating all rpc handlers
network: Network,
/// How additional tasks are spawned, for example in the eth pubsub namespace
executor: Box<dyn TaskSpawner + 'static>,
/// Defines how the EVM should be configured before execution.
evm_config: EvmConfig,
/// The consensus implementation.
consensus: Consensus,
/// Node data primitives.
_primitives: PhantomData<N>,
}
// === impl RpcBuilder ===
impl<N, Provider, Pool, Network, EvmConfig, Consensus>
RpcModuleBuilder<N, Provider, Pool, Network, EvmConfig, Consensus>
{
/// Create a new instance of the builder
pub const fn new(
provider: Provider,
pool: Pool,
network: Network,
executor: Box<dyn TaskSpawner + 'static>,
evm_config: EvmConfig,
consensus: Consensus,
) -> Self {
Self { provider, pool, network, executor, evm_config, consensus, _primitives: PhantomData }
}
/// Configure the provider instance.
///
/// Note: this changes the `Provider` type parameter, so it destructures the builder and
/// rebuilds it with all other components carried over.
pub fn with_provider<P>(
self,
provider: P,
) -> RpcModuleBuilder<N, P, Pool, Network, EvmConfig, Consensus> {
let Self { pool, network, executor, evm_config, consensus, _primitives, .. } = self;
RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives }
}
/// Configure the transaction pool instance.
pub fn with_pool<P>(
self,
pool: P,
) -> RpcModuleBuilder<N, Provider, P, Network, EvmConfig, Consensus> {
let Self { provider, network, executor, evm_config, consensus, _primitives, .. } = self;
RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives }
}
/// Configure a [`NoopTransactionPool`] instance.
///
/// Caution: This will configure a pool API that does absolutely nothing.
/// This is only intended to allow easier setup of namespaces that depend on the
/// [`EthApi`] which requires a [`TransactionPool`] implementation.
pub fn with_noop_pool(
self,
) -> RpcModuleBuilder<N, Provider, NoopTransactionPool, Network, EvmConfig, Consensus> {
let Self { provider, executor, network, evm_config, consensus, _primitives, .. } = self;
RpcModuleBuilder {
provider,
executor,
network,
evm_config,
pool: NoopTransactionPool::default(),
consensus,
_primitives,
}
}
/// Configure the network instance.
pub fn with_network<Net>(
self,
network: Net,
) -> RpcModuleBuilder<N, Provider, Pool, Net, EvmConfig, Consensus> {
let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self;
RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives }
}
/// Configure a [`NoopNetwork`] instance.
///
/// Caution: This will configure a network API that does absolutely nothing.
/// This is only intended to allow easier setup of namespaces that depend on the
/// [`EthApi`] which requires a [`NetworkInfo`] implementation.
pub fn with_noop_network(
self,
) -> RpcModuleBuilder<N, Provider, Pool, NoopNetwork, EvmConfig, Consensus> {
let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self;
RpcModuleBuilder {
provider,
pool,
executor,
network: NoopNetwork::default(),
evm_config,
consensus,
_primitives,
}
}
/// Configure the task executor to use for additional tasks.
pub fn with_executor(self, executor: Box<dyn TaskSpawner + 'static>) -> Self {
let Self { pool, network, provider, evm_config, consensus, _primitives, .. } = self;
Self { provider, network, pool, executor, evm_config, consensus, _primitives }
}
/// Configure [`TokioTaskExecutor`] as the task executor to use for additional tasks.
///
/// This will spawn additional tasks directly via `tokio::task::spawn`, See
/// [`TokioTaskExecutor`].
pub fn with_tokio_executor(self) -> Self {
let Self { pool, network, provider, evm_config, consensus, _primitives, .. } = self;
Self {
provider,
network,
pool,
executor: Box::new(TokioTaskExecutor::default()),
evm_config,
consensus,
_primitives,
}
}
/// Configure the evm configuration type
pub fn with_evm_config<E>(
self,
evm_config: E,
) -> RpcModuleBuilder<N, Provider, Pool, Network, E, Consensus> {
let Self { provider, pool, executor, network, consensus, _primitives, .. } = self;
RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives }
}
/// Configure the consensus implementation.
pub fn with_consensus<C>(
self,
consensus: C,
) -> RpcModuleBuilder<N, Provider, Pool, Network, EvmConfig, C> {
let Self { provider, network, pool, executor, evm_config, _primitives, .. } = self;
RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives }
}
/// Instantiates a new [`EthApiBuilder`] from the configured components.
///
/// Components are cloned out of the builder, so the builder itself remains usable.
#[expect(clippy::type_complexity)]
pub fn eth_api_builder<ChainSpec>(
&self,
) -> EthApiBuilder<
RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>,
RpcConverter<Ethereum, EvmConfig, EthReceiptConverter<ChainSpec>>,
>
where
Provider: Clone,
Pool: Clone,
Network: Clone,
EvmConfig: Clone,
RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>:
RpcNodeCore<Provider: ChainSpecProvider<ChainSpec = ChainSpec>, Evm = EvmConfig>,
{
EthApiBuilder::new(
self.provider.clone(),
self.pool.clone(),
self.network.clone(),
self.evm_config.clone(),
)
}
/// Initializes a new [`EthApiServer`] with the configured components and default settings.
///
/// Note: This spawns all necessary tasks.
///
/// See also [`EthApiBuilder`].
#[expect(clippy::type_complexity)]
pub fn bootstrap_eth_api<ChainSpec>(
&self,
) -> EthApi<
RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>,
RpcConverter<Ethereum, EvmConfig, EthReceiptConverter<ChainSpec>>,
>
where
Provider: Clone,
Pool: Clone,
Network: Clone,
EvmConfig: ConfigureEvm + Clone,
RpcNodeCoreAdapter<Provider, Pool, Network, EvmConfig>:
RpcNodeCore<Provider: ChainSpecProvider<ChainSpec = ChainSpec>, Evm = EvmConfig>,
RpcConverter<Ethereum, EvmConfig, EthReceiptConverter<ChainSpec>>: RpcConvert,
(): PendingEnvBuilder<EvmConfig>,
{
self.eth_api_builder().build()
}
}
impl<N, Provider, Pool, Network, EvmConfig, Consensus>
RpcModuleBuilder<N, Provider, Pool, Network, EvmConfig, Consensus>
where
N: NodePrimitives,
Provider: FullRpcProvider<Block = N::Block, Receipt = N::Receipt, Header = N::BlockHeader>
+ CanonStateSubscriptions<Primitives = N>
+ AccountReader
+ ChangeSetReader,
Pool: TransactionPool + 'static,
Network: NetworkInfo + Peers + Clone + 'static,
EvmConfig: ConfigureEvm<Primitives = N> + 'static,
Consensus: FullConsensus<N, Error = ConsensusError> + Clone + 'static,
{
/// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can
/// be used to start the transport server(s).
///
/// This behaves exactly as [`RpcModuleBuilder::build`] for the [`TransportRpcModules`], but
/// also configures the auth (engine api) server, which exposes a subset of the `eth_`
/// namespace.
pub fn build_with_auth_server<EthApi>(
self,
module_config: TransportRpcModuleConfig,
engine: impl IntoEngineApiRpcModule,
eth: EthApi,
) -> (
TransportRpcModules,
AuthRpcModule,
RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>,
)
where
EthApi: FullEthApiServer<Provider = Provider, Pool = Pool>,
{
let Self { provider, pool, network, executor, consensus, evm_config, .. } = self;
// Module settings default when the transport config carries none.
let config = module_config.config.clone().unwrap_or_default();
let mut registry = RpcRegistryInner::new(
provider, pool, network, executor, consensus, config, evm_config, eth,
);
let modules = registry.create_transport_rpc_modules(module_config);
let auth_module = registry.create_auth_module(engine);
(modules, auth_module, registry)
}
/// Converts the builder into a [`RpcRegistryInner`] which can be used to create all
/// components.
///
/// This is useful for getting access to API handlers directly
pub fn into_registry<EthApi>(
self,
config: RpcModuleConfig,
eth: EthApi,
) -> RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
EthApi: EthApiTypes + 'static,
{
let Self { provider, pool, network, executor, consensus, evm_config, .. } = self;
RpcRegistryInner::new(provider, pool, network, executor, consensus, config, evm_config, eth)
}
/// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can
/// be used to start the transport server(s).
pub fn build<EthApi>(
self,
module_config: TransportRpcModuleConfig,
eth: EthApi,
) -> TransportRpcModules<()>
where
EthApi: FullEthApiServer<Provider = Provider, Pool = Pool>,
{
let mut modules = TransportRpcModules::default();
let Self { provider, pool, network, executor, consensus, evm_config, .. } = self;
// Only construct a registry when at least one transport selected any modules;
// otherwise the empty default is returned.
if !module_config.is_empty() {
let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone();
let mut registry = RpcRegistryInner::new(
provider,
pool,
network,
executor,
consensus,
config.unwrap_or_default(),
evm_config,
eth,
);
modules.config = module_config;
modules.http = registry.maybe_module(http.as_ref());
modules.ws = registry.maybe_module(ws.as_ref());
modules.ipc = registry.maybe_module(ipc.as_ref());
}
modules
}
}
impl<N: NodePrimitives> Default for RpcModuleBuilder<N, (), (), (), (), ()> {
fn default() -> Self {
Self::new((), (), (), Box::new(TokioTaskExecutor::default()), (), ())
}
}
/// Bundles settings for modules
///
/// Serializable so it can be embedded in node configuration files.
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct RpcModuleConfig {
/// `eth` namespace settings
eth: EthConfig,
/// `flashbots` namespace settings
flashbots: ValidationApiConfig,
}
// === impl RpcModuleConfig ===
impl RpcModuleConfig {
    /// Convenience method to create a new [`RpcModuleConfigBuilder`]
    pub fn builder() -> RpcModuleConfigBuilder {
        RpcModuleConfigBuilder::default()
    }
    /// Returns a new RPC module config given the eth namespace config
    pub const fn new(eth: EthConfig, flashbots: ValidationApiConfig) -> Self {
        Self { eth, flashbots }
    }
    /// Get a reference to the eth namespace config
    pub const fn eth(&self) -> &EthConfig {
        &self.eth
    }
    /// Get a mutable reference to the eth namespace config
    pub const fn eth_mut(&mut self) -> &mut EthConfig {
        &mut self.eth
    }
    /// Get a reference to the flashbots namespace config
    ///
    /// Added for consistency with [`Self::eth`]; previously the private `flashbots`
    /// field had no accessor at all.
    pub const fn flashbots(&self) -> &ValidationApiConfig {
        &self.flashbots
    }
    /// Get a mutable reference to the flashbots namespace config
    pub const fn flashbots_mut(&mut self) -> &mut ValidationApiConfig {
        &mut self.flashbots
    }
}
/// Configures [`RpcModuleConfig`]
#[derive(Clone, Debug, Default)]
pub struct RpcModuleConfigBuilder {
// `None` means "use the namespace's default config" when building.
eth: Option<EthConfig>,
flashbots: Option<ValidationApiConfig>,
}
// === impl RpcModuleConfigBuilder ===
impl RpcModuleConfigBuilder {
/// Configures a custom eth namespace config
pub fn eth(mut self, eth: EthConfig) -> Self {
self.eth = Some(eth);
self
}
/// Configures a custom flashbots namespace config
pub fn flashbots(mut self, flashbots: ValidationApiConfig) -> Self {
self.flashbots = Some(flashbots);
self
}
/// Consumes the type and creates the [`RpcModuleConfig`]
///
/// Any section left unset falls back to its `Default`.
pub fn build(self) -> RpcModuleConfig {
let Self { eth, flashbots } = self;
RpcModuleConfig { eth: eth.unwrap_or_default(), flashbots: flashbots.unwrap_or_default() }
}
/// Get a reference to the eth namespace config, if any
pub const fn get_eth(&self) -> Option<&EthConfig> {
self.eth.as_ref()
}
/// Get a mutable reference to the eth namespace config, if any
pub const fn eth_mut(&mut self) -> &mut Option<EthConfig> {
&mut self.eth
}
/// Get the eth namespace config, creating a default if none is set
pub fn eth_mut_or_default(&mut self) -> &mut EthConfig {
self.eth.get_or_insert_with(EthConfig::default)
}
}
/// A Helper type that holds instances of the configured modules.
#[derive(Debug, Clone)]
#[expect(dead_code)] // Consensus generic, might be useful in the future
pub struct RpcRegistryInner<
Provider: BlockReader,
Pool,
Network,
EthApi: EthApiTypes,
EvmConfig,
Consensus,
> {
/// The provider used by namespace handlers that read chain state.
provider: Provider,
/// The transaction pool handle.
pool: Pool,
/// The network handle (peer info, status).
network: Network,
/// Spawner for handler background tasks.
executor: Box<dyn TaskSpawner + 'static>,
/// EVM configuration used when constructing execution-related handlers.
evm_config: EvmConfig,
/// The consensus implementation (currently unused, see `expect(dead_code)` above).
consensus: Consensus,
/// Holds all `eth_` namespace handlers
eth: EthHandlers<EthApi>,
/// to put trace calls behind semaphore
blocking_pool_guard: BlockingTaskGuard,
/// Contains the [Methods] of a module
modules: HashMap<RethRpcModule, Methods>,
/// eth config settings
eth_config: EthConfig,
}
// === impl RpcRegistryInner ===
impl<N, Provider, Pool, Network, EthApi, EvmConfig, Consensus>
    RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
    N: NodePrimitives,
    Provider: StateProviderFactory
        + CanonStateSubscriptions<Primitives = N>
        + BlockReader<Block = N::Block, Receipt = N::Receipt>
        + Clone
        + Unpin
        + 'static,
    Pool: Send + Sync + Clone + 'static,
    Network: Clone + 'static,
    EthApi: EthApiTypes + 'static,
    EvmConfig: ConfigureEvm<Primitives = N>,
{
    /// Creates a new, empty instance.
    ///
    /// Bootstraps the `eth` namespace handlers (filter + pubsub) via
    /// [`EthHandlers::bootstrap`], which spawns their background tasks on `executor`.
    // NOTE: the previous version repeated `EvmConfig: ConfigureEvm<Primitives = N>` in a
    // fn-level `where` clause; that exactly duplicated the impl-level bound and was removed.
    #[expect(clippy::too_many_arguments)]
    pub fn new(
        provider: Provider,
        pool: Pool,
        network: Network,
        executor: Box<dyn TaskSpawner + 'static>,
        consensus: Consensus,
        config: RpcModuleConfig,
        evm_config: EvmConfig,
        eth_api: EthApi,
    ) -> Self {
        // Caps the number of concurrent expensive tracing calls.
        let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests);
        let eth = EthHandlers::bootstrap(config.eth.clone(), executor.clone(), eth_api);
        Self {
            provider,
            pool,
            network,
            eth,
            executor,
            consensus,
            modules: Default::default(),
            blocking_pool_guard,
            eth_config: config.eth,
            evm_config,
        }
    }
}
impl<Provider, Pool, Network, EthApi, BlockExecutor, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, BlockExecutor, Consensus>
where
Provider: BlockReader,
EthApi: EthApiTypes,
{
/// Returns a reference to the installed [`EthApi`].
pub const fn eth_api(&self) -> &EthApi {
&self.eth.api
}
/// Returns a reference to the installed [`EthHandlers`].
pub const fn eth_handlers(&self) -> &EthHandlers<EthApi> {
&self.eth
}
/// Returns a reference to the pool
pub const fn pool(&self) -> &Pool {
&self.pool
}
/// Returns a reference to the tasks type
pub const fn tasks(&self) -> &(dyn TaskSpawner + 'static) {
&*self.executor
}
/// Returns a reference to the provider
pub const fn provider(&self) -> &Provider {
&self.provider
}
/// Returns all installed methods
pub fn methods(&self) -> Vec<Methods> {
self.modules.values().cloned().collect()
}
/// Returns a merged `RpcModule`
///
/// # Panics
///
/// If two installed modules register conflicting method names.
pub fn module(&self) -> RpcModule<()> {
let mut module = RpcModule::new(());
for methods in self.modules.values().cloned() {
module.merge(methods).expect("No conflicts");
}
module
}
}
impl<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
Network: NetworkInfo + Clone + 'static,
EthApi: EthApiTypes,
Provider: BlockReader + ChainSpecProvider<ChainSpec: EthereumHardforks>,
EvmConfig: ConfigureEvm,
{
/// Instantiates `AdminApi`
pub fn admin_api(&self) -> AdminApi<Network, Provider::ChainSpec>
where
Network: Peers,
{
AdminApi::new(self.network.clone(), self.provider.chain_spec())
}
/// Instantiates `Web3Api`
pub fn web3_api(&self) -> Web3Api<Network> {
Web3Api::new(self.network.clone())
}
/// Register Admin Namespace
///
/// Note: `insert` replaces any previously registered handler for this namespace.
pub fn register_admin(&mut self) -> &mut Self
where
Network: Peers,
{
let adminapi = self.admin_api();
self.modules.insert(RethRpcModule::Admin, adminapi.into_rpc().into());
self
}
/// Register Web3 Namespace
pub fn register_web3(&mut self) -> &mut Self {
let web3api = self.web3_api();
self.modules.insert(RethRpcModule::Web3, web3api.into_rpc().into());
self
}
}
impl<N, Provider, Pool, Network, EthApi, EvmConfig, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
N: NodePrimitives,
Provider: FullRpcProvider<
Header = N::BlockHeader,
Block = N::Block,
Receipt = N::Receipt,
Transaction = N::SignedTx,
> + AccountReader
+ ChangeSetReader
+ CanonStateSubscriptions,
Network: NetworkInfo + Peers + Clone + 'static,
EthApi: EthApiServer<
RpcTxReq<EthApi::NetworkTypes>,
RpcTransaction<EthApi::NetworkTypes>,
RpcBlock<EthApi::NetworkTypes>,
RpcReceipt<EthApi::NetworkTypes>,
RpcHeader<EthApi::NetworkTypes>,
> + EthApiTypes,
EvmConfig: ConfigureEvm<Primitives = N> + 'static,
{
/// Register Eth Namespace
///
/// Note: `insert` replaces any previously registered handler for the namespace.
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn register_eth(&mut self) -> &mut Self {
let eth_api = self.eth_api().clone();
self.modules.insert(RethRpcModule::Eth, eth_api.into_rpc().into());
self
}
/// Register Otterscan Namespace
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn register_ots(&mut self) -> &mut Self
where
EthApi: TraceExt + EthTransactions,
{
let otterscan_api = self.otterscan_api();
self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into());
self
}
/// Register Debug Namespace
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn register_debug(&mut self) -> &mut Self
where
EthApi: EthApiSpec + EthTransactions + TraceExt,
EvmConfig::Primitives: NodePrimitives<Block = ProviderBlock<EthApi::Provider>>,
{
let debug_api = self.debug_api();
self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into());
self
}
/// Register Trace Namespace
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn register_trace(&mut self) -> &mut Self
where
EthApi: TraceExt,
{
let trace_api = self.trace_api();
self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into());
self
}
/// Register Net Namespace
///
/// See also [`Self::eth_api`]
///
/// # Panics
///
/// If called outside of the tokio runtime.
pub fn register_net(&mut self) -> &mut Self
where
EthApi: EthApiSpec + 'static,
{
let netapi = self.net_api();
self.modules.insert(RethRpcModule::Net, netapi.into_rpc().into());
self
}
/// Register Reth namespace
///
/// See also [`Self::eth_api`]
///
/// # Panics
///
/// If called outside of the tokio runtime.
pub fn register_reth(&mut self) -> &mut Self {
let rethapi = self.reth_api();
self.modules.insert(RethRpcModule::Reth, rethapi.into_rpc().into());
self
}
/// Instantiates `OtterscanApi`
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn otterscan_api(&self) -> OtterscanApi<EthApi> {
let eth_api = self.eth_api().clone();
OtterscanApi::new(eth_api)
}
}
impl<N, Provider, Pool, Network, EthApi, EvmConfig, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
N: NodePrimitives,
Provider: FullRpcProvider<
Block = N::Block,
Header = N::BlockHeader,
Transaction = N::SignedTx,
Receipt = N::Receipt,
> + AccountReader
+ ChangeSetReader,
Network: NetworkInfo + Peers + Clone + 'static,
EthApi: EthApiTypes,
EvmConfig: ConfigureEvm<Primitives = N>,
{
/// Instantiates `TraceApi`
///
/// Shares `blocking_pool_guard` with the debug/bundle APIs so all tracing-style
/// calls are bounded by the same semaphore.
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn trace_api(&self) -> TraceApi<EthApi> {
TraceApi::new(
self.eth_api().clone(),
self.blocking_pool_guard.clone(),
self.eth_config.clone(),
)
}
/// Instantiates [`EthBundle`] Api
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn bundle_api(&self) -> EthBundle<EthApi>
where
EthApi: EthTransactions + LoadPendingBlock + Call,
{
let eth_api = self.eth_api().clone();
EthBundle::new(eth_api, self.blocking_pool_guard.clone())
}
/// Instantiates `DebugApi`
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn debug_api(&self) -> DebugApi<EthApi> {
DebugApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone())
}
/// Instantiates `NetApi`
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn net_api(&self) -> NetApi<Network, EthApi>
where
EthApi: EthApiSpec + 'static,
{
let eth_api = self.eth_api().clone();
NetApi::new(self.network.clone(), eth_api)
}
/// Instantiates `RethApi`
pub fn reth_api(&self) -> RethApi<Provider> {
RethApi::new(self.provider.clone(), self.executor.clone())
}
}
impl<N, Provider, Pool, Network, EthApi, EvmConfig, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
N: NodePrimitives,
Provider: FullRpcProvider<Block = N::Block>
+ CanonStateSubscriptions<Primitives = N>
+ AccountReader
+ ChangeSetReader,
Pool: TransactionPool + 'static,
Network: NetworkInfo + Peers + Clone + 'static,
EthApi: FullEthApiServer,
EvmConfig: ConfigureEvm<Primitives = N> + 'static,
Consensus: FullConsensus<N, Error = ConsensusError> + Clone + 'static,
{
/// Configures the auth module that includes the
/// * `engine_` namespace
/// * `api_` namespace
///
/// Note: This does _not_ register the `engine_` in this registry.
pub fn create_auth_module(&self, engine_api: impl IntoEngineApiRpcModule) -> AuthRpcModule {
let mut module = engine_api.into_rpc_module();
// also merge a subset of `eth_` handlers
let eth_handlers = self.eth_handlers();
let engine_eth = EngineEthApi::new(eth_handlers.api.clone(), eth_handlers.filter.clone());
module.merge(engine_eth.into_rpc()).expect("No conflicting methods");
AuthRpcModule { inner: module }
}
/// Helper function to create a [`RpcModule`] if it's not `None`
fn maybe_module(&mut self, config: Option<&RpcModuleSelection>) -> Option<RpcModule<()>> {
config.map(|config| self.module_for(config))
}
/// Configure a [`TransportRpcModules`] using the current registry. This
/// creates [`RpcModule`] instances for the modules selected by the
/// `config`.
pub fn create_transport_rpc_modules(
&mut self,
config: TransportRpcModuleConfig,
) -> TransportRpcModules<()> {
let mut modules = TransportRpcModules::default();
let http = self.maybe_module(config.http.as_ref());
let ws = self.maybe_module(config.ws.as_ref());
let ipc = self.maybe_module(config.ipc.as_ref());
modules.config = config;
modules.http = http;
modules.ws = ws;
modules.ipc = ipc;
modules
}
/// Populates a new [`RpcModule`] based on the selected [`RethRpcModule`]s in the given
/// [`RpcModuleSelection`]
pub fn module_for(&mut self, config: &RpcModuleSelection) -> RpcModule<()> {
let mut module = RpcModule::new(());
let all_methods = self.reth_methods(config.iter_selection());
for methods in all_methods {
module.merge(methods).expect("No conflicts");
}
module
}
/// Returns the [Methods] for the given [`RethRpcModule`]
///
/// If this is the first time the namespace is requested, a new instance of API implementation
/// will be created.
///
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn reth_methods(
&mut self,
namespaces: impl Iterator<Item = RethRpcModule>,
) -> Vec<Methods> {
let EthHandlers { api: eth_api, filter: eth_filter, pubsub: eth_pubsub, .. } =
self.eth_handlers().clone();
// Create a copy, so we can list out all the methods for rpc_ api
let namespaces: Vec<_> = namespaces.collect();
namespaces
.iter()
.copied()
.map(|namespace| {
self.modules
.entry(namespace)
.or_insert_with(|| match namespace {
RethRpcModule::Admin => {
AdminApi::new(self.network.clone(), self.provider.chain_spec())
.into_rpc()
.into()
}
RethRpcModule::Debug => {
DebugApi::new(eth_api.clone(), self.blocking_pool_guard.clone())
.into_rpc()
.into()
}
RethRpcModule::Eth => {
// merge all eth handlers
let mut module = eth_api.clone().into_rpc();
module.merge(eth_filter.clone().into_rpc()).expect("No conflicts");
module.merge(eth_pubsub.clone().into_rpc()).expect("No conflicts");
module
.merge(
EthBundle::new(
eth_api.clone(),
self.blocking_pool_guard.clone(),
)
.into_rpc(),
)
.expect("No conflicts");
module.into()
}
RethRpcModule::Net => {
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/rate_limiter.rs | crates/rpc/rpc-builder/src/rate_limiter.rs | //! [`jsonrpsee`] helper layer for rate limiting certain methods.
use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request};
use std::{
future::Future,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio_util::sync::PollSemaphore;
use tower::Layer;
/// Rate limiter for the RPC server.
///
/// Rate limits expensive calls such as debug_ and trace_.
#[derive(Debug, Clone)]
pub struct RpcRequestRateLimiter {
/// Shared semaphore state; clones of this limiter share the same permit pool.
inner: Arc<RpcRequestRateLimiterInner>,
}
impl RpcRequestRateLimiter {
    /// Creates a rate limiter that allows at most `rate_limit` rate-limited calls
    /// to run concurrently.
    pub fn new(rate_limit: usize) -> Self {
        let permits = Arc::new(Semaphore::new(rate_limit));
        let inner = RpcRequestRateLimiterInner { call_guard: PollSemaphore::new(permits) };
        Self { inner: Arc::new(inner) }
    }
}
impl<S> Layer<S> for RpcRequestRateLimiter {
type Service = RpcRequestRateLimitingService<S>;
fn layer(&self, inner: S) -> Self::Service {
RpcRequestRateLimitingService::new(inner, self.clone())
}
}
/// Rate Limiter for the RPC server
///
/// Shared state behind the [`RpcRequestRateLimiter`] handle.
#[derive(Debug, Clone)]
struct RpcRequestRateLimiterInner {
/// Semaphore to rate limit calls
call_guard: PollSemaphore,
}
/// A [`RpcServiceT`] middleware that rate limits RPC calls to the server.
///
/// Created via [`RpcRequestRateLimiter`]'s `Layer` implementation.
#[derive(Debug, Clone)]
pub struct RpcRequestRateLimitingService<S> {
/// The rate limiter for RPC requests
rate_limiter: RpcRequestRateLimiter,
/// The inner service being wrapped
inner: S,
}
impl<S> RpcRequestRateLimitingService<S> {
    /// Wraps `service` so that expensive calls are gated by `rate_limiter`.
    pub const fn new(service: S, rate_limiter: RpcRequestRateLimiter) -> Self {
        Self { rate_limiter, inner: service }
    }
}
impl<S> RpcServiceT for RpcRequestRateLimitingService<S>
where
S: RpcServiceT + Send + Sync + Clone + 'static,
{
type MethodResponse = S::MethodResponse;
type NotificationResponse = S::NotificationResponse;
type BatchResponse = S::BatchResponse;
// Only `trace_*` and `debug_*` methods are rate limited; everything else is
// forwarded with no semaphore involvement.
fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
let method_name = req.method_name();
if method_name.starts_with("trace_") || method_name.starts_with("debug_") {
RateLimitingRequestFuture {
fut: self.inner.call(req),
guard: Some(self.rate_limiter.inner.call_guard.clone()),
permit: None,
}
} else {
// if we don't need to rate limit, then there
// is no need to get a semaphore permit
RateLimitingRequestFuture { fut: self.inner.call(req), guard: None, permit: None }
}
}
// NOTE(review): batch requests are forwarded unchanged, so trace_/debug_ calls inside
// a batch bypass the semaphore — confirm this is intentional.
fn batch<'a>(
&self,
requests: jsonrpsee::core::middleware::Batch<'a>,
) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
self.inner.batch(requests)
}
// Notifications carry no response and are never rate limited.
fn notification<'a>(
&self,
n: jsonrpsee::core::middleware::Notification<'a>,
) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
self.inner.notification(n)
}
}
/// Response future.
///
/// Optionally waits for a semaphore permit before polling the wrapped call and
/// holds that permit until the call completes.
#[pin_project::pin_project]
pub struct RateLimitingRequestFuture<F> {
#[pin]
fut: F,
/// `Some` while a permit still needs to be acquired; `None` once acquired or if
/// this request is not rate limited.
guard: Option<PollSemaphore>,
/// The permit held while the inner future is executing.
permit: Option<OwnedSemaphorePermit>,
}
impl<F> std::fmt::Debug for RateLimitingRequestFuture<F> {
    /// Opaque representation: the wrapped future is not required to be `Debug`,
    /// so only the type name is printed.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RateLimitingRequestFuture").finish()
    }
}
impl<F: Future> Future for RateLimitingRequestFuture<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
// If this request is rate limited, first acquire a semaphore permit; `ready!`
// returns Pending until one becomes available.
if let Some(guard) = this.guard.as_mut() {
*this.permit = ready!(guard.poll_acquire(cx));
// Clear the guard so subsequent polls don't try to acquire again.
*this.guard = None;
}
let res = this.fut.poll(cx);
if res.is_ready() {
// Release the permit as soon as the inner call finishes.
*this.permit = None;
}
res
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/eth.rs | crates/rpc/rpc-builder/src/eth.rs | use reth_rpc::{EthFilter, EthPubSub};
use reth_rpc_eth_api::EthApiTypes;
use reth_rpc_eth_types::EthConfig;
use reth_tasks::TaskSpawner;
/// Handlers for core, filter and pubsub `eth` namespace APIs.
///
/// Instances are created via [`EthHandlers::bootstrap`].
#[derive(Debug, Clone)]
pub struct EthHandlers<EthApi: EthApiTypes> {
/// Main `eth_` request handler
pub api: EthApi,
/// Polling based filter handler available on all transports
pub filter: EthFilter<EthApi>,
/// Handler for subscriptions only available for transports that support it (ws, ipc)
pub pubsub: EthPubSub<EthApi>,
}
impl<EthApi> EthHandlers<EthApi>
where
EthApi: EthApiTypes + 'static,
{
/// Returns a new instance with the additional handlers for the `eth` namespace.
///
/// This will spawn all necessary tasks for the additional handlers.
pub fn bootstrap(
config: EthConfig,
executor: Box<dyn TaskSpawner + 'static>,
eth_api: EthApi,
) -> Self {
// Filter handler uses only the filter-specific subset of the eth config.
let filter = EthFilter::new(eth_api.clone(), config.filter_config(), executor.clone());
let pubsub = EthPubSub::with_spawner(eth_api.clone(), executor);
Self { api: eth_api, filter, pubsub }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/middleware.rs | crates/rpc/rpc-builder/src/middleware.rs | use jsonrpsee::server::middleware::rpc::RpcService;
use tower::Layer;
/// A Helper alias trait for the RPC middleware supported by the server.
/// A Helper alias trait for the RPC middleware supported by the server.
///
/// Any [`Layer`] over jsonrpsee's [`RpcService`] whose produced service is a
/// cloneable, thread-safe `RpcServiceT` returning [`jsonrpsee::MethodResponse`]
/// for calls, batches and notifications satisfies this trait automatically via
/// the blanket implementation.
pub trait RethRpcMiddleware:
    Layer<
        RpcService,
        Service: jsonrpsee::server::middleware::rpc::RpcServiceT<
            MethodResponse = jsonrpsee::MethodResponse,
            BatchResponse = jsonrpsee::MethodResponse,
            NotificationResponse = jsonrpsee::MethodResponse,
        > + Send
                     + Sync
                     + Clone
                     + 'static,
    > + Clone
    + Send
    + 'static
{
}
// Blanket implementation: every layer satisfying the bounds is a valid
// `RethRpcMiddleware`; downstream code never implements the trait manually.
impl<T> RethRpcMiddleware for T where
    T: Layer<
            RpcService,
            Service: jsonrpsee::server::middleware::rpc::RpcServiceT<
                MethodResponse = jsonrpsee::MethodResponse,
                BatchResponse = jsonrpsee::MethodResponse,
                NotificationResponse = jsonrpsee::MethodResponse,
            > + Send
                         + Sync
                         + Clone
                         + 'static,
        > + Clone
        + Send
        + 'static
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/error.rs | crates/rpc/rpc-builder/src/error.rs | use crate::{cors::CorsDomainError, RethRpcModule};
use reth_ipc::server::IpcServerStartError;
use std::{
collections::HashSet,
io::{self, ErrorKind},
net::SocketAddr,
};
/// Rpc server kind.
///
/// Identifies which transport/server an error or log message refers to,
/// together with the socket address it was bound to.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ServerKind {
    /// Http.
    Http(SocketAddr),
    /// Websocket.
    WS(SocketAddr),
    /// WS and http on the same port
    WsHttp(SocketAddr),
    /// Auth.
    Auth(SocketAddr),
}
impl ServerKind {
    /// Returns the CLI flag(s) that configure the port for this server kind.
    pub const fn flags(&self) -> &'static str {
        match *self {
            Self::Http(_) => "--http.port",
            Self::WS(_) => "--ws.port",
            Self::WsHttp(_) => "--ws.port and --http.port",
            Self::Auth(_) => "--authrpc.port",
        }
    }
}
impl std::fmt::Display for ServerKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Resolve the address and human-readable label first, then emit a single
        // uniform `addr (label)` string.
        let (addr, label) = match self {
            Self::Http(addr) => (addr, "HTTP-RPC server"),
            Self::WS(addr) => (addr, "WS-RPC server"),
            Self::WsHttp(addr) => (addr, "WS-HTTP-RPC server"),
            Self::Auth(addr) => (addr, "AUTH server"),
        };
        write!(f, "{addr} ({label})")
    }
}
/// Rpc Server related errors
#[derive(Debug, thiserror::Error)]
pub enum RpcError {
    /// Thrown during server start.
    #[error("Failed to start {kind} server: {error}")]
    ServerError {
        /// Server kind.
        kind: ServerKind,
        /// IO error.
        error: io::Error,
    },
    /// Address already in use.
    ///
    /// The message embeds [`ServerKind::flags`] so users know which CLI option
    /// to change to pick a different port.
    #[error("address {kind} is already in use (os error 98). Choose a different port using {}", kind.flags())]
    AddressAlreadyInUse {
        /// Server kind.
        kind: ServerKind,
        /// IO error.
        error: io::Error,
    },
    /// Cors parsing error.
    #[error(transparent)]
    Cors(#[from] CorsDomainError),
    /// Http and WS server configured on the same port but with conflicting settings.
    #[error(transparent)]
    WsHttpSamePortError(#[from] WsHttpSamePortError),
    /// Thrown when IPC server fails to start.
    #[error(transparent)]
    IpcServerError(#[from] IpcServerStartError),
    /// Custom error.
    #[error("{0}")]
    Custom(String),
}
impl RpcError {
/// Converts an [`io::Error`] to a more descriptive `RpcError`.
pub fn server_error(io_error: io::Error, kind: ServerKind) -> Self {
if io_error.kind() == ErrorKind::AddrInUse {
return Self::AddressAlreadyInUse { kind, error: io_error }
}
Self::ServerError { kind, error: io_error }
}
}
/// Conflicting modules between http and ws servers.
///
/// Captures the full diff of module sets so the error message can explain
/// exactly how the two configurations disagree.
#[derive(Debug)]
pub struct ConflictingModules {
    /// Modules present in both http and ws.
    pub overlap: HashSet<RethRpcModule>,
    /// Modules present in http but not in ws.
    pub http_not_ws: HashSet<RethRpcModule>,
    /// Modules present in ws but not in http.
    pub ws_not_http: HashSet<RethRpcModule>,
}
impl std::fmt::Display for ConflictingModules {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // NOTE: the multi-line string literal is rendered as-is; its line breaks
        // and spacing are part of the user-facing error message.
        write!(
            f,
            "different API modules for HTTP and WS on the same port is currently not supported: \
            Overlap: {:?}, \
            HTTP modules not present in WS: {:?} \
            WS modules not present in HTTP: {:?}
            ",
            self.overlap, self.http_not_ws, self.ws_not_http
        )
    }
}
/// Errors when trying to launch ws and http server on the same port.
///
/// Sharing a port requires identical CORS settings and identical module sets.
#[derive(Debug, thiserror::Error)]
pub enum WsHttpSamePortError {
    /// Ws and http server configured on same port but with different cors domains.
    #[error(
        "CORS domains for HTTP and WS are different, but they are on the same port: \
         HTTP: {http_cors_domains:?}, WS: {ws_cors_domains:?}"
    )]
    ConflictingCorsDomains {
        /// Http cors domains.
        http_cors_domains: Option<String>,
        /// Ws cors domains.
        ws_cors_domains: Option<String>,
    },
    /// Ws and http server configured on same port but with different modules.
    ///
    /// Boxed because [`ConflictingModules`] holds three hash sets.
    #[error("{0}")]
    ConflictingModules(Box<ConflictingModules>),
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{Ipv4Addr, SocketAddrV4};

    /// Every server kind's "address in use" message must mention its CLI flag.
    #[test]
    fn test_address_in_use_message() {
        let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1234));
        for kind in [
            ServerKind::Http(addr),
            ServerKind::WS(addr),
            ServerKind::WsHttp(addr),
            ServerKind::Auth(addr),
        ] {
            let err = RpcError::AddressAlreadyInUse {
                kind,
                error: io::Error::from(ErrorKind::AddrInUse),
            };
            assert!(err.to_string().contains(kind.flags()));
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/cors.rs | crates/rpc/rpc-builder/src/cors.rs | use http::{HeaderValue, Method};
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
/// Error thrown when parsing cors domains went wrong
#[derive(Debug, thiserror::Error)]
pub enum CorsDomainError {
    /// Represents an invalid header value for a domain
    #[error("{domain} is an invalid header value")]
    InvalidHeader {
        /// The domain that caused the invalid header
        domain: String,
    },
    /// Indicates that a wildcard origin was used incorrectly in a list
    ///
    /// `*` is only valid on its own, not mixed with explicit origins.
    #[error("wildcard origin (`*`) cannot be passed as part of a list: {input}")]
    WildCardNotAllowed {
        /// The input string containing the incorrectly used wildcard
        input: String,
    },
}
/// Creates a [`CorsLayer`] from the given comma-separated list of domains.
///
/// A lone `*` (surrounding whitespace ignored) enables a fully permissive
/// policy; otherwise every entry must parse as a valid origin header value and
/// a wildcard mixed into the list is rejected.
pub(crate) fn create_cors_layer(http_cors_domains: &str) -> Result<CorsLayer, CorsDomainError> {
    if http_cors_domains.trim() == "*" {
        return Ok(CorsLayer::new()
            .allow_methods([Method::GET, Method::POST])
            .allow_origin(Any)
            .allow_headers(Any))
    }
    // Reject a wildcard inside an explicit list before attempting to parse any
    // entry, so the wildcard error takes precedence over parse errors.
    if http_cors_domains.split(',').any(|origin| origin == "*") {
        return Err(CorsDomainError::WildCardNotAllowed { input: http_cors_domains.to_string() })
    }
    let origins = http_cors_domains
        .split(',')
        .map(|domain| {
            domain
                .parse::<HeaderValue>()
                .map_err(|_| CorsDomainError::InvalidHeader { domain: domain.to_string() })
        })
        .collect::<Result<Vec<HeaderValue>, _>>()?;
    Ok(CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_origin(AllowOrigin::list(origins))
        .allow_headers(Any))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/auth.rs | crates/rpc/rpc-builder/src/auth.rs | use crate::{
error::{RpcError, ServerKind},
middleware::RethRpcMiddleware,
};
use http::header::AUTHORIZATION;
use jsonrpsee::{
core::{client::SubscriptionClientT, RegisterMethodError},
http_client::HeaderMap,
server::{AlreadyStoppedError, RpcModule},
ws_client::RpcServiceBuilder,
Methods,
};
use reth_rpc_api::servers::*;
use reth_rpc_eth_types::EthSubscriptionIdProvider;
use reth_rpc_layer::{
secret_to_bearer_header, AuthClientLayer, AuthLayer, JwtAuthValidator, JwtSecret,
};
use reth_rpc_server_types::constants;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use tower::layer::util::Identity;
pub use jsonrpsee::server::ServerBuilder;
use jsonrpsee::server::{ServerConfig, ServerConfigBuilder};
pub use reth_ipc::server::Builder as IpcServerBuilder;
/// Server configuration for the auth server.
///
/// Built via [`AuthServerConfig::builder`]; consumed by `start` to launch the
/// JWT-authenticated engine API server.
#[derive(Debug)]
pub struct AuthServerConfig<RpcMiddleware = Identity> {
    /// Where the server should listen.
    pub(crate) socket_addr: SocketAddr,
    /// The secret for the auth layer of the server.
    pub(crate) secret: JwtSecret,
    /// Configs for JSON-RPC Http.
    pub(crate) server_config: ServerConfigBuilder,
    /// Configs for IPC server
    pub(crate) ipc_server_config: Option<IpcServerBuilder<Identity, Identity>>,
    /// IPC endpoint
    pub(crate) ipc_endpoint: Option<String>,
    /// Configurable RPC middleware
    pub(crate) rpc_middleware: RpcMiddleware,
}
// === impl AuthServerConfig ===
impl AuthServerConfig {
    /// Convenience function to create a new `AuthServerConfig`.
    ///
    /// The secret is mandatory; all other settings get defaults from the builder.
    pub const fn builder(secret: JwtSecret) -> AuthServerConfigBuilder {
        AuthServerConfigBuilder::new(secret)
    }
}
impl<RpcMiddleware> AuthServerConfig<RpcMiddleware> {
    /// Returns the address the server will listen on.
    pub const fn address(&self) -> SocketAddr {
        self.socket_addr
    }
    /// Configures the rpc middleware.
    ///
    /// Replaces the currently configured middleware; all other settings are kept.
    pub fn with_rpc_middleware<T>(self, rpc_middleware: T) -> AuthServerConfig<T> {
        let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint, .. } = self;
        AuthServerConfig {
            socket_addr,
            secret,
            server_config,
            ipc_server_config,
            ipc_endpoint,
            rpc_middleware,
        }
    }
    /// Convenience function to start a server in one step.
    ///
    /// Launches the JWT-authenticated HTTP/WS server and, if an IPC config is
    /// present, an additional IPC server serving the same module.
    ///
    /// # Errors
    /// Returns an [`RpcError`] if binding the socket or starting the IPC server fails.
    pub async fn start(self, module: AuthRpcModule) -> Result<AuthServerHandle, RpcError>
    where
        RpcMiddleware: RethRpcMiddleware,
    {
        let Self {
            socket_addr,
            secret,
            server_config,
            ipc_server_config,
            ipc_endpoint,
            rpc_middleware,
        } = self;
        // Create auth middleware: every HTTP request must carry a valid JWT.
        let middleware =
            tower::ServiceBuilder::new().layer(AuthLayer::new(JwtAuthValidator::new(secret)));
        let rpc_middleware = RpcServiceBuilder::default().layer(rpc_middleware);
        // By default, both http and ws are enabled.
        let server = ServerBuilder::new()
            .set_config(server_config.build())
            .set_http_middleware(middleware)
            .set_rpc_middleware(rpc_middleware)
            .build(socket_addr)
            .await
            .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?;
        let local_addr = server
            .local_addr()
            .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?;
        let handle = server.start(module.inner.clone());
        // Optionally expose the same module over IPC, falling back to the default
        // engine API endpoint when none was configured explicitly.
        let ipc_handle = if let Some(ipc_server_config) = ipc_server_config {
            let ipc_endpoint_str = ipc_endpoint
                .clone()
                .unwrap_or_else(|| constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string());
            let ipc_server = ipc_server_config.build(ipc_endpoint_str);
            let res = ipc_server.start(module.inner).await?;
            Some(res)
        } else {
            None
        };
        Ok(AuthServerHandle { handle: Some(handle), local_addr, secret, ipc_endpoint, ipc_handle })
    }
}
/// Builder type for configuring an `AuthServerConfig`.
#[derive(Debug)]
pub struct AuthServerConfigBuilder<RpcMiddleware = Identity> {
    /// Listen address; `build` falls back to localhost + default auth port.
    socket_addr: Option<SocketAddr>,
    /// JWT secret used to authenticate incoming connections.
    secret: JwtSecret,
    /// Optional JSON-RPC server configuration override.
    server_config: Option<ServerConfigBuilder>,
    /// Optional IPC server configuration; IPC stays disabled when `None`.
    ipc_server_config: Option<IpcServerBuilder<Identity, Identity>>,
    /// Optional IPC endpoint path.
    ipc_endpoint: Option<String>,
    /// Configurable RPC middleware, `Identity` (no-op) by default.
    rpc_middleware: RpcMiddleware,
}
// === impl AuthServerConfigBuilder ===
impl AuthServerConfigBuilder {
    /// Create a new `AuthServerConfigBuilder` with the given `secret`.
    ///
    /// All other options start unset and fall back to defaults in `build`.
    pub const fn new(secret: JwtSecret) -> Self {
        Self {
            socket_addr: None,
            secret,
            server_config: None,
            ipc_server_config: None,
            ipc_endpoint: None,
            rpc_middleware: Identity::new(),
        }
    }
}
impl<RpcMiddleware> AuthServerConfigBuilder<RpcMiddleware> {
    /// Configures the rpc middleware.
    ///
    /// Replaces any previously configured middleware; all other settings are kept.
    pub fn with_rpc_middleware<T>(self, rpc_middleware: T) -> AuthServerConfigBuilder<T> {
        let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint, .. } = self;
        AuthServerConfigBuilder {
            socket_addr,
            secret,
            server_config,
            ipc_server_config,
            ipc_endpoint,
            rpc_middleware,
        }
    }
    /// Set the socket address for the server.
    pub const fn socket_addr(mut self, socket_addr: SocketAddr) -> Self {
        self.socket_addr = Some(socket_addr);
        self
    }
    /// Set the socket address for the server, if one was provided.
    pub const fn maybe_socket_addr(mut self, socket_addr: Option<SocketAddr>) -> Self {
        self.socket_addr = socket_addr;
        self
    }
    /// Set the secret for the server.
    pub const fn secret(mut self, secret: JwtSecret) -> Self {
        self.secret = secret;
        self
    }
    /// Configures the JSON-RPC server
    ///
    /// Note: this always configures an [`EthSubscriptionIdProvider`]
    /// [`IdProvider`](jsonrpsee::server::IdProvider) for convenience.
    pub fn with_server_config(mut self, config: ServerConfigBuilder) -> Self {
        self.server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default()));
        self
    }
    /// Set the ipc endpoint for the server.
    pub fn ipc_endpoint(mut self, ipc_endpoint: String) -> Self {
        self.ipc_endpoint = Some(ipc_endpoint);
        self
    }
    /// Configures the IPC server
    ///
    /// Note: this always configures an [`EthSubscriptionIdProvider`]
    pub fn with_ipc_config(mut self, config: IpcServerBuilder<Identity, Identity>) -> Self {
        self.ipc_server_config = Some(config.set_id_provider(EthSubscriptionIdProvider::default()));
        self
    }
    /// Build the `AuthServerConfig`.
    ///
    /// Unset options fall back to defaults: localhost + default auth port for the
    /// address and a server configuration sized for large engine API payloads.
    pub fn build(self) -> AuthServerConfig<RpcMiddleware> {
        AuthServerConfig {
            socket_addr: self.socket_addr.unwrap_or_else(|| {
                SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), constants::DEFAULT_AUTH_PORT)
            }),
            secret: self.secret,
            server_config: self.server_config.unwrap_or_else(|| {
                ServerConfig::builder()
                    // This needs to be large enough to handle large eth_getLogs responses
                    // and the maximum payload bodies limit for
                    // `engine_getPayloadBodiesByRangeV`; ~750MB per
                    // response should be enough
                    .max_response_body_size(750 * 1024 * 1024)
                    // Connections to this server are always authenticated, hence this only
                    // affects connections from the CL or any other
                    // client that uses JWT, this should be
                    // more than enough so that the CL (or multiple CL nodes) will never get
                    // rate limited
                    .max_connections(500)
                    // bump the default request size slightly, there aren't any methods exposed
                    // with dynamic request params that can exceed this
                    .max_request_body_size(128 * 1024 * 1024)
                    .set_id_provider(EthSubscriptionIdProvider::default())
            }),
            // Apply the same generous limits to the IPC server when configured.
            ipc_server_config: self.ipc_server_config.map(|ipc_server_config| {
                ipc_server_config
                    .max_response_body_size(750 * 1024 * 1024)
                    .max_connections(500)
                    .max_request_body_size(128 * 1024 * 1024)
                    .set_id_provider(EthSubscriptionIdProvider::default())
            }),
            ipc_endpoint: self.ipc_endpoint,
            rpc_middleware: self.rpc_middleware,
        }
    }
}
/// Holds installed modules for the auth server.
///
/// Wraps a flat [`RpcModule`] so methods can be merged, removed or replaced
/// before the server is started.
#[derive(Debug, Clone)]
pub struct AuthRpcModule {
    pub(crate) inner: RpcModule<()>,
}
impl AuthRpcModule {
    /// Create a new `AuthRpcModule` with the given `engine_api`.
    pub fn new(engine: impl IntoEngineApiRpcModule) -> Self {
        Self { inner: engine.into_rpc_module() }
    }

    /// Get a mutable reference to the inner `RpcModule`.
    pub const fn module_mut(&mut self) -> &mut RpcModule<()> {
        &mut self.inner
    }

    /// Merge the given [Methods] into the configured authenticated methods.
    ///
    /// Fails if any method in `other` is already registered.
    pub fn merge_auth_methods(
        &mut self,
        other: impl Into<Methods>,
    ) -> Result<bool, RegisterMethodError> {
        self.module_mut().merge(other.into())?;
        Ok(true)
    }

    /// Removes the method with the given name from the configured authenticated methods.
    ///
    /// Returns `true` if the method was found and removed, `false` otherwise.
    pub fn remove_auth_method(&mut self, method_name: &'static str) -> bool {
        self.module_mut().remove_method(method_name).is_some()
    }

    /// Removes every given method from the configured authenticated methods.
    pub fn remove_auth_methods(&mut self, methods: impl IntoIterator<Item = &'static str>) {
        methods.into_iter().for_each(|name| {
            self.remove_auth_method(name);
        });
    }

    /// Replace the given [Methods] in the configured authenticated methods.
    pub fn replace_auth_methods(
        &mut self,
        other: impl Into<Methods>,
    ) -> Result<bool, RegisterMethodError> {
        // Remove any existing registrations first so the merge cannot conflict.
        let methods = other.into();
        self.remove_auth_methods(methods.method_names());
        self.merge_auth_methods(methods)
    }

    /// Convenience function for starting a server with this module installed.
    pub async fn start_server(
        self,
        config: AuthServerConfig,
    ) -> Result<AuthServerHandle, RpcError> {
        config.start(self).await
    }
}
/// A handle to the spawned auth server.
///
/// When this type is dropped or [`AuthServerHandle::stop`] has been called the server will be
/// stopped.
#[derive(Clone, Debug)]
#[must_use = "Server stops if dropped"]
pub struct AuthServerHandle {
    /// Address the HTTP/WS server is bound to.
    local_addr: SocketAddr,
    /// Handle to the HTTP/WS server; `None` for a [`AuthServerHandle::noop`] handle.
    handle: Option<jsonrpsee::server::ServerHandle>,
    /// JWT secret used by the client helpers to authenticate against the server.
    secret: JwtSecret,
    /// IPC endpoint path, if an IPC server was launched.
    ipc_endpoint: Option<String>,
    /// Handle to the IPC server, if one was launched.
    ipc_handle: Option<jsonrpsee::server::ServerHandle>,
}
// === impl AuthServerHandle ===
impl AuthServerHandle {
    /// Creates a new handle that isn't connected to any server.
    ///
    /// This can be used to satisfy types that require an engine API.
    pub fn noop() -> Self {
        Self {
            local_addr: SocketAddr::new(
                IpAddr::V4(Ipv4Addr::LOCALHOST),
                constants::DEFAULT_AUTH_PORT,
            ),
            handle: None,
            // A random secret is fine here: no server exists to authenticate against.
            secret: JwtSecret::random(),
            ipc_endpoint: None,
            ipc_handle: None,
        }
    }
    /// Returns the [`SocketAddr`] of the http server if started.
    pub const fn local_addr(&self) -> SocketAddr {
        self.local_addr
    }
    /// Tell the server to stop without waiting for the server to stop.
    ///
    /// NOTE(review): this only signals the HTTP/WS handle; the IPC handle (if any)
    /// is left to stop when dropped — confirm this is intended.
    pub fn stop(self) -> Result<(), AlreadyStoppedError> {
        let Some(handle) = self.handle else { return Ok(()) };
        handle.stop()
    }
    /// Returns the url to the http server
    pub fn http_url(&self) -> String {
        format!("http://{}", self.local_addr)
    }
    /// Returns the url to the ws server
    pub fn ws_url(&self) -> String {
        format!("ws://{}", self.local_addr)
    }
    /// Returns a http client connected to the server.
    ///
    /// This client uses the JWT token to authenticate requests.
    pub fn http_client(&self) -> impl SubscriptionClientT + Clone + Send + Sync + Unpin + 'static {
        // Create a middleware that adds a new JWT token to every request.
        let secret_layer = AuthClientLayer::new(self.secret);
        let middleware = tower::ServiceBuilder::default().layer(secret_layer);
        jsonrpsee::http_client::HttpClientBuilder::default()
            .set_http_middleware(middleware)
            .build(self.http_url())
            .expect("Failed to create http client")
    }
    /// Returns a ws client connected to the server. Note that the connection can only be
    /// be established within 1 minute due to the JWT token expiration.
    pub async fn ws_client(&self) -> jsonrpsee::ws_client::WsClient {
        // The bearer token is attached once at handshake time, hence the expiry window.
        jsonrpsee::ws_client::WsClientBuilder::default()
            .set_headers(HeaderMap::from_iter([(
                AUTHORIZATION,
                secret_to_bearer_header(&self.secret),
            )]))
            .build(self.ws_url())
            .await
            .expect("Failed to create ws client")
    }
    /// Returns an ipc client connected to the server.
    ///
    /// Returns `None` when no IPC endpoint was configured. Unix only.
    #[cfg(unix)]
    pub async fn ipc_client(&self) -> Option<jsonrpsee::async_client::Client> {
        use reth_ipc::client::IpcClientBuilder;
        if let Some(ipc_endpoint) = &self.ipc_endpoint {
            return Some(
                IpcClientBuilder::default()
                    .build(ipc_endpoint)
                    .await
                    .expect("Failed to create ipc client"),
            )
        }
        None
    }
    /// Returns an ipc handle
    pub fn ipc_handle(&self) -> Option<jsonrpsee::server::ServerHandle> {
        self.ipc_handle.clone()
    }
    /// Return an ipc endpoint
    pub fn ipc_endpoint(&self) -> Option<String> {
        self.ipc_endpoint.clone()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/src/metrics.rs | crates/rpc/rpc-builder/src/metrics.rs | use jsonrpsee::{
core::middleware::{Batch, Notification},
server::middleware::rpc::RpcServiceT,
types::Request,
MethodResponse, RpcModule,
};
use reth_metrics::{
metrics::{Counter, Histogram},
Metrics,
};
use std::{
collections::HashMap,
future::Future,
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::Instant,
};
use tower::Layer;
/// Metrics for the RPC server.
///
/// Metrics are divided into two categories:
/// - Connection metrics: metrics for the connection (e.g. number of connections opened, relevant
///   for WS and IPC)
/// - Request metrics: metrics for each RPC method (e.g. number of calls started, time taken to
///   process a call)
#[derive(Default, Debug, Clone)]
pub(crate) struct RpcRequestMetrics {
    /// Shared so the per-connection service and its response futures can all
    /// update the same counters cheaply.
    inner: Arc<RpcServerMetricsInner>,
}
impl RpcRequestMetrics {
    /// Creates the metrics container for the given module and transport.
    pub(crate) fn new(module: &RpcModule<()>, transport: RpcTransport) -> Self {
        // Pre-create one set of call metrics per registered method name so the
        // request hot path only performs a map lookup.
        let call_metrics = module
            .method_names()
            .map(|method| (method, RpcServerCallMetrics::new_with_labels(&[("method", method)])))
            .collect();
        Self {
            inner: Arc::new(RpcServerMetricsInner {
                connection_metrics: transport.connection_metrics(),
                call_metrics,
            }),
        }
    }
    /// Creates a new instance of the metrics layer for HTTP.
    pub(crate) fn http(module: &RpcModule<()>) -> Self {
        Self::new(module, RpcTransport::Http)
    }
    /// Creates a new instance of the metrics layer for same port.
    ///
    /// Note: currently it's not possible to track transport specific metrics for a server that runs http and ws on the same port: <https://github.com/paritytech/jsonrpsee/issues/1345> until we have this feature we will use the http metrics for this case.
    pub(crate) fn same_port(module: &RpcModule<()>) -> Self {
        Self::http(module)
    }
    /// Creates a new instance of the metrics layer for Ws.
    pub(crate) fn ws(module: &RpcModule<()>) -> Self {
        Self::new(module, RpcTransport::WebSocket)
    }
    /// Creates a new instance of the metrics layer for Ipc.
    pub(crate) fn ipc(module: &RpcModule<()>) -> Self {
        Self::new(module, RpcTransport::Ipc)
    }
}
// Tower layer integration: wraps each per-connection service with the metrics
// middleware, sharing the same `Arc`'d counters across all connections.
impl<S> Layer<S> for RpcRequestMetrics {
    type Service = RpcRequestMetricsService<S>;
    fn layer(&self, inner: S) -> Self::Service {
        RpcRequestMetricsService::new(inner, self.clone())
    }
}
/// Metrics for the RPC server
///
/// Shared behind an `Arc` by [`RpcRequestMetrics`]; never mutated after creation,
/// only the contained counters/histograms are updated.
#[derive(Default, Clone, Debug)]
struct RpcServerMetricsInner {
    /// Connection metrics per transport type
    connection_metrics: RpcServerConnectionMetrics,
    /// Call metrics per RPC method
    call_metrics: HashMap<&'static str, RpcServerCallMetrics>,
}
/// A [`RpcServiceT`] middleware that captures RPC metrics for the server.
///
/// This is created per connection and captures metrics for each request.
#[derive(Clone, Debug)]
pub struct RpcRequestMetricsService<S> {
    /// The metrics collector for RPC requests
    metrics: RpcRequestMetrics,
    /// The inner service being wrapped
    inner: S,
}
impl<S> RpcRequestMetricsService<S> {
    /// Wraps `service`, counting this construction as an opened connection.
    pub(crate) fn new(service: S, metrics: RpcRequestMetrics) -> Self {
        // this instance is kept alive for the duration of the connection;
        // the matching `connections_closed_total` increment happens in `Drop`.
        metrics.inner.connection_metrics.connections_opened_total.increment(1);
        Self { inner: service, metrics }
    }
}
impl<S> RpcServiceT for RpcRequestMetricsService<S>
where
    S: RpcServiceT<MethodResponse = MethodResponse> + Send + Sync + Clone + 'static,
{
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;
    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = S::MethodResponse> + Send + 'a {
        // Count the request as started on the transport and, if the method is
        // known, on the per-method counters as well.
        self.metrics.inner.connection_metrics.requests_started_total.increment(1);
        let call_metrics = self.metrics.inner.call_metrics.get_key_value(req.method.as_ref());
        if let Some((_, call_metrics)) = &call_metrics {
            call_metrics.started_total.increment(1);
        }
        // Wrap the inner call so completion updates duration/outcome metrics.
        MeteredRequestFuture {
            fut: self.inner.call(req),
            started_at: Instant::now(),
            metrics: self.metrics.clone(),
            method: call_metrics.map(|(method, _)| *method),
        }
    }
    // Batches and notifications are forwarded without per-request metering.
    fn batch<'a>(&self, req: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        self.inner.batch(req)
    }
    fn notification<'a>(
        &self,
        n: Notification<'a>,
    ) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.inner.notification(n)
    }
}
impl<S> Drop for RpcRequestMetricsService<S> {
    fn drop(&mut self) {
        // update connection metrics, connection closed
        // (pairs with the `connections_opened_total` increment in `new`)
        self.metrics.inner.connection_metrics.connections_closed_total.increment(1);
    }
}
/// Response future to update the metrics for a single request/response pair.
#[pin_project::pin_project]
pub struct MeteredRequestFuture<F> {
    /// The wrapped response future.
    #[pin]
    fut: F,
    /// time when the request started
    started_at: Instant,
    /// metrics for the method call
    metrics: RpcRequestMetrics,
    /// the method name if known (i.e. registered in the per-method metrics map)
    method: Option<&'static str>,
}
impl<F> std::fmt::Debug for MeteredRequestFuture<F> {
    // Opaque representation: the wrapped future is not required to implement `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "MeteredRequestFuture")
    }
}
impl<F: Future<Output = MethodResponse>> Future for MeteredRequestFuture<F> {
    type Output = F::Output;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        let res = this.fut.poll(cx);
        if let Poll::Ready(resp) = &res {
            // Elapsed time spans from future creation to the poll that produced
            // the response, i.e. the whole request lifetime.
            let elapsed = this.started_at.elapsed().as_secs_f64();
            // update transport metrics
            this.metrics.inner.connection_metrics.requests_finished_total.increment(1);
            this.metrics.inner.connection_metrics.request_time_seconds.record(elapsed);
            // update call metrics (only for methods known at construction time)
            if let Some(call_metrics) =
                this.method.and_then(|method| this.metrics.inner.call_metrics.get(method))
            {
                call_metrics.time_seconds.record(elapsed);
                if resp.is_success() {
                    call_metrics.successful_total.increment(1);
                } else {
                    call_metrics.failed_total.increment(1);
                }
            }
        }
        res
    }
}
/// The transport protocol used for the RPC connection.
///
/// Used purely as a metric label; see [`RpcTransport::as_str`].
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub(crate) enum RpcTransport {
    Http,
    WebSocket,
    Ipc,
}
impl RpcTransport {
    /// Returns the string label used for the `transport` metric attribute.
    pub(crate) const fn as_str(&self) -> &'static str {
        match *self {
            Self::Http => "http",
            Self::WebSocket => "ws",
            Self::Ipc => "ipc",
        }
    }
    /// Builds the connection metrics labeled with this transport.
    fn connection_metrics(&self) -> RpcServerConnectionMetrics {
        RpcServerConnectionMetrics::new_with_labels(&[("transport", self.as_str())])
    }
}
/// Metrics for the RPC connections
///
/// Labeled per transport (`http`/`ws`/`ipc`), see [`RpcTransport::connection_metrics`].
#[derive(Metrics, Clone)]
#[metrics(scope = "rpc_server.connections")]
struct RpcServerConnectionMetrics {
    /// The number of connections opened
    connections_opened_total: Counter,
    /// The number of connections closed
    connections_closed_total: Counter,
    /// The number of requests started
    requests_started_total: Counter,
    /// The number of requests finished
    requests_finished_total: Counter,
    /// Response time for a single request/response pair
    request_time_seconds: Histogram,
}
/// Metrics for the RPC calls
///
/// Labeled per method name, created eagerly in [`RpcRequestMetrics::new`].
#[derive(Metrics, Clone)]
#[metrics(scope = "rpc_server.calls")]
struct RpcServerCallMetrics {
    /// The number of calls started
    started_total: Counter,
    /// The number of successful calls
    successful_total: Counter,
    /// The number of failed calls
    failed_total: Counter,
    /// Response time for a single call
    time_seconds: Histogram,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/serde.rs | crates/rpc/rpc-builder/tests/it/serde.rs | //! various serde test
use crate::utils::launch_http;
use alloy_primitives::U256;
use jsonrpsee::{
core::{client::ClientT, traits::ToRpcParams},
types::Request,
};
use reth_rpc_server_types::RethRpcModule;
use serde_json::value::RawValue;
/// Thin wrapper forwarding an already-serialized JSON value as raw RPC params.
struct RawRpcParams(Box<RawValue>);
impl ToRpcParams for RawRpcParams {
    fn to_rpc_params(self) -> Result<Option<Box<RawValue>>, serde_json::Error> {
        Ok(Some(self.0))
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_balance_serde() {
    // Round-trip a raw `eth_getBalance` request through serde and the client.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let raw = r#"{"jsonrpc":"2.0","id":1,"method":"eth_getBalance","params":["0xaa00000000000000000000000000000000000000","0x898753d8fdd8d92c1907ca21e68c7970abd290c647a202091181deec3f30a0b2"]}"#;
    let request: Request<'_> = serde_json::from_str(raw).unwrap();
    let params =
        RawRpcParams(RawValue::from_string(request.params.unwrap().to_string()).unwrap());
    let client = handle.http_client().unwrap();
    client.request::<U256, _>("eth_getBalance", params).await.unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/http.rs | crates/rpc/rpc-builder/tests/it/http.rs | #![allow(unreachable_pub)]
//! Standalone http tests
use crate::utils::{launch_http, launch_http_ws, launch_ws};
use alloy_eips::{eip1898::LenientBlockNumberOrTag, BlockId, BlockNumberOrTag};
use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64};
use alloy_rpc_types_eth::{
transaction::TransactionRequest, Block, FeeHistory, Filter, Header, Index, Log,
PendingTransactionFilterKind, SyncStatus, Transaction, TransactionReceipt,
};
use alloy_rpc_types_trace::filter::TraceFilter;
use jsonrpsee::{
core::{
client::{ClientT, SubscriptionClientT},
params::ArrayParams,
},
http_client::HttpClient,
rpc_params,
types::error::ErrorCode,
};
use reth_ethereum_primitives::Receipt;
use reth_network_peers::NodeRecord;
use reth_rpc_api::{
clients::{AdminApiClient, EthApiClient},
DebugApiClient, EthCallBundleApiClient, EthFilterApiClient, NetApiClient, OtterscanClient,
TraceApiClient, Web3ApiClient,
};
use reth_rpc_server_types::RethRpcModule;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashSet;
fn is_unimplemented(err: jsonrpsee::core::client::Error) -> bool {
match err {
jsonrpsee::core::client::Error::Call(error_obj) => {
error_obj.code() == ErrorCode::InternalError.code() &&
error_obj.message() == "unimplemented"
}
_ => false,
}
}
/// Asserts that the given RPC call succeeds, panicking with the error otherwise.
async fn test_rpc_call_ok<R>(client: &HttpClient, method_name: &str, params: ArrayParams)
where
    R: DeserializeOwned,
{
    // Any error is a test failure; the decoded value itself is discarded.
    if let Err(e) = client.request::<R, _>(method_name, params).await {
        panic!("Expected successful response, got error: {e:?}");
    }
}
/// Asserts that the given RPC call fails, panicking if it unexpectedly succeeds.
async fn test_rpc_call_err<R>(client: &HttpClient, method_name: &str, params: ArrayParams)
where
    R: DeserializeOwned + std::fmt::Debug,
{
    match client.request::<R, _>(method_name, params).await {
        Err(_) => {}
        Ok(resp) => panic!("Expected error response, got successful response: {resp:?}"),
    }
}
/// Represents a builder for creating JSON-RPC requests.
#[derive(Clone, Serialize, Deserialize)]
pub struct RawRpcParamsBuilder {
    /// Method name; `build` panics when this is still unset.
    method: Option<String>,
    /// Positional parameters, already serialized to JSON values.
    params: Vec<Value>,
    /// Request id; defaults to 1.
    id: i32,
}
impl RawRpcParamsBuilder {
    /// Sets the method name for the JSON-RPC request.
    pub fn method(mut self, method: impl Into<String>) -> Self {
        self.method = Some(method.into());
        self
    }
    /// Adds a parameter to the JSON-RPC request.
    pub fn add_param<S: Serialize>(mut self, param: S) -> Self {
        let value = serde_json::to_value(param).expect("Failed to serialize parameter");
        self.params.push(value);
        self
    }
    /// Sets the ID for the JSON-RPC request.
    pub const fn set_id(mut self, id: i32) -> Self {
        self.id = id;
        self
    }
    /// Constructs the JSON-RPC request string based on the provided configurations.
    pub fn build(self) -> String {
        let Self { method, params, id } = self;
        let method = method.unwrap_or_else(|| panic!("JSON-RPC method not set"));
        let joined = params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",");
        format!(r#"{{"jsonrpc":"2.0","id":{id},"method":"{method}","params":[{joined}]}}"#)
    }
}
impl Default for RawRpcParamsBuilder {
    fn default() -> Self {
        // Id defaults to 1, matching typical single-request payloads.
        Self { method: None, params: Vec::default(), id: 1 }
    }
}
/// Exercises the full filter API lifecycle: create filters, poll for changes,
/// query logs, and uninstall.
async fn test_filter_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    EthFilterApiClient::<Transaction>::new_filter(client, Filter::default()).await.unwrap();
    EthFilterApiClient::<Transaction>::new_pending_transaction_filter(client, None).await.unwrap();
    EthFilterApiClient::<Transaction>::new_pending_transaction_filter(
        client,
        Some(PendingTransactionFilterKind::Full),
    )
    .await
    .unwrap();
    // Block filter: creating it and immediately polling must both succeed.
    let id = EthFilterApiClient::<Transaction>::new_block_filter(client).await.unwrap();
    EthFilterApiClient::<Transaction>::filter_changes(client, id.clone()).await.unwrap();
    EthFilterApiClient::<Transaction>::logs(client, Filter::default()).await.unwrap();
    // Log filter: fetch matching logs, then uninstall it again.
    let id =
        EthFilterApiClient::<Transaction>::new_filter(client, Filter::default()).await.unwrap();
    EthFilterApiClient::<Transaction>::filter_logs(client, id.clone()).await.unwrap();
    EthFilterApiClient::<Transaction>::uninstall_filter(client, id).await.unwrap();
}
/// Exercises the `admin_` peer-management endpoints with a well-formed enode
/// URL; every call is expected to succeed.
async fn test_basic_admin_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    // Enode URL with a separate discovery port (`discport`).
    let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301";
    let node: NodeRecord = url.parse().unwrap();
    AdminApiClient::add_peer(client, node).await.unwrap();
    AdminApiClient::remove_peer(client, node.into()).await.unwrap();
    AdminApiClient::add_trusted_peer(client, node.into()).await.unwrap();
    AdminApiClient::remove_trusted_peer(client, node.into()).await.unwrap();
    AdminApiClient::node_info(client).await.unwrap();
}
/// Smoke-tests the `eth_` namespace over a test server: endpoints expected to
/// succeed use `unwrap`, endpoints expected to error use `unwrap_err`, and
/// unimplemented ones must report the dedicated "unimplemented" error.
async fn test_basic_eth_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    // Default-valued fixtures shared by the calls below.
    let address = Address::default();
    let index = Index::default();
    let hash = B256::default();
    let tx_hash = TxHash::default();
    let block_number = BlockNumberOrTag::default();
    let call_request = TransactionRequest::default();
    let transaction_request = TransactionRequest::default();
    let bytes = Bytes::default();
    // Pre-signed raw transaction bytes for `eth_sendRawTransaction`.
    let tx = Bytes::from(hex!(
        "02f871018303579880850555633d1b82520894eee27662c2b8eba3cd936a23f039f3189633e4c887ad591c62bdaeb180c080a07ea72c68abfb8fca1bd964f0f99132ed9280261bdca3e549546c0205e800f7d0a05b4ef3039e9c9b9babc179a1878fb825b5aaf5aed2fa8744854150157b08d6f3"
    ));
    // Minimal EIP-712 typed-data payload for the signing endpoint.
    let typed_data = serde_json::from_str(
        r#"{
"types": {
"EIP712Domain": []
},
"primaryType": "EIP712Domain",
"domain": {},
"message": {}
}"#,
    )
    .unwrap();
    // Implemented
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::protocol_version(
        client,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::chain_id(client)
        .await
        .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::accounts(client)
        .await
        .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::get_account(
        client,
        address,
        block_number.into(),
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_number(client)
        .await
        .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::get_code(
        client, address, None,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::send_raw_transaction(
        client, tx,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::fee_history(
        client,
        U64::from(0),
        block_number,
        None,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::balance(
        client, address, None,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::transaction_count(
        client, address, None,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::storage_at(
        client,
        address,
        U256::default().into(),
        None,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_by_hash(
        client, hash, false,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_by_number(
        client,
        block_number,
        false,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_transaction_count_by_number(
        client,
        block_number,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_transaction_count_by_hash(
        client, hash,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_uncles_count_by_hash(client, hash)
        .await
        .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::block_uncles_count_by_number(
        client,
        block_number,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::uncle_by_block_hash_and_index(
        client, hash, index,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::uncle_by_block_number_and_index(
        client,
        block_number,
        index,
    )
    .await
    .unwrap();
    // Signing endpoints are expected to error here (note the `unwrap_err`).
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::sign(
        client,
        address,
        bytes.clone(),
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::sign_typed_data(
        client, address, typed_data,
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::transaction_by_hash(
        client, tx_hash,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::transaction_by_block_hash_and_index(
        client, hash, index,
    )
    .await
    .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::transaction_by_block_number_and_index(
        client,
        block_number,
        index,
    )
    .await
    .unwrap();
    // Call-style endpoints are expected to error for these default inputs.
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::create_access_list(
        client,
        call_request.clone(),
        Some(block_number.into()),
        None,
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::estimate_gas(
        client,
        call_request.clone(),
        Some(block_number.into()),
        None,
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::call(
        client,
        call_request.clone(),
        Some(block_number.into()),
        None,
        None,
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::syncing(client)
        .await
        .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::send_transaction(
        client,
        transaction_request.clone(),
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::sign_transaction(
        client,
        transaction_request,
    )
    .await
    .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::hashrate(client)
        .await
        .unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::submit_hashrate(
        client,
        U256::default(),
        B256::default(),
    )
    .await
    .unwrap();
    // Fee queries error against this test setup (note the `unwrap_err`).
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::gas_price(client)
        .await
        .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::max_priority_fee_per_gas(client)
        .await
        .unwrap_err();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::get_proof(
        client,
        address,
        vec![],
        None,
    )
    .await
    .unwrap();
    // Unimplemented: these must surface the dedicated "unimplemented" error.
    assert!(is_unimplemented(
        EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::author(client)
            .await
            .err()
            .unwrap()
    ));
    assert!(is_unimplemented(
        EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::is_mining(client)
            .await
            .err()
            .unwrap()
    ));
    assert!(is_unimplemented(
        EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::get_work(client)
            .await
            .err()
            .unwrap()
    ));
    assert!(is_unimplemented(
        EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::submit_work(
            client,
            B64::default(),
            B256::default(),
            B256::default()
        )
        .await
        .err()
        .unwrap()
    ));
    // Bundle simulation with a default (empty) bundle is expected to error.
    EthCallBundleApiClient::call_bundle(client, Default::default()).await.unwrap_err();
}
/// Smoke-tests the `debug_` raw-data endpoints.
async fn test_basic_debug_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    let default_block = BlockId::Number(BlockNumberOrTag::default());
    DebugApiClient::<TransactionRequest>::raw_header(client, default_block).await.unwrap();
    // `raw_block` is expected to return an error for this block id.
    DebugApiClient::<TransactionRequest>::raw_block(client, default_block).await.unwrap_err();
    DebugApiClient::<TransactionRequest>::raw_transaction(client, B256::default()).await.unwrap();
    DebugApiClient::<TransactionRequest>::raw_receipts(client, default_block).await.unwrap();
    DebugApiClient::<TransactionRequest>::bad_blocks(client).await.unwrap();
}
/// Exercises the three `net_` endpoints; all are expected to succeed.
async fn test_basic_net_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    // The calls are independent, so their order is arbitrary.
    NetApiClient::is_listening(client).await.unwrap();
    NetApiClient::peer_count(client).await.unwrap();
    NetApiClient::version(client).await.unwrap();
}
/// Exercises the `trace_` namespace; most endpoints are expected to error for
/// these default inputs (note which calls end in `unwrap_err`/`err().unwrap()`).
async fn test_basic_trace_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    let block_id = BlockId::Number(BlockNumberOrTag::default());
    // All-default filter with no pagination (`after`/`count` unset).
    let trace_filter = TraceFilter {
        from_block: Default::default(),
        to_block: Default::default(),
        from_address: Default::default(),
        to_address: Default::default(),
        mode: Default::default(),
        after: None,
        count: None,
    };
    TraceApiClient::<TransactionRequest>::trace_raw_transaction(
        client,
        Bytes::default(),
        HashSet::default(),
        None,
    )
    .await
    .unwrap_err();
    TraceApiClient::<TransactionRequest>::trace_call_many(
        client,
        vec![],
        Some(BlockNumberOrTag::Latest.into()),
    )
    .await
    .unwrap_err();
    TraceApiClient::<TransactionRequest>::replay_transaction(
        client,
        B256::default(),
        HashSet::default(),
    )
    .await
    .err()
    .unwrap();
    TraceApiClient::<TransactionRequest>::trace_block(client, block_id).await.unwrap_err();
    TraceApiClient::<TransactionRequest>::replay_block_transactions(
        client,
        block_id,
        HashSet::default(),
    )
    .await
    .unwrap_err();
    // The catch-all filter is the one call expected to succeed.
    TraceApiClient::<TransactionRequest>::trace_filter(client, trace_filter).await.unwrap();
}
/// Exercises both `web3_` endpoints; each is expected to succeed.
async fn test_basic_web3_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    // Hash an empty payload, then fetch the client version string.
    Web3ApiClient::sha3(client, Bytes::default()).await.unwrap();
    Web3ApiClient::client_version(client).await.unwrap();
}
/// Exercises the `ots_` (Otterscan) namespace; see the `unwrap`/`unwrap_err`
/// split below for which endpoints are expected to succeed or fail.
async fn test_basic_otterscan_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync,
{
    // Default-valued fixtures shared by the calls below.
    let address = Address::default();
    let sender = Address::default();
    let tx_hash = TxHash::default();
    let block_number = 1;
    let page_number = 1;
    let page_size = 10;
    let nonce = 1;
    let block_hash = B256::default();
    OtterscanClient::<Transaction, Header>::get_header_by_number(
        client,
        LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)),
    )
    .await
    .unwrap();
    OtterscanClient::<Transaction, Header>::has_code(client, address, None).await.unwrap();
    OtterscanClient::<Transaction, Header>::has_code(client, address, Some(block_number.into()))
        .await
        .unwrap();
    OtterscanClient::<Transaction, Header>::get_api_level(client).await.unwrap();
    OtterscanClient::<Transaction, Header>::get_internal_operations(client, tx_hash).await.unwrap();
    OtterscanClient::<Transaction, Header>::get_transaction_error(client, tx_hash).await.unwrap();
    OtterscanClient::<Transaction, Header>::trace_transaction(client, tx_hash).await.unwrap();
    // Block-detail lookups are expected to error (note the `unwrap_err`).
    OtterscanClient::<Transaction, Header>::get_block_details(
        client,
        LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)),
    )
    .await
    .unwrap_err();
    OtterscanClient::<Transaction, Header>::get_block_details(client, Default::default())
        .await
        .unwrap_err();
    OtterscanClient::<Transaction, Header>::get_block_details_by_hash(client, block_hash)
        .await
        .unwrap_err();
    OtterscanClient::<Transaction, Header>::get_block_transactions(
        client,
        LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)),
        page_number,
        page_size,
    )
    .await
    .err()
    .unwrap();
    // Search endpoints must report the dedicated "unimplemented" error.
    assert!(is_unimplemented(
        OtterscanClient::<Transaction, Header>::search_transactions_before(
            client,
            address,
            LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)),
            page_size,
        )
        .await
        .err()
        .unwrap()
    ));
    assert!(is_unimplemented(
        OtterscanClient::<Transaction, Header>::search_transactions_after(
            client,
            address,
            LenientBlockNumberOrTag::new(BlockNumberOrTag::Number(block_number)),
            page_size,
        )
        .await
        .err()
        .unwrap()
    ));
    // Sender/nonce lookup must not error; contract-creator lookup yields Ok(None).
    assert!(OtterscanClient::<Transaction, Header>::get_transaction_by_sender_and_nonce(
        client, sender, nonce
    )
    .await
    .err()
    .is_none());
    assert!(OtterscanClient::<Transaction, Header>::get_contract_creator(client, address)
        .await
        .unwrap()
        .is_none());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_filter_functions_http() {
    reth_tracing::init_test_tracing();
    // Filter endpoints live in the `eth` module.
    let server = launch_http(vec![RethRpcModule::Eth]).await;
    test_filter_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_admin_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `admin` module.
    let server = launch_http(vec![RethRpcModule::Admin]).await;
    test_basic_admin_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_admin_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `admin` module.
    let server = launch_ws(vec![RethRpcModule::Admin]).await;
    test_basic_admin_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_admin_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Admin]).await;
    test_basic_admin_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_eth_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `eth` module.
    let server = launch_http(vec![RethRpcModule::Eth]).await;
    test_basic_eth_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_eth_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `eth` module.
    let server = launch_ws(vec![RethRpcModule::Eth]).await;
    test_basic_eth_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_eth_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Eth]).await;
    test_basic_eth_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_debug_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `debug` module.
    let server = launch_http(vec![RethRpcModule::Debug]).await;
    test_basic_debug_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_debug_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `debug` module.
    let server = launch_ws(vec![RethRpcModule::Debug]).await;
    test_basic_debug_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_debug_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Debug]).await;
    test_basic_debug_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_net_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `net` module.
    let server = launch_http(vec![RethRpcModule::Net]).await;
    test_basic_net_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_net_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `net` module.
    let server = launch_ws(vec![RethRpcModule::Net]).await;
    test_basic_net_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_net_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Net]).await;
    test_basic_net_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_trace_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `trace` module.
    let server = launch_http(vec![RethRpcModule::Trace]).await;
    test_basic_trace_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_trace_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `trace` module.
    let server = launch_ws(vec![RethRpcModule::Trace]).await;
    test_basic_trace_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_trace_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Trace]).await;
    test_basic_trace_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_web3_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `web3` module.
    let server = launch_http(vec![RethRpcModule::Web3]).await;
    test_basic_web3_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_web3_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `web3` module.
    let server = launch_ws(vec![RethRpcModule::Web3]).await;
    test_basic_web3_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_web3_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Web3]).await;
    test_basic_web3_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_otterscan_functions_http() {
    reth_tracing::init_test_tracing();
    // HTTP-only server exposing the `ots` (Otterscan) module.
    let server = launch_http(vec![RethRpcModule::Ots]).await;
    test_basic_otterscan_calls(&server.http_client().unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_otterscan_functions_ws() {
    reth_tracing::init_test_tracing();
    // WS-only server exposing the `ots` (Otterscan) module.
    let server = launch_ws(vec![RethRpcModule::Ots]).await;
    test_basic_otterscan_calls(&server.ws_client().await.unwrap()).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_otterscan_functions_http_and_ws() {
    reth_tracing::init_test_tracing();
    // Both transports are enabled; the calls themselves go over HTTP.
    let server = launch_http_ws(vec![RethRpcModule::Ots]).await;
    test_basic_otterscan_calls(&server.http_client().unwrap()).await;
}
// <https://github.com/paradigmxyz/reth/issues/5830>
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_logs_args() {
    reth_tracing::init_test_tracing();
    let handle = launch_http_ws(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    // `eth_getLogs` with a filter referencing an unknown block hash.
    let mut filter_params = ArrayParams::default();
    filter_params.insert(serde_json::json!({"blockHash":"0x58dc57ab582b282c143424bd01e8d923cddfdcda9455bad02a29522f6274a948"})).unwrap();
    let resp = client.request::<Vec<Log>, _>("eth_getLogs", filter_params).await;
    // The referenced block does not exist, so the call must fail.
    assert!(resp.is_err());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_get_block_by_number_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    // Block number plus the full-transaction flag is the well-formed call.
    test_rpc_call_ok::<Option<Block>>(&client, "eth_getBlockByNumber", rpc_params!["0x1b4", true])
        .await;
    // A non-boolean second field and a missing second field must both be rejected.
    for bad_params in [rpc_params!["0x1b4", "0x1b4"], rpc_params!["0x1b4"]] {
        test_rpc_call_err::<Option<Block>>(&client, "eth_getBlockByNumber", bad_params).await;
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_get_block_by_hash_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    const HASH: &str = "0xdc0818cf78f21a8e70579cb46a43643f78291264dda342ae31049421c82d21ae";
    // Block hash plus the full-transaction flag is the well-formed call.
    test_rpc_call_ok::<Option<Block>>(&client, "eth_getBlockByHash", rpc_params![HASH, false])
        .await;
    // A non-boolean second field and a missing second field must both be rejected.
    for bad_params in [rpc_params![HASH, "0x1b4"], rpc_params![HASH]] {
        test_rpc_call_err::<Option<Block>>(&client, "eth_getBlockByHash", bad_params).await;
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_get_code_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    const ADDRESS: &str = "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b";
    // Explicit block number.
    test_rpc_call_ok::<Bytes>(&client, "eth_getCode", rpc_params![ADDRESS, "0x2"]).await;
    // Each accepted default-block tag.
    for tag in ["earliest", "latest", "pending"] {
        test_rpc_call_ok::<Bytes>(&client, "eth_getCode", rpc_params![ADDRESS, tag]).await;
    }
    // The block parameter is optional.
    test_rpc_call_ok::<Bytes>(&client, "eth_getCode", rpc_params![ADDRESS]).await;
    // "finalized" is rejected here, as is a non-string block parameter.
    test_rpc_call_err::<Bytes>(&client, "eth_getCode", rpc_params![ADDRESS, "finalized"]).await;
    test_rpc_call_err::<Bytes>(&client, "eth_getCode", rpc_params![ADDRESS, false]).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_block_number_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    // The canonical form takes no parameters.
    test_rpc_call_ok::<U256>(&client, "eth_blockNumber", rpc_params![]).await;
    // Superfluous parameters are tolerated rather than rejected.
    for extra in ["finalized", "0x2"] {
        test_rpc_call_ok::<U256>(&client, "eth_blockNumber", rpc_params![extra]).await;
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_chain_id_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    // The canonical form takes no parameters.
    test_rpc_call_ok::<Option<U64>>(&client, "eth_chainId", rpc_params![]).await;
    // Superfluous parameters are tolerated rather than rejected.
    for extra in ["finalized", "0x2"] {
        test_rpc_call_ok::<Option<U64>>(&client, "eth_chainId", rpc_params![extra]).await;
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_syncing_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    // The canonical form takes no parameters.
    test_rpc_call_ok::<Option<SyncStatus>>(&client, "eth_syncing", rpc_params![]).await;
    // Superfluous parameters are tolerated rather than rejected.
    for extra in ["latest", "earliest", "pending", "0x2"] {
        test_rpc_call_ok::<Option<SyncStatus>>(&client, "eth_syncing", rpc_params![extra]).await;
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_protocol_version_rpc_call() {
    reth_tracing::init_test_tracing();
    // Launch HTTP server with the `eth` RPC module.
    let handle = launch_http(vec![RethRpcModule::Eth]).await;
    let client = handle.http_client().unwrap();
    // The canonical form takes no parameters.
    test_rpc_call_ok::<U64>(&client, "eth_protocolVersion", rpc_params![]).await;
    // Superfluous parameters are tolerated rather than rejected.
    for extra in ["latest", "earliest", "pending", "0x2"] {
        test_rpc_call_ok::<U64>(&client, "eth_protocolVersion", rpc_params![extra]).await;
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth_coinbase_rpc_call() {
reth_tracing::init_test_tracing();
// Launch HTTP server with the specified RPC module
let handle = launch_http(vec![RethRpcModule::Eth]).await;
let client = handle.http_client().unwrap();
// Requesting coinbase address without any parameter should return Unimplemented
match client.request::<Address, _>("eth_coinbase", rpc_params![]).await {
Ok(_) => {
// If there's a response, it's unexpected, panic
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/middleware.rs | crates/rpc/rpc-builder/tests/it/middleware.rs | use crate::utils::{test_address, test_rpc_builder};
use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction, TransactionRequest};
use jsonrpsee::{
core::middleware::{Batch, Notification},
server::middleware::rpc::RpcServiceT,
types::Request,
};
use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig};
use reth_rpc_eth_api::EthApiClient;
use reth_rpc_server_types::RpcModuleSelection;
use std::{
future::Future,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use tower::Layer;
/// Tower layer that wraps an RPC service with [`MyMiddlewareService`].
#[derive(Clone, Default)]
struct MyMiddlewareLayer {
    /// Shared call counter; cloned into every layered service so the test can
    /// observe how many method calls passed through.
    count: Arc<AtomicUsize>,
}
impl<S> Layer<S> for MyMiddlewareLayer {
type Service = MyMiddlewareService<S>;
fn layer(&self, inner: S) -> Self::Service {
MyMiddlewareService { service: inner, count: self.count.clone() }
}
}
/// RPC service wrapper that counts method calls before delegating.
#[derive(Clone)]
struct MyMiddlewareService<S> {
    /// Inner service that actually handles the request.
    service: S,
    /// Number of method calls observed by this middleware.
    count: Arc<AtomicUsize>,
}
impl<S> RpcServiceT for MyMiddlewareService<S>
where
    S: RpcServiceT + Send + Sync + Clone + 'static,
{
    // Pass the inner service's response types through unchanged.
    type MethodResponse = S::MethodResponse;
    type NotificationResponse = S::NotificationResponse;
    type BatchResponse = S::BatchResponse;

    // Intercepts every method call: logs the method name, forwards the request
    // to the inner service, and bumps the shared counter once the inner
    // response has been produced.
    fn call<'a>(&self, req: Request<'a>) -> impl Future<Output = Self::MethodResponse> + Send + 'a {
        tracing::info!("MyMiddleware processed call {}", req.method);
        // Clone the shared state into the future so it can outlive `&self`.
        let count = self.count.clone();
        let service = self.service.clone();
        Box::pin(async move {
            let rp = service.call(req).await;
            // Modify the state.
            count.fetch_add(1, Ordering::Relaxed);
            rp
        })
    }

    // Batches and notifications pass straight through without counting.
    fn batch<'a>(&self, req: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
        self.service.batch(req)
    }

    fn notification<'a>(
        &self,
        n: Notification<'a>,
    ) -> impl Future<Output = Self::NotificationResponse> + Send + 'a {
        self.service.notification(n)
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn test_rpc_middleware() {
    // Assemble all HTTP modules on top of a bootstrapped eth API.
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let modules =
        builder.build(TransportRpcModuleConfig::set_http(RpcModuleSelection::All), eth_api);
    // Install the counting middleware and start an HTTP server.
    let layer = MyMiddlewareLayer::default();
    let handle = RpcServerConfig::http(Default::default())
        .with_http_address(test_address())
        .set_rpc_middleware(layer.clone())
        .start(&modules)
        .await
        .unwrap();
    // A single RPC call should bump the counter exactly once.
    let client = handle.http_client().unwrap();
    EthApiClient::<TransactionRequest, Transaction, Block, Receipt, Header>::protocol_version(
        &client,
    )
    .await
    .unwrap();
    assert_eq!(layer.count.load(Ordering::Relaxed), 1);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/auth.rs | crates/rpc/rpc-builder/tests/it/auth.rs | //! Auth server tests
use crate::utils::launch_auth;
use alloy_primitives::U64;
use alloy_rpc_types_engine::{
ExecutionPayloadInputV2, ExecutionPayloadV1, ForkchoiceState, PayloadId,
};
use jsonrpsee::core::client::{ClientT, SubscriptionClientT};
use reth_ethereum_engine_primitives::EthEngineTypes;
use reth_ethereum_primitives::{Block, TransactionSigned};
use reth_primitives_traits::block::Block as _;
use reth_rpc_api::clients::EngineApiClient;
use reth_rpc_layer::JwtSecret;
/// Fires one request at each Engine API endpoint. Responses are deliberately
/// ignored (`#[expect(unused_must_use)]`): this only checks that the routes
/// are wired up and reachable, not their results.
#[expect(unused_must_use)]
async fn test_basic_engine_calls<C>(client: &C)
where
    C: ClientT + SubscriptionClientT + Sync + EngineApiClient<EthEngineTypes>,
{
    // A default sealed block serves as the payload fixture for both versions.
    let block = Block::default().seal_slow();
    EngineApiClient::new_payload_v1(
        client,
        // `block` is cloned because it is consumed again by the v2 call below.
        ExecutionPayloadV1::from_block_unchecked(block.hash(), &block.clone().into_block()),
    )
    .await;
    EngineApiClient::new_payload_v2(
        client,
        ExecutionPayloadInputV2 {
            execution_payload: ExecutionPayloadV1::from_block_slow::<TransactionSigned, _>(
                &block.into_block(),
            ),
            withdrawals: None,
        },
    )
    .await;
    EngineApiClient::fork_choice_updated_v1(client, ForkchoiceState::default(), None).await;
    // Zeroed payload id used purely as a probe value.
    EngineApiClient::get_payload_v1(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await;
    EngineApiClient::get_payload_v2(client, PayloadId::new([0, 0, 0, 0, 0, 0, 0, 0])).await;
    EngineApiClient::get_payload_bodies_by_hash_v1(client, vec![]).await;
    EngineApiClient::get_payload_bodies_by_range_v1(client, U64::ZERO, U64::from(1u64)).await;
    EngineApiClient::exchange_capabilities(client, vec![]).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_auth_endpoints_http() {
    reth_tracing::init_test_tracing();
    // Start an auth server with a fresh JWT secret and hit it over HTTP.
    let jwt = JwtSecret::random();
    let server = launch_auth(jwt).await;
    test_basic_engine_calls(&server.http_client()).await
}
#[tokio::test(flavor = "multi_thread")]
async fn test_auth_endpoints_ws() {
    reth_tracing::init_test_tracing();
    // Start an auth server with a fresh JWT secret and hit it over WebSocket.
    let jwt = JwtSecret::random();
    let server = launch_auth(jwt).await;
    test_basic_engine_calls(&server.ws_client().await).await
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/utils.rs | crates/rpc/rpc-builder/tests/it/utils.rs | use alloy_rpc_types_engine::{ClientCode, ClientVersionV1};
use reth_chainspec::MAINNET;
use reth_consensus::noop::NoopConsensus;
use reth_engine_primitives::ConsensusEngineHandle;
use reth_ethereum_engine_primitives::EthEngineTypes;
use reth_ethereum_primitives::EthPrimitives;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_node_ethereum::EthereumEngineValidator;
use reth_payload_builder::test_utils::spawn_test_payload_service;
use reth_provider::test_utils::NoopProvider;
use reth_rpc_builder::{
auth::{AuthRpcModule, AuthServerConfig, AuthServerHandle},
RpcModuleBuilder, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig,
};
use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi};
use reth_rpc_layer::JwtSecret;
use reth_rpc_server_types::RpcModuleSelection;
use reth_tasks::TokioTaskExecutor;
use reth_transaction_pool::{
noop::NoopTransactionPool,
test_utils::{TestPool, TestPoolBuilder},
};
use tokio::sync::mpsc::unbounded_channel;
/// Socket address `0.0.0.0:0`: the unspecified IPv4 address with port 0, so
/// the OS assigns a free port when the server binds.
pub const fn test_address() -> SocketAddr {
    SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0))
}
/// Launches a new server for the auth module
pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle {
    let config = AuthServerConfig::builder(secret).socket_addr(test_address()).build();
    // The receiver half is dropped immediately; consensus-engine messages are
    // not consumed in these tests.
    let (tx, _rx) = unbounded_channel();
    let beacon_engine_handle = ConsensusEngineHandle::<EthEngineTypes>::new(tx);
    // Client identification metadata handed to the engine API.
    let client = ClientVersionV1 {
        code: ClientCode::RH,
        name: "Reth".to_string(),
        version: "v0.2.0-beta.5".to_string(),
        commit: "defa64b2".to_string(),
    };
    // Wire the engine API with no-op/test implementations of its dependencies.
    let engine_api = EngineApi::new(
        NoopProvider::default(),
        MAINNET.clone(),
        beacon_engine_handle,
        spawn_test_payload_service().into(),
        NoopTransactionPool::default(),
        Box::<TokioTaskExecutor>::default(),
        client,
        EngineCapabilities::default(),
        EthereumEngineValidator::new(MAINNET.clone()),
        false,
    );
    let module = AuthRpcModule::new(engine_api);
    module.start_server(config).await.unwrap()
}
/// Launches a server exposing the given modules over HTTP only.
pub async fn launch_http(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle {
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let transport = TransportRpcModuleConfig::set_http(modules);
    let server = builder.build(transport, eth_api);
    let config = RpcServerConfig::http(Default::default()).with_http_address(test_address());
    config.start(&server).await.unwrap()
}
/// Launches a server exposing the given modules over websocket only.
pub async fn launch_ws(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle {
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let transport = TransportRpcModuleConfig::set_ws(modules);
    let server = builder.build(transport, eth_api);
    let config = RpcServerConfig::ws(Default::default()).with_ws_address(test_address());
    config.start(&server).await.unwrap()
}
/// Launches a new server with http and ws and with the given modules.
///
/// HTTP and WS are bound to separate (OS-assigned) ports.
pub async fn launch_http_ws(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle {
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let modules = modules.into();
    // Both transports serve the same module selection.
    let server = builder
        .build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), eth_api);
    // Fix: the ws address was previously set twice in a row; the second call only
    // overwrote the first, so a single call suffices.
    RpcServerConfig::ws(Default::default())
        .with_ws_address(test_address())
        .with_http(Default::default())
        .with_http_address(test_address())
        .start(&server)
        .await
        .unwrap()
}
/// Launches a new server with http and ws and with the given modules on the same port.
pub async fn launch_http_ws_same_port(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle {
    let builder = test_rpc_builder();
    let selection = modules.into();
    let eth_api = builder.bootstrap_eth_api();
    let transport = TransportRpcModuleConfig::set_ws(selection.clone()).with_http(selection);
    let server = builder.build(transport, eth_api);
    // Bind both transports to the exact same socket address.
    let addr = test_address();
    RpcServerConfig::ws(Default::default())
        .with_ws_address(addr)
        .with_http(Default::default())
        .with_http_address(addr)
        .start(&server)
        .await
        .unwrap()
}
/// Returns an [`RpcModuleBuilder`] wired up with no-op/testing components.
pub fn test_rpc_builder(
) -> RpcModuleBuilder<EthPrimitives, NoopProvider, TestPool, NoopNetwork, EthEvmConfig, NoopConsensus>
{
    // Every component is a stub: the tests only exercise server wiring.
    let components = RpcModuleBuilder::default()
        .with_provider(NoopProvider::default())
        .with_pool(TestPoolBuilder::default().into())
        .with_network(NoopNetwork::default())
        .with_executor(Box::new(TokioTaskExecutor::default()))
        .with_evm_config(EthEvmConfig::mainnet())
        .with_consensus(NoopConsensus::default());
    components
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/startup.rs | crates/rpc/rpc-builder/tests/it/startup.rs | //! Startup tests
use std::io;
use reth_rpc_builder::{
error::{RpcError, ServerKind, WsHttpSamePortError},
RpcServerConfig, TransportRpcModuleConfig,
};
use reth_rpc_server_types::RethRpcModule;
use crate::utils::{
launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder,
};
fn is_addr_in_use_kind(err: &RpcError, kind: ServerKind) -> bool {
match err {
RpcError::AddressAlreadyInUse { kind: k, error } => {
*k == kind && error.kind() == io::ErrorKind::AddrInUse
}
_ => false,
}
}
/// Binding a second HTTP server to an address already in use must fail with an
/// `AddressAlreadyInUse` error carrying the HTTP server kind.
#[tokio::test(flavor = "multi_thread")]
async fn test_http_addr_in_use() {
    // First server grabs an OS-assigned free port.
    let handle = launch_http(vec![RethRpcModule::Admin]).await;
    let addr = handle.http_local_addr().unwrap();
    // Second server attempts to bind the very same address.
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let server =
        builder.build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), eth_api);
    let result =
        RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await;
    let err = result.unwrap_err();
    assert!(is_addr_in_use_kind(&err, ServerKind::Http(addr)), "{err}");
}
/// Same as `test_http_addr_in_use`, but for the websocket transport.
#[tokio::test(flavor = "multi_thread")]
async fn test_ws_addr_in_use() {
    let handle = launch_ws(vec![RethRpcModule::Admin]).await;
    let addr = handle.ws_local_addr().unwrap();
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let server =
        builder.build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), eth_api);
    let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await;
    let err = result.unwrap_err();
    assert!(is_addr_in_use_kind(&err, ServerKind::WS(addr)), "{err}");
}
/// HTTP and WS launched on the same port must both report that port as their
/// local address.
#[tokio::test(flavor = "multi_thread")]
async fn test_launch_same_port() {
    let handle = launch_http_ws_same_port(vec![RethRpcModule::Admin]).await;
    let ws_addr = handle.ws_local_addr().unwrap();
    let http_addr = handle.http_local_addr().unwrap();
    assert_eq!(ws_addr, http_addr);
}
/// Sharing one port between HTTP and WS requires identical module sets; a
/// mismatch must be rejected with `ConflictingModules`.
#[tokio::test(flavor = "multi_thread")]
async fn test_launch_same_port_different_modules() {
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    // Deliberately different selections for ws (admin) and http (eth).
    let server = builder.build(
        TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin])
            .with_http(vec![RethRpcModule::Eth]),
        eth_api,
    );
    let addr = test_address();
    let res = RpcServerConfig::ws(Default::default())
        .with_ws_address(addr)
        .with_http(Default::default())
        .with_http_address(addr)
        .start(&server)
        .await;
    let err = res.unwrap_err();
    assert!(matches!(
        err,
        RpcError::WsHttpSamePortError(WsHttpSamePortError::ConflictingModules { .. })
    ));
}
/// Identical CORS domains on both transports are compatible with port sharing.
#[tokio::test(flavor = "multi_thread")]
async fn test_launch_same_port_same_cors() {
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let server = builder.build(
        TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth])
            .with_http(vec![RethRpcModule::Eth]),
        eth_api,
    );
    let addr = test_address();
    let res = RpcServerConfig::ws(Default::default())
        .with_ws_address(addr)
        .with_http(Default::default())
        .with_cors(Some("*".to_string()))
        .with_http_cors(Some("*".to_string()))
        .with_http_address(addr)
        .start(&server)
        .await;
    assert!(res.is_ok());
}
/// Mismatched CORS domains on a shared port must be rejected with
/// `ConflictingCorsDomains`.
#[tokio::test(flavor = "multi_thread")]
async fn test_launch_same_port_different_cors() {
    let builder = test_rpc_builder();
    let eth_api = builder.bootstrap_eth_api();
    let server = builder.build(
        TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth])
            .with_http(vec![RethRpcModule::Eth]),
        eth_api,
    );
    let addr = test_address();
    // Same port, but ws allows "*" while http allows only "example".
    let res = RpcServerConfig::ws(Default::default())
        .with_ws_address(addr)
        .with_http(Default::default())
        .with_cors(Some("*".to_string()))
        .with_http_cors(Some("example".to_string()))
        .with_http_address(addr)
        .start(&server)
        .await;
    let err = res.unwrap_err();
    assert!(matches!(
        err,
        RpcError::WsHttpSamePortError(WsHttpSamePortError::ConflictingCorsDomains { .. })
    ));
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/rpc/rpc-builder/tests/it/main.rs | crates/rpc/rpc-builder/tests/it/main.rs | #![allow(missing_docs)]
mod auth;
mod http;
mod middleware;
mod serde;
mod startup;
pub mod utils;
// Entry-point stub for this integration-test target; the actual tests live in
// the modules declared above.
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/metrics/src/lib.rs | crates/metrics/src/lib.rs | //! Collection of metrics utilities.
//!
//! ## Feature Flags
//!
//! - `common`: Common metrics utilities, such as wrappers around tokio senders and receivers. Pulls
//! in `tokio`.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// Metrics derive macro.
pub use metrics_derive::Metrics;
/// Implementation of common metric utilities.
#[cfg(feature = "common")]
pub mod common;
/// Re-export core metrics crate.
pub use metrics;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/metrics/src/common/mpsc.rs | crates/metrics/src/common/mpsc.rs | //! Support for metering senders. Facilitates debugging by exposing metrics for number of messages
//! sent, number of errors, etc.
use crate::Metrics;
use futures::Stream;
use metrics::Counter;
use std::{
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::mpsc::{
self,
error::{SendError, TryRecvError, TrySendError},
};
use tokio_util::sync::{PollSendError, PollSender};
/// Wrapper around [`mpsc::unbounded_channel`] that returns a new unbounded metered channel.
pub fn metered_unbounded_channel<T>(
    scope: &'static str,
) -> (UnboundedMeteredSender<T>, UnboundedMeteredReceiver<T>) {
    let (sender, receiver) = mpsc::unbounded_channel();
    let metered_tx = UnboundedMeteredSender::new(sender, scope);
    let metered_rx = UnboundedMeteredReceiver::new(receiver, scope);
    (metered_tx, metered_rx)
}
/// Wrapper around [`mpsc::channel`] that returns a new bounded metered channel with the given
/// buffer size.
pub fn metered_channel<T>(
    buffer: usize,
    scope: &'static str,
) -> (MeteredSender<T>, MeteredReceiver<T>) {
    let (raw_tx, raw_rx) = mpsc::channel(buffer);
    let metered_tx = MeteredSender::new(raw_tx, scope);
    let metered_rx = MeteredReceiver::new(raw_rx, scope);
    (metered_tx, metered_rx)
}
/// A wrapper type around [`UnboundedSender`](mpsc::UnboundedSender) that updates metrics on send.
#[derive(Debug)]
pub struct UnboundedMeteredSender<T> {
    /// The [`UnboundedSender`](mpsc::UnboundedSender) that this wraps around
    sender: mpsc::UnboundedSender<T>,
    /// Holds metrics for this type
    metrics: MeteredSenderMetrics,
}
impl<T> UnboundedMeteredSender<T> {
    /// Creates a new [`UnboundedMeteredSender`] wrapping around the provided
    /// [`UnboundedSender`](mpsc::UnboundedSender) that updates metrics on send.
    pub fn new(sender: mpsc::UnboundedSender<T>, scope: &'static str) -> Self {
        Self { sender, metrics: MeteredSenderMetrics::new(scope) }
    }
    /// Calls the underlying [`UnboundedSender`](mpsc::UnboundedSender)'s `send`, incrementing
    /// the appropriate metrics depending on the result.
    pub fn send(&self, message: T) -> Result<(), SendError<T>> {
        match self.sender.send(message) {
            Ok(()) => {
                self.metrics.messages_sent_total.increment(1);
                Ok(())
            }
            Err(error) => {
                self.metrics.send_errors_total.increment(1);
                Err(error)
            }
        }
    }
}
impl<T> Clone for UnboundedMeteredSender<T> {
    // Manual impl: `#[derive(Clone)]` would needlessly require `T: Clone`.
    fn clone(&self) -> Self {
        Self { sender: self.sender.clone(), metrics: self.metrics.clone() }
    }
}
/// A wrapper type around [Receiver](mpsc::UnboundedReceiver) that updates metrics on receive.
#[derive(Debug)]
pub struct UnboundedMeteredReceiver<T> {
    /// The [Receiver](mpsc::UnboundedReceiver) that this wraps around
    receiver: mpsc::UnboundedReceiver<T>,
    /// Holds metrics for this type
    metrics: MeteredReceiverMetrics,
}
// === impl UnboundedMeteredReceiver ===
impl<T> UnboundedMeteredReceiver<T> {
    /// Creates a new [`UnboundedMeteredReceiver`] wrapping around the provided
    /// [Receiver](mpsc::UnboundedReceiver)
    pub fn new(receiver: mpsc::UnboundedReceiver<T>, scope: &'static str) -> Self {
        Self { receiver, metrics: MeteredReceiverMetrics::new(scope) }
    }
    /// Receives the next value for this receiver.
    pub async fn recv(&mut self) -> Option<T> {
        let msg = self.receiver.recv().await;
        // Only actual messages are counted; a `None` (channel closed) is not.
        if msg.is_some() {
            self.metrics.messages_received_total.increment(1);
        }
        msg
    }
    /// Tries to receive the next value for this receiver.
    pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
        let msg = self.receiver.try_recv()?;
        self.metrics.messages_received_total.increment(1);
        Ok(msg)
    }
    /// Closes the receiving half of a channel without dropping it.
    pub fn close(&mut self) {
        self.receiver.close();
    }
    /// Polls to receive the next message on this channel.
    pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
        let msg = ready!(self.receiver.poll_recv(cx));
        if msg.is_some() {
            self.metrics.messages_received_total.increment(1);
        }
        Poll::Ready(msg)
    }
}
impl<T> Stream for UnboundedMeteredReceiver<T> {
    type Item = T;
    // Delegates to `poll_recv`, so every streamed item is metered.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.poll_recv(cx)
    }
}
/// A wrapper type around [Sender](mpsc::Sender) that updates metrics on send.
#[derive(Debug)]
pub struct MeteredSender<T> {
    /// The [Sender](mpsc::Sender) that this wraps around
    sender: mpsc::Sender<T>,
    /// Holds metrics for this type
    metrics: MeteredSenderMetrics,
}
impl<T> MeteredSender<T> {
    /// Creates a new [`MeteredSender`] wrapping around the provided [Sender](mpsc::Sender)
    pub fn new(sender: mpsc::Sender<T>, scope: &'static str) -> Self {
        Self { sender, metrics: MeteredSenderMetrics::new(scope) }
    }
    /// Tries to acquire a permit to send a message without waiting.
    ///
    /// See also [Sender](mpsc::Sender)'s `try_reserve_owned`.
    pub fn try_reserve_owned(self) -> Result<OwnedPermit<T>, TrySendError<Self>> {
        let Self { sender, metrics } = self;
        // On failure the raw sender is handed back inside the error; rewrap it
        // together with the metrics handle so callers get the `MeteredSender` back.
        sender.try_reserve_owned().map(|permit| OwnedPermit::new(permit, metrics.clone())).map_err(
            |err| match err {
                TrySendError::Full(sender) => TrySendError::Full(Self { sender, metrics }),
                TrySendError::Closed(sender) => TrySendError::Closed(Self { sender, metrics }),
            },
        )
    }
    /// Waits to acquire a permit to send a message and return owned permit.
    ///
    /// See also [Sender](mpsc::Sender)'s `reserve_owned`.
    pub async fn reserve_owned(self) -> Result<OwnedPermit<T>, SendError<()>> {
        self.sender.reserve_owned().await.map(|permit| OwnedPermit::new(permit, self.metrics))
    }
    /// Waits to acquire a permit to send a message.
    ///
    /// See also [Sender](mpsc::Sender)'s `reserve`.
    pub async fn reserve(&self) -> Result<Permit<'_, T>, SendError<()>> {
        self.sender.reserve().await.map(|permit| Permit::new(permit, &self.metrics))
    }
    /// Tries to acquire a permit to send a message without waiting.
    ///
    /// See also [Sender](mpsc::Sender)'s `try_reserve`.
    pub fn try_reserve(&self) -> Result<Permit<'_, T>, TrySendError<()>> {
        self.sender.try_reserve().map(|permit| Permit::new(permit, &self.metrics))
    }
    /// Returns the underlying [Sender](mpsc::Sender).
    pub const fn inner(&self) -> &mpsc::Sender<T> {
        &self.sender
    }
    /// Calls the underlying [Sender](mpsc::Sender)'s `try_send`, incrementing the appropriate
    /// metrics depending on the result.
    pub fn try_send(&self, message: T) -> Result<(), TrySendError<T>> {
        match self.sender.try_send(message) {
            Ok(()) => {
                self.metrics.messages_sent_total.increment(1);
                Ok(())
            }
            Err(error) => {
                self.metrics.send_errors_total.increment(1);
                Err(error)
            }
        }
    }
    /// Calls the underlying [Sender](mpsc::Sender)'s `send`, incrementing the appropriate
    /// metrics depending on the result.
    pub async fn send(&self, value: T) -> Result<(), SendError<T>> {
        match self.sender.send(value).await {
            Ok(()) => {
                self.metrics.messages_sent_total.increment(1);
                Ok(())
            }
            Err(error) => {
                self.metrics.send_errors_total.increment(1);
                Err(error)
            }
        }
    }
}
impl<T> Clone for MeteredSender<T> {
    // Manual impl: `#[derive(Clone)]` would needlessly require `T: Clone`.
    fn clone(&self) -> Self {
        Self { sender: self.sender.clone(), metrics: self.metrics.clone() }
    }
}
/// A wrapper type around [`OwnedPermit`](mpsc::OwnedPermit) that updates metrics accounting
/// when sending
#[derive(Debug)]
pub struct OwnedPermit<T> {
    /// The raw permit holding the reserved channel capacity.
    permit: mpsc::OwnedPermit<T>,
    /// Holds metrics for this type
    metrics: MeteredSenderMetrics,
}
impl<T> OwnedPermit<T> {
    /// Creates a new [`OwnedPermit`] wrapping the provided [`mpsc::OwnedPermit`] with given metrics
    /// handle.
    pub const fn new(permit: mpsc::OwnedPermit<T>, metrics: MeteredSenderMetrics) -> Self {
        Self { permit, metrics }
    }
    /// Sends a value using the reserved capacity and update metrics accordingly.
    ///
    /// Consumes the permit and returns a [`MeteredSender`] rebuilt from the
    /// sender released by the underlying permit.
    pub fn send(self, value: T) -> MeteredSender<T> {
        let Self { permit, metrics } = self;
        metrics.messages_sent_total.increment(1);
        MeteredSender { sender: permit.send(value), metrics }
    }
}
/// A wrapper type around [Permit](mpsc::Permit) that updates metrics accounting
/// when sending
#[derive(Debug)]
pub struct Permit<'a, T> {
    /// The raw permit holding the reserved channel capacity.
    permit: mpsc::Permit<'a, T>,
    /// Reference to the owning sender's metrics.
    metrics_ref: &'a MeteredSenderMetrics,
}
impl<'a, T> Permit<'a, T> {
    /// Creates a new [`Permit`] wrapping the provided [`mpsc::Permit`] with given metrics ref.
    pub const fn new(permit: mpsc::Permit<'a, T>, metrics_ref: &'a MeteredSenderMetrics) -> Self {
        Self { permit, metrics_ref }
    }
    /// Sends a value using the reserved capacity and updates metrics accordingly.
    pub fn send(self, value: T) {
        self.metrics_ref.messages_sent_total.increment(1);
        self.permit.send(value);
    }
}
/// A wrapper type around [Receiver](mpsc::Receiver) that updates metrics on receive.
#[derive(Debug)]
pub struct MeteredReceiver<T> {
    /// The [Receiver](mpsc::Receiver) that this wraps around
    receiver: mpsc::Receiver<T>,
    /// Holds metrics for this type
    metrics: MeteredReceiverMetrics,
}
// === impl MeteredReceiver ===
impl<T> MeteredReceiver<T> {
    /// Creates a new [`MeteredReceiver`] wrapping around the provided [Receiver](mpsc::Receiver)
    pub fn new(receiver: mpsc::Receiver<T>, scope: &'static str) -> Self {
        Self { receiver, metrics: MeteredReceiverMetrics::new(scope) }
    }
    /// Receives the next value for this receiver.
    pub async fn recv(&mut self) -> Option<T> {
        let msg = self.receiver.recv().await;
        // Only actual messages are counted; a `None` (channel closed) is not.
        if msg.is_some() {
            self.metrics.messages_received_total.increment(1);
        }
        msg
    }
    /// Tries to receive the next value for this receiver.
    pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
        let msg = self.receiver.try_recv()?;
        self.metrics.messages_received_total.increment(1);
        Ok(msg)
    }
    /// Closes the receiving half of a channel without dropping it.
    pub fn close(&mut self) {
        self.receiver.close();
    }
    /// Polls to receive the next message on this channel.
    pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
        let msg = ready!(self.receiver.poll_recv(cx));
        if msg.is_some() {
            self.metrics.messages_received_total.increment(1);
        }
        Poll::Ready(msg)
    }
}
impl<T> Stream for MeteredReceiver<T> {
    type Item = T;
    // Delegates to `poll_recv`, so every streamed item is metered.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.poll_recv(cx)
    }
}
/// Throughput metrics for [`MeteredSender`]
// `dynamic = true` backs the runtime-scoped `new(scope)` constructor used by
// the channel wrappers in this module.
#[derive(Clone, Metrics)]
#[metrics(dynamic = true)]
pub struct MeteredSenderMetrics {
    /// Number of messages sent
    messages_sent_total: Counter,
    /// Number of failed message deliveries
    send_errors_total: Counter,
}
/// Throughput metrics for [`MeteredReceiver`]
#[derive(Clone, Metrics)]
#[metrics(dynamic = true)]
struct MeteredReceiverMetrics {
    /// Number of messages received
    messages_received_total: Counter,
}
/// A wrapper type around [`PollSender`] that updates metrics on send.
#[derive(Debug)]
pub struct MeteredPollSender<T> {
    /// The [`PollSender`] that this wraps around.
    sender: PollSender<T>,
    /// Holds metrics for this type.
    metrics: MeteredPollSenderMetrics,
}
impl<T: Send + 'static> MeteredPollSender<T> {
    /// Creates a new [`MeteredPollSender`] wrapping around the provided [`PollSender`].
    pub fn new(sender: PollSender<T>, scope: &'static str) -> Self {
        Self { sender, metrics: MeteredPollSenderMetrics::new(scope) }
    }
    /// Returns the underlying [`PollSender`].
    pub const fn inner(&self) -> &PollSender<T> {
        &self.sender
    }
    /// Calls the underlying [`PollSender`]'s `poll_reserve`, incrementing the appropriate
    /// metrics depending on the result.
    ///
    /// A `Pending` result is counted as back pressure: no capacity was free at
    /// poll time.
    pub fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), PollSendError<T>>> {
        match self.sender.poll_reserve(cx) {
            Poll::Ready(Ok(permit)) => Poll::Ready(Ok(permit)),
            Poll::Ready(Err(error)) => Poll::Ready(Err(error)),
            Poll::Pending => {
                self.metrics.back_pressure_total.increment(1);
                Poll::Pending
            }
        }
    }
    /// Calls the underlying [`PollSender`]'s `send_item`, incrementing the appropriate
    /// metrics depending on the result.
    ///
    /// Failed sends are not counted; this metrics set only tracks successful
    /// sends and back pressure.
    pub fn send_item(&mut self, item: T) -> Result<(), PollSendError<T>> {
        match self.sender.send_item(item) {
            Ok(()) => {
                self.metrics.messages_sent_total.increment(1);
                Ok(())
            }
            Err(error) => Err(error),
        }
    }
}
impl<T> Clone for MeteredPollSender<T> {
    // Manual impl keeps `Clone` available without a `T: Clone` bound.
    fn clone(&self) -> Self {
        Self { sender: self.sender.clone(), metrics: self.metrics.clone() }
    }
}
/// Throughput metrics for [`MeteredPollSender`]
#[derive(Clone, Metrics)]
#[metrics(dynamic = true)]
struct MeteredPollSenderMetrics {
    /// Number of messages sent
    messages_sent_total: Counter,
    /// Number of delayed message deliveries caused by a full channel
    back_pressure_total: Counter,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/metrics/src/common/mod.rs | crates/metrics/src/common/mod.rs | pub mod mpsc;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/node.rs | crates/primitives-traits/src/node.rs | use crate::{
Block, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx,
MaybeSerdeBincodeCompat, Receipt,
};
use core::fmt;
/// Configures all the primitive types of the node.
///
/// This trait defines the core types used throughout the node for representing
/// blockchain data. It serves as the foundation for type consistency across
/// different node implementations.
pub trait NodePrimitives:
    Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static
{
    /// Block primitive.
    type Block: Block<Header = Self::BlockHeader, Body = Self::BlockBody> + MaybeSerdeBincodeCompat;
    /// Block header primitive.
    type BlockHeader: FullBlockHeader;
    /// Block body primitive.
    ///
    /// Its transactions and ommer headers are tied to the other associated types.
    type BlockBody: FullBlockBody<Transaction = Self::SignedTx, OmmerHeader = Self::BlockHeader>;
    /// Signed version of the transaction type.
    ///
    /// This represents the transaction as it exists in the blockchain - the consensus
    /// format that includes the signature and can be included in a block.
    type SignedTx: FullSignedTx;
    /// A receipt.
    type Receipt: Receipt;
}
/// Helper trait that sets trait bounds on [`NodePrimitives`].
///
/// Strengthens each associated-type bound to its `Full*` counterpart; implemented
/// automatically for any qualifying type via the blanket impl below.
pub trait FullNodePrimitives
where
    Self: NodePrimitives<
            Block: FullBlock<Header = Self::BlockHeader, Body = Self::BlockBody>,
            BlockHeader: FullBlockHeader,
            BlockBody: FullBlockBody<Transaction = Self::SignedTx>,
            SignedTx: FullSignedTx,
            Receipt: FullReceipt,
        > + Send
        + Sync
        + Unpin
        + Clone
        + Default
        + fmt::Debug
        + PartialEq
        + Eq
        + 'static,
{
}
// Blanket impl: any `NodePrimitives` whose associated types satisfy the `Full*`
// bounds is automatically a `FullNodePrimitives`.
impl<T> FullNodePrimitives for T where
    T: NodePrimitives<
            Block: FullBlock<Header = Self::BlockHeader, Body = Self::BlockBody>,
            BlockHeader: FullBlockHeader,
            BlockBody: FullBlockBody<Transaction = Self::SignedTx>,
            SignedTx: FullSignedTx,
            Receipt: FullReceipt,
        > + Send
        + Sync
        + Unpin
        + Clone
        + Default
        + fmt::Debug
        + PartialEq
        + Eq
        + 'static
{
}
/// Helper adapter type for accessing [`NodePrimitives`] block header types.
pub type HeaderTy<N> = <N as NodePrimitives>::BlockHeader;
/// Helper adapter type for accessing [`NodePrimitives`] block body types.
pub type BodyTy<N> = <N as NodePrimitives>::BlockBody;
/// Helper adapter type for accessing [`NodePrimitives`] block types.
pub type BlockTy<N> = <N as NodePrimitives>::Block;
/// Helper adapter type for accessing [`NodePrimitives`] receipt types.
pub type ReceiptTy<N> = <N as NodePrimitives>::Receipt;
/// Helper adapter type for accessing [`NodePrimitives`] signed transaction types.
pub type TxTy<N> = <N as NodePrimitives>::SignedTx;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/serde_bincode_compat.rs | crates/primitives-traits/src/serde_bincode_compat.rs | //! Bincode compatibility support for reth primitive types.
//!
//! This module provides traits and implementations to work around bincode's limitations
//! with optional serde fields. The bincode crate requires all fields to be present during
//! serialization, which conflicts with types that have `#[serde(skip_serializing_if)]`
//! attributes for RPC compatibility.
//!
//! # Overview
//!
//! The main trait is `SerdeBincodeCompat`, which provides a conversion mechanism between
//! types and their bincode-compatible representations. There are two main ways to implement
//! this trait:
//!
//! 1. **Using RLP encoding** - Implement `RlpBincode` for types that already support RLP
//! 2. **Custom implementation** - Define a custom representation type
//!
//! # Examples
//!
//! ## Using with `serde_with`
//!
//! ```rust
//! # use reth_primitives_traits::serde_bincode_compat::{self, SerdeBincodeCompat};
//! # use serde::{Deserialize, Serialize};
//! # use serde_with::serde_as;
//! # use alloy_consensus::Header;
//! #[serde_as]
//! #[derive(Serialize, Deserialize)]
//! struct MyStruct {
//! #[serde_as(as = "serde_bincode_compat::BincodeReprFor<'_, Header>")]
//! data: Header,
//! }
//! ```
use alloc::vec::Vec;
use alloy_primitives::Bytes;
use core::fmt::Debug;
use serde::{de::DeserializeOwned, Serialize};
pub use super::{
block::{serde_bincode_compat as block, serde_bincode_compat::*},
header::{serde_bincode_compat as header, serde_bincode_compat::*},
};
pub use block_bincode::{Block, BlockBody};
/// Trait for types that can be serialized and deserialized using bincode.
///
/// This trait provides a workaround for bincode's incompatibility with optional
/// serde fields. It ensures all fields are serialized, making the type bincode-compatible.
///
/// # Implementation
///
/// The easiest way to implement this trait is using [`RlpBincode`] for RLP-encodable types:
///
/// ```rust
/// # use reth_primitives_traits::serde_bincode_compat::RlpBincode;
/// # use alloy_rlp::{RlpEncodable, RlpDecodable};
/// # #[derive(RlpEncodable, RlpDecodable)]
/// # struct MyType;
/// impl RlpBincode for MyType {}
/// // SerdeBincodeCompat is automatically implemented
/// ```
///
/// For custom implementations, see the examples in the `block` module.
///
/// The recommended way to add bincode compatible serialization is via the
/// [`serde_with`] crate and the `serde_as` macro. See for reference [`header`].
pub trait SerdeBincodeCompat: Sized + 'static {
    /// Serde representation of the type for bincode serialization.
    ///
    /// This type defines the bincode compatible serde format for the type.
    type BincodeRepr<'a>: Debug + Serialize + DeserializeOwned;
    /// Convert this type into its bincode representation
    fn as_repr(&self) -> Self::BincodeRepr<'_>;
    /// Convert from the bincode representation
    fn from_repr(repr: Self::BincodeRepr<'_>) -> Self;
}
impl SerdeBincodeCompat for alloy_consensus::Header {
    // Delegates to the bincode-compatible header representation upstream in
    // `alloy_consensus`.
    type BincodeRepr<'a> = alloy_consensus::serde_bincode_compat::Header<'a>;
    fn as_repr(&self) -> Self::BincodeRepr<'_> {
        self.into()
    }
    fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
        repr.into()
    }
}
/// Type alias for the [`SerdeBincodeCompat::BincodeRepr`] associated type.
///
/// This provides a convenient way to refer to the bincode representation type
/// without having to write out the full associated type projection.
///
/// # Example
///
/// ```rust
/// # use reth_primitives_traits::serde_bincode_compat::{SerdeBincodeCompat, BincodeReprFor};
/// fn serialize_to_bincode<T: SerdeBincodeCompat>(value: &T) -> BincodeReprFor<'_, T> {
///     value.as_repr()
/// }
/// ```
pub type BincodeReprFor<'a, T> = <T as SerdeBincodeCompat>::BincodeRepr<'a>;
/// A helper trait for using RLP-encoding for providing bincode-compatible serialization.
///
/// By implementing this trait, [`SerdeBincodeCompat`] will be automatically implemented for the
/// type and RLP encoding will be used for serialization and deserialization for bincode
/// compatibility.
///
/// # Example
///
/// ```rust
/// # use reth_primitives_traits::serde_bincode_compat::RlpBincode;
/// # use alloy_rlp::{RlpEncodable, RlpDecodable};
/// #[derive(RlpEncodable, RlpDecodable)]
/// struct MyCustomType {
///     value: u64,
///     data: Vec<u8>,
/// }
///
/// // Simply implement the marker trait
/// impl RlpBincode for MyCustomType {}
///
/// // Now MyCustomType can be used with bincode through RLP encoding
/// ```
pub trait RlpBincode: alloy_rlp::Encodable + alloy_rlp::Decodable {}
impl<T: RlpBincode + 'static> SerdeBincodeCompat for T {
    // The RLP byte encoding serves as the bincode-compatible representation.
    type BincodeRepr<'a> = Bytes;
    fn as_repr(&self) -> Self::BincodeRepr<'_> {
        let mut buf = Vec::new();
        self.encode(&mut buf);
        buf.into()
    }
    // Panics if `repr` is not valid RLP for `Self`; reprs are expected to come
    // from `as_repr`, where that invariant holds.
    fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
        Self::decode(&mut repr.as_ref()).expect("Failed to decode bincode rlp representation")
    }
}
mod block_bincode {
use crate::serde_bincode_compat::SerdeBincodeCompat;
use alloc::{borrow::Cow, vec::Vec};
use alloy_consensus::TxEip4844;
use alloy_eips::eip4895::Withdrawals;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::{DeserializeAs, SerializeAs};
    /// Bincode-compatible [`alloy_consensus::Block`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use alloy_consensus::Block;
    /// use reth_primitives_traits::serde_bincode_compat::{self, SerdeBincodeCompat};
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data<T: SerdeBincodeCompat, H: SerdeBincodeCompat> {
    ///     #[serde_as(as = "serde_bincode_compat::Block<'_, T, H>")]
    ///     body: Block<T, H>,
    /// }
    /// ```
    #[derive(derive_more::Debug, Serialize, Deserialize)]
    #[debug(bound())]
    pub struct Block<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> {
        /// Bincode representation of the header.
        header: H::BincodeRepr<'a>,
        // Explicit bound needed because `BlockBody` itself contains generic reprs.
        #[serde(bound = "BlockBody<'a, T, H>: Serialize + serde::de::DeserializeOwned")]
        body: BlockBody<'a, T, H>,
    }
    impl<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> From<&'a alloy_consensus::Block<T, H>>
        for Block<'a, T, H>
    {
        fn from(value: &'a alloy_consensus::Block<T, H>) -> Self {
            Self { header: value.header.as_repr(), body: (&value.body).into() }
        }
    }
    impl<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> From<Block<'a, T, H>>
        for alloy_consensus::Block<T, H>
    {
        fn from(value: Block<'a, T, H>) -> Self {
            Self { header: SerdeBincodeCompat::from_repr(value.header), body: value.body.into() }
        }
    }
    impl<T: SerdeBincodeCompat, H: SerdeBincodeCompat> SerializeAs<alloy_consensus::Block<T, H>>
        for Block<'_, T, H>
    {
        fn serialize_as<S>(
            source: &alloy_consensus::Block<T, H>,
            serializer: S,
        ) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            Block::from(source).serialize(serializer)
        }
    }
    impl<'de, T: SerdeBincodeCompat, H: SerdeBincodeCompat>
        DeserializeAs<'de, alloy_consensus::Block<T, H>> for Block<'de, T, H>
    {
        fn deserialize_as<D>(deserializer: D) -> Result<alloy_consensus::Block<T, H>, D::Error>
        where
            D: Deserializer<'de>,
        {
            Block::deserialize(deserializer).map(Into::into)
        }
    }
    impl<T: SerdeBincodeCompat, H: SerdeBincodeCompat> SerdeBincodeCompat
        for alloy_consensus::Block<T, H>
    {
        type BincodeRepr<'a> = Block<'a, T, H>;
        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            self.into()
        }
        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            repr.into()
        }
    }
    /// Bincode-compatible [`alloy_consensus::BlockBody`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_primitives_traits::serde_bincode_compat::{self, SerdeBincodeCompat};
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data<T: SerdeBincodeCompat, H: SerdeBincodeCompat> {
    ///     #[serde_as(as = "serde_bincode_compat::BlockBody<'_, T, H>")]
    ///     body: alloy_consensus::BlockBody<T, H>,
    /// }
    /// ```
    #[derive(derive_more::Debug, Serialize, Deserialize)]
    #[debug(bound())]
    pub struct BlockBody<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> {
        /// Bincode representations of the transactions.
        transactions: Vec<T::BincodeRepr<'a>>,
        /// Bincode representations of the ommer headers.
        ommers: Vec<H::BincodeRepr<'a>>,
        // `Cow` lets serialization borrow from the source body without cloning.
        withdrawals: Cow<'a, Option<Withdrawals>>,
    }
    impl<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat>
        From<&'a alloy_consensus::BlockBody<T, H>> for BlockBody<'a, T, H>
    {
        fn from(value: &'a alloy_consensus::BlockBody<T, H>) -> Self {
            Self {
                transactions: value.transactions.iter().map(|tx| tx.as_repr()).collect(),
                ommers: value.ommers.iter().map(|h| h.as_repr()).collect(),
                withdrawals: Cow::Borrowed(&value.withdrawals),
            }
        }
    }
    impl<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> From<BlockBody<'a, T, H>>
        for alloy_consensus::BlockBody<T, H>
    {
        fn from(value: BlockBody<'a, T, H>) -> Self {
            Self {
                transactions: value
                    .transactions
                    .into_iter()
                    .map(SerdeBincodeCompat::from_repr)
                    .collect(),
                ommers: value.ommers.into_iter().map(SerdeBincodeCompat::from_repr).collect(),
                withdrawals: value.withdrawals.into_owned(),
            }
        }
    }
    impl<T: SerdeBincodeCompat, H: SerdeBincodeCompat> SerializeAs<alloy_consensus::BlockBody<T, H>>
        for BlockBody<'_, T, H>
    {
        fn serialize_as<S>(
            source: &alloy_consensus::BlockBody<T, H>,
            serializer: S,
        ) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            BlockBody::from(source).serialize(serializer)
        }
    }
    impl<'de, T: SerdeBincodeCompat, H: SerdeBincodeCompat>
        DeserializeAs<'de, alloy_consensus::BlockBody<T, H>> for BlockBody<'de, T, H>
    {
        fn deserialize_as<D>(deserializer: D) -> Result<alloy_consensus::BlockBody<T, H>, D::Error>
        where
            D: Deserializer<'de>,
        {
            BlockBody::deserialize(deserializer).map(Into::into)
        }
    }
    impl<T: SerdeBincodeCompat, H: SerdeBincodeCompat> SerdeBincodeCompat
        for alloy_consensus::BlockBody<T, H>
    {
        type BincodeRepr<'a> = BlockBody<'a, T, H>;
        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            self.into()
        }
        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            repr.into()
        }
    }
impl super::SerdeBincodeCompat for alloy_consensus::EthereumTxEnvelope<TxEip4844> {
type BincodeRepr<'a> =
alloy_consensus::serde_bincode_compat::transaction::EthereumTxEnvelope<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
self.into()
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
repr.into()
}
}
#[cfg(feature = "op")]
impl super::SerdeBincodeCompat for op_alloy_consensus::OpTxEnvelope {
type BincodeRepr<'a> =
op_alloy_consensus::serde_bincode_compat::transaction::OpTxEnvelope<'a>;
fn as_repr(&self) -> Self::BincodeRepr<'_> {
self.into()
}
fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
repr.into()
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/withdrawal.rs | crates/primitives-traits/src/withdrawal.rs | //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types.
#[cfg(test)]
mod tests {
use alloy_eips::eip4895::Withdrawal;
use alloy_primitives::Address;
use alloy_rlp::{RlpDecodable, RlpEncodable};
use proptest::proptest;
use proptest_arbitrary_interop::arb;
use reth_codecs::{add_arbitrary_tests, Compact};
use serde::{Deserialize, Serialize};
/// This type is kept for compatibility tests after the codec support was added to alloy-eips
/// Withdrawal type natively
#[derive(
Debug,
Clone,
PartialEq,
Eq,
Default,
Hash,
RlpEncodable,
RlpDecodable,
Serialize,
Deserialize,
Compact,
)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(compact)]
struct RethWithdrawal {
/// Monotonically increasing identifier issued by consensus layer.
index: u64,
/// Index of validator associated with withdrawal.
validator_index: u64,
/// Target address for withdrawn ether.
address: Address,
/// Value of the withdrawal in gwei.
amount: u64,
}
impl PartialEq<Withdrawal> for RethWithdrawal {
fn eq(&self, other: &Withdrawal) -> bool {
self.index == other.index &&
self.validator_index == other.validator_index &&
self.address == other.address &&
self.amount == other.amount
}
}
// <https://github.com/paradigmxyz/reth/issues/1614>
#[test]
fn test_withdrawal_serde_roundtrip() {
let input = r#"[{"index":"0x0","validatorIndex":"0x0","address":"0x0000000000000000000000000000000000001000","amount":"0x1"},{"index":"0x1","validatorIndex":"0x1","address":"0x0000000000000000000000000000000000001001","amount":"0x1"},{"index":"0x2","validatorIndex":"0x2","address":"0x0000000000000000000000000000000000001002","amount":"0x1"},{"index":"0x3","validatorIndex":"0x3","address":"0x0000000000000000000000000000000000001003","amount":"0x1"},{"index":"0x4","validatorIndex":"0x4","address":"0x0000000000000000000000000000000000001004","amount":"0x1"},{"index":"0x5","validatorIndex":"0x5","address":"0x0000000000000000000000000000000000001005","amount":"0x1"},{"index":"0x6","validatorIndex":"0x6","address":"0x0000000000000000000000000000000000001006","amount":"0x1"},{"index":"0x7","validatorIndex":"0x7","address":"0x0000000000000000000000000000000000001007","amount":"0x1"},{"index":"0x8","validatorIndex":"0x8","address":"0x0000000000000000000000000000000000001008","amount":"0x1"},{"index":"0x9","validatorIndex":"0x9","address":"0x0000000000000000000000000000000000001009","amount":"0x1"},{"index":"0xa","validatorIndex":"0xa","address":"0x000000000000000000000000000000000000100a","amount":"0x1"},{"index":"0xb","validatorIndex":"0xb","address":"0x000000000000000000000000000000000000100b","amount":"0x1"},{"index":"0xc","validatorIndex":"0xc","address":"0x000000000000000000000000000000000000100c","amount":"0x1"},{"index":"0xd","validatorIndex":"0xd","address":"0x000000000000000000000000000000000000100d","amount":"0x1"},{"index":"0xe","validatorIndex":"0xe","address":"0x000000000000000000000000000000000000100e","amount":"0x1"},{"index":"0xf","validatorIndex":"0xf","address":"0x000000000000000000000000000000000000100f","amount":"0x1"}]"#;
let withdrawals: Vec<Withdrawal> = serde_json::from_str(input).unwrap();
let s = serde_json::to_string(&withdrawals).unwrap();
assert_eq!(input, s);
}
proptest!(
#[test]
fn test_roundtrip_withdrawal_compat(withdrawal in arb::<RethWithdrawal>()) {
// Convert to buffer and then create alloy_access_list from buffer and
// compare
let mut compacted_reth_withdrawal = Vec::<u8>::new();
let len = withdrawal.to_compact(&mut compacted_reth_withdrawal);
// decode the compacted buffer to AccessList
let alloy_withdrawal = Withdrawal::from_compact(&compacted_reth_withdrawal, len).0;
assert_eq!(withdrawal, alloy_withdrawal);
let mut compacted_alloy_withdrawal = Vec::<u8>::new();
let alloy_len = alloy_withdrawal.to_compact(&mut compacted_alloy_withdrawal);
assert_eq!(len, alloy_len);
assert_eq!(compacted_reth_withdrawal, compacted_alloy_withdrawal);
}
);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/lib.rs | crates/primitives-traits/src/lib.rs | //! Commonly used types and traits in Reth.
//!
//! ## Overview
//!
//! This crate defines various traits and types that form the foundation of the reth stack.
//! The top-level trait is [`Block`] which represents a block in the blockchain. A [`Block`] is
//! composed of a [`Header`] and a [`BlockBody`]. A [`BlockBody`] contains the transactions in the
//! block and additional data that is part of the block. In ethereum, this includes uncle headers
//! and withdrawals. For optimism, uncle headers and withdrawals are always empty lists.
//!
//! The most common types you'll use are:
//! - [`Block`] - A basic block with header and body
//! - [`SealedBlock`] - A block with its hash cached
//! - [`SealedHeader`] - A header with its hash cached
//! - [`RecoveredBlock`] - A sealed block with sender addresses recovered
//!
//! ## Feature Flags
//!
//! - `arbitrary`: Adds `proptest` and `arbitrary` support for primitive types.
//! - `op`: Implements the traits for various [op-alloy](https://github.com/alloy-rs/op-alloy)
//! types.
//! - `reth-codec`: Enables db codec support for reth types including zstd compression for certain
//! types.
//! - `rpc-compat`: Adds RPC compatibility functions for the types in this crate, e.g. rpc type
//! conversions.
//! - `serde`: Adds serde support for all types.
//! - `secp256k1`: Adds secp256k1 support for transaction signing/recovery. (By default the no-std
//! friendly `k256` is used)
//! - `rayon`: Uses `rayon` for parallel transaction sender recovery in [`BlockBody`] by default.
//! - `serde-bincode-compat` provides helpers for dealing with the `bincode` crate.
//!
//! ### Sealing (Hashing)
//!
//! The block hash is derived from the [`Header`] and is used to uniquely identify the block. This
//! operation is referred to as sealing in the context of this crate. Sealing is an expensive
//! operation. This crate provides various wrapper types that cache the hash of the block to avoid
//! recomputing it: [`SealedHeader`] and [`SealedBlock`]. All sealed types can be downgraded to
//! their unsealed counterparts.
//!
//! ### Recovery
//!
//! The raw consensus transactions that make up a block don't include the sender's address. This
//! information is recovered from the transaction signature. This operation is referred to as
//! recovery in the context of this crate and is an expensive operation. The [`RecoveredBlock`]
//! represents a [`SealedBlock`] with the sender addresses recovered. A [`SealedBlock`] can be
//! upgraded to a [`RecoveredBlock`] by recovering the sender addresses:
//! [`SealedBlock::try_recover`]. A [`RecoveredBlock`] can be downgraded to a [`SealedBlock`] by
//! removing the sender addresses: [`RecoveredBlock::into_sealed_block`].
//!
//! #### Naming
//!
//! The types in this crate support multiple recovery functions, e.g.
//! [`SealedBlock::try_recover`] and [`SealedBlock::try_recover_unchecked`]. The `_unchecked` suffix indicates that this function recovers the signer _without ensuring that the signature has a low `s` value_, in other words this rule introduced in [EIP-2](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md) is ignored.
//! Hence this function is necessary when dealing with pre EIP-2 transactions on the ethereum
//! mainnet. Newer transactions must always be recovered with the regular `recover` functions, see
//! also [`recover_signer`](crypto::secp256k1::recover_signer).
//!
//! ## Error Handling
//!
//! Most operations that can fail return `Result` types:
//! - [`RecoveryError`](transaction::signed::RecoveryError) - Transaction signature recovery failed
//! - [`BlockRecoveryError`](block::error::BlockRecoveryError) - Block-level recovery failed
//! - [`GotExpected`] / [`GotExpectedBoxed`] - Generic error for mismatched values
//!
//! Recovery errors typically indicate invalid signatures or corrupted data. The block recovery
//! error preserves the original block for further inspection.
//!
//! ### Example
//!
//! ```rust
//! # use reth_primitives_traits::{SealedBlock, RecoveredBlock};
//! # use reth_primitives_traits::block::error::BlockRecoveryError;
//! # fn example<B: reth_primitives_traits::Block>(sealed_block: SealedBlock<B>) -> Result<(), BlockRecoveryError<SealedBlock<B>>>
//! # where B::Body: reth_primitives_traits::BlockBody<Transaction: reth_primitives_traits::SignedTransaction> {
//! // Attempt to recover senders from a sealed block
//! match sealed_block.try_recover() {
//! Ok(recovered) => {
//! // Successfully recovered all senders
//! println!("Recovered {} senders", recovered.senders().len());
//! Ok(())
//! }
//! Err(err) => {
//! // Recovery failed - the block is returned in the error
//! println!("Failed to recover senders for block");
//! // You can still access the original block
//! let block = err.into_inner();
//! let hash = block.hash();
//! Err(BlockRecoveryError::new(block))
//! }
//! }
//! # }
//! ```
//!
//! ## Performance Considerations
//!
//! - **Hashing**: Block hashing is expensive. Use [`SealedBlock`] to cache hashes.
//! - **Recovery**: Sender recovery is CPU-intensive. Use [`RecoveredBlock`] to cache results.
//! - **Parallel Recovery**: Enable the `rayon` feature for parallel transaction recovery.
//!
//! ## Bincode serde compatibility
//!
//! The [bincode-crate](https://github.com/bincode-org/bincode) is often used by additional tools when sending data over the network.
//! `bincode` crate doesn't work well with optionally serializable serde fields, but some of the consensus types require optional serialization for RPC compatibility. Read more: <https://github.com/bincode-org/bincode/issues/326>
//!
//! As a workaround this crate introduces the `SerdeBincodeCompat` trait (available with the
//! `serde-bincode-compat` feature) used to provide a bincode compatible serde representation.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#[macro_use]
extern crate alloc;
/// Common constants.
pub mod constants;
pub use constants::gas_units::{format_gas, format_gas_throughput};
/// Minimal account
pub mod account;
pub use account::{Account, Bytecode};
pub mod receipt;
pub use receipt::{FullReceipt, Receipt};
pub mod transaction;
pub use alloy_consensus::{
transaction::{Recovered, TransactionMeta},
ReceiptWithBloom,
};
pub use transaction::{
execute::FillTxEnv,
signed::{FullSignedTx, SignedTransaction},
FullTransaction, SignerRecoverable, Transaction,
};
pub mod block;
pub use block::{
body::{BlockBody, FullBlockBody},
header::{AlloyBlockHeader, BlockHeader, FullBlockHeader},
recovered::IndexedTx,
Block, FullBlock, RecoveredBlock, SealedBlock,
};
mod withdrawal;
pub use alloy_eips::eip2718::WithEncoded;
pub mod crypto;
mod error;
pub use error::{GotExpected, GotExpectedBoxed};
mod log;
pub use alloy_primitives::{logs_bloom, Log, LogData};
pub mod proofs;
mod storage;
pub use storage::StorageEntry;
pub mod sync;
mod extended;
pub use extended::Extended;
/// Common header types
pub mod header;
pub use header::{Header, SealedHeader, SealedHeaderFor};
/// Bincode-compatible serde implementations for common abstracted types in Reth.
///
/// `bincode` crate doesn't work with optionally serializable serde fields, but some of the
/// Reth types require optional serialization for RPC compatibility. This module makes so that
/// all fields are serialized.
///
/// Read more: <https://github.com/bincode-org/bincode/issues/326>
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat;
/// Heuristic size trait
pub mod size;
pub use size::InMemorySize;
/// Node traits
pub mod node;
pub use node::{BlockTy, BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy, TxTy};
/// Helper trait that requires de-/serialize implementation since `serde` feature is enabled.
#[cfg(feature = "serde")]
pub trait MaybeSerde: serde::Serialize + for<'de> serde::Deserialize<'de> {}
/// Noop. Helper trait that would require de-/serialize implementation if `serde` feature were
/// enabled.
#[cfg(not(feature = "serde"))]
pub trait MaybeSerde {}
#[cfg(feature = "serde")]
impl<T> MaybeSerde for T where T: serde::Serialize + for<'de> serde::Deserialize<'de> {}
#[cfg(not(feature = "serde"))]
impl<T> MaybeSerde for T {}
/// Helper trait that requires database encoding implementation since `reth-codec` feature is
/// enabled.
#[cfg(feature = "reth-codec")]
pub trait MaybeCompact: reth_codecs::Compact {}
/// Noop. Helper trait that would require database encoding implementation if `reth-codec` feature
/// were enabled.
#[cfg(not(feature = "reth-codec"))]
pub trait MaybeCompact {}
#[cfg(feature = "reth-codec")]
impl<T> MaybeCompact for T where T: reth_codecs::Compact {}
#[cfg(not(feature = "reth-codec"))]
impl<T> MaybeCompact for T {}
/// Helper trait that requires serde bincode compatibility implementation.
#[cfg(feature = "serde-bincode-compat")]
pub trait MaybeSerdeBincodeCompat: crate::serde_bincode_compat::SerdeBincodeCompat {}
/// Noop. Helper trait that would require serde bincode compatibility implementation if
/// `serde-bincode-compat` feature were enabled.
#[cfg(not(feature = "serde-bincode-compat"))]
pub trait MaybeSerdeBincodeCompat {}
#[cfg(feature = "serde-bincode-compat")]
impl<T> MaybeSerdeBincodeCompat for T where T: crate::serde_bincode_compat::SerdeBincodeCompat {}
#[cfg(not(feature = "serde-bincode-compat"))]
impl<T> MaybeSerdeBincodeCompat for T {}
/// Utilities for testing.
#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))]
pub mod test_utils {
pub use crate::header::test_utils::{generate_valid_header, valid_header_strategy};
#[cfg(any(test, feature = "test-utils"))]
pub use crate::{block::TestBlock, header::test_utils::TestHeader};
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/proofs.rs | crates/primitives-traits/src/proofs.rs | //! Helper function for calculating Merkle proofs and hashes.
pub use alloy_trie::root::ordered_trie_root_with_encoder;
pub use alloy_consensus::proofs::calculate_receipt_root;
/// Calculate a transaction root.
///
/// `(rlp(index), encoded(tx))` pairs.
#[doc(inline)]
pub use alloy_consensus::proofs::calculate_transaction_root;
/// Calculates the root hash of the withdrawals.
#[doc(inline)]
pub use alloy_consensus::proofs::calculate_withdrawals_root;
/// Calculates the root hash for ommer/uncle headers.
#[doc(inline)]
pub use alloy_consensus::proofs::calculate_ommers_root;
#[cfg(test)]
mod tests {
use alloy_consensus::EMPTY_ROOT_HASH;
use alloy_genesis::GenesisAccount;
use alloy_primitives::{b256, hex_literal::hex, Address, B256, U256};
use alloy_trie::root::{state_root_ref_unhashed, state_root_unhashed};
use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA};
use std::collections::HashMap;
#[test]
fn check_empty_state_root() {
let genesis_alloc = HashMap::<Address, GenesisAccount>::new();
let root = state_root_unhashed(genesis_alloc);
assert_eq!(root, EMPTY_ROOT_HASH);
}
#[test]
fn test_simple_account_state_root() {
// each fixture specifies an address and expected root hash - the address is initialized
// with a maximum balance, and is the only account in the state.
// these test cases are generated by using geth with a custom genesis.json (with a single
// account that has max balance)
let fixtures: Vec<(Address, B256)> = vec![
(
hex!("9fe4abd71ad081f091bd06dd1c16f7e92927561e").into(),
hex!("4b35be4231841d212ce2fa43aedbddeadd6eb7d420195664f9f0d55629db8c32").into(),
),
(
hex!("c2ba9d87f8be0ade00c60d3656c1188e008fbfa2").into(),
hex!("e1389256c47d63df8856d7729dec9dc2dae074a7f0cbc49acad1cf7b29f7fe94").into(),
),
];
for (test_addr, expected_root) in fixtures {
let mut genesis_alloc = HashMap::new();
genesis_alloc
.insert(test_addr, GenesisAccount { balance: U256::MAX, ..Default::default() });
let root = state_root_unhashed(genesis_alloc);
assert_eq!(root, expected_root);
}
}
#[test]
fn test_chain_state_roots() {
let expected_mainnet_state_root =
b256!("0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544");
let calculated_mainnet_state_root = state_root_ref_unhashed(&MAINNET.genesis.alloc);
assert_eq!(
expected_mainnet_state_root, calculated_mainnet_state_root,
"mainnet state root mismatch"
);
let expected_sepolia_state_root =
b256!("0x5eb6e371a698b8d68f665192350ffcecbbbf322916f4b51bd79bb6887da3f494");
let calculated_sepolia_state_root = state_root_ref_unhashed(&SEPOLIA.genesis.alloc);
assert_eq!(
expected_sepolia_state_root, calculated_sepolia_state_root,
"sepolia state root mismatch"
);
let expected_holesky_state_root =
b256!("0x69d8c9d72f6fa4ad42d4702b433707212f90db395eb54dc20bc85de253788783");
let calculated_holesky_state_root = state_root_ref_unhashed(&HOLESKY.genesis.alloc);
assert_eq!(
expected_holesky_state_root, calculated_holesky_state_root,
"holesky state root mismatch"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/sync.rs | crates/primitives-traits/src/sync.rs | //! Lock synchronization primitives
use once_cell as _;
#[cfg(not(feature = "std"))]
pub use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock};
#[cfg(feature = "std")]
pub use std::sync::{LazyLock, OnceLock};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/log.rs | crates/primitives-traits/src/log.rs | #[cfg(test)]
mod tests {
use alloy_primitives::{Address, Bytes, Log as AlloyLog, B256};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use proptest::proptest;
use proptest_arbitrary_interop::arb;
use reth_codecs::{add_arbitrary_tests, Compact};
use serde::{Deserialize, Serialize};
/// This type is kept for compatibility tests after the codec support was added to
/// alloy-primitives Log type natively
#[derive(
Clone,
Debug,
PartialEq,
Eq,
RlpDecodable,
RlpEncodable,
Default,
Serialize,
Deserialize,
Compact,
)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(compact, rlp)]
struct Log {
/// Contract that emitted this log.
address: Address,
/// Topics of the log. The number of logs depends on what `LOG` opcode is used.
topics: Vec<B256>,
/// Arbitrary length data.
data: Bytes,
}
impl From<AlloyLog> for Log {
fn from(mut log: AlloyLog) -> Self {
Self {
address: log.address,
topics: std::mem::take(log.data.topics_mut_unchecked()),
data: log.data.data,
}
}
}
impl From<Log> for AlloyLog {
fn from(log: Log) -> Self {
Self::new_unchecked(log.address, log.topics, log.data)
}
}
proptest! {
#[test]
fn test_roundtrip_conversion_between_log_and_alloy_log(log in arb::<Log>()) {
// Convert log to buffer and then create alloy_log from buffer and compare
let mut compacted_log = Vec::<u8>::new();
let len = log.to_compact(&mut compacted_log);
let alloy_log = AlloyLog::from_compact(&compacted_log, len).0;
assert_eq!(log, alloy_log.into());
// Create alloy_log from log and then convert it to buffer and compare compacted_alloy_log and compacted_log
let alloy_log = AlloyLog::new_unchecked(log.address, log.topics, log.data);
let mut compacted_alloy_log = Vec::<u8>::new();
let alloy_len = alloy_log.to_compact(&mut compacted_alloy_log);
assert_eq!(len, alloy_len);
assert_eq!(compacted_log, compacted_alloy_log);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/storage.rs | crates/primitives-traits/src/storage.rs | use alloy_primitives::{B256, U256};
use alloy_primitives::FlaggedStorage;
/// Account storage entry.
///
/// `key` is the subkey when used as a value in the `StorageChangeSets` table.
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
pub struct StorageEntry {
/// Storage key.
pub key: B256,
/// Value on storage key.
pub value: FlaggedStorage,
}
impl StorageEntry {
/// Create a new `StorageEntry` with given key and value.
pub const fn new(key: B256, value: U256, is_private: bool) -> Self {
Self { key, value: FlaggedStorage { value, is_private } }
}
/// Convert the storage entry to a flagged storage entry.
pub const fn to_flagged_storage(self) -> FlaggedStorage {
self.value
}
}
impl From<(B256, U256, bool)> for StorageEntry {
fn from((key, value, is_private): (B256, U256, bool)) -> Self {
Self::new(key, value, is_private)
}
}
impl From<(B256, (U256, bool))> for StorageEntry {
fn from((key, (value, is_private)): (B256, (U256, bool))) -> Self {
Self::new(key, value, is_private)
}
}
impl From<(B256, FlaggedStorage)> for StorageEntry {
fn from((key, value): (B256, FlaggedStorage)) -> Self {
Self { key, value }
}
}
impl From<StorageEntry> for FlaggedStorage {
fn from(entry: StorageEntry) -> Self {
entry.value
}
}
// NOTE: Removing reth_codec and manually encode subkey
// and compress second part of the value. If we have compression
// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey
#[cfg(any(test, feature = "reth-codec"))]
impl reth_codecs::Compact for StorageEntry {
fn to_compact<B>(&self, buf: &mut B) -> usize
where
B: bytes::BufMut + AsMut<[u8]>,
{
// for now put full bytes and later compress it.
buf.put_slice(&self.key[..]);
buf.put_u8(self.value.is_private as u8);
self.value.value.to_compact(buf) + 32 + 1
}
fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
let key = B256::from_slice(&buf[..32]);
let is_private = buf[32] != 0;
let (value, out) = U256::from_compact(&buf[32 + 1..], len - 32 - 1);
(Self { key, value: FlaggedStorage { value, is_private } }, out)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/error.rs | crates/primitives-traits/src/error.rs | use alloc::boxed::Box;
use core::ops::{Deref, DerefMut};
/// A pair of values, one of which is expected and one of which is actual.
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, thiserror::Error)]
#[error("got {got}, expected {expected}")]
pub struct GotExpected<T> {
/// The actual value.
pub got: T,
/// The expected value.
pub expected: T,
}
impl<T> From<(T, T)> for GotExpected<T> {
#[inline]
fn from((got, expected): (T, T)) -> Self {
Self::new(got, expected)
}
}
impl<T> GotExpected<T> {
/// Creates a new error from a pair of values.
#[inline]
pub const fn new(got: T, expected: T) -> Self {
Self { got, expected }
}
}
/// A pair of values, one of which is expected and one of which is actual.
///
/// Same as [`GotExpected`], but [`Box`]ed for smaller size.
///
/// Prefer instantiating using [`GotExpected`], and then using `.into()` to convert to this type.
#[derive(Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, thiserror::Error, Debug)]
#[error(transparent)]
pub struct GotExpectedBoxed<T>(pub Box<GotExpected<T>>);
impl<T> Deref for GotExpectedBoxed<T> {
type Target = GotExpected<T>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for GotExpectedBoxed<T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T> From<(T, T)> for GotExpectedBoxed<T> {
#[inline]
fn from(value: (T, T)) -> Self {
Self(Box::new(GotExpected::from(value)))
}
}
impl<T> From<GotExpected<T>> for GotExpectedBoxed<T> {
#[inline]
fn from(value: GotExpected<T>) -> Self {
Self(Box::new(value))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/receipt.rs | crates/primitives-traits/src/receipt.rs | //! Receipt abstraction
use crate::{InMemorySize, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat};
use alloc::vec::Vec;
use alloy_consensus::{
Eip2718EncodableReceipt, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718,
};
use alloy_rlp::{Decodable, Encodable};
use core::fmt;
/// Helper trait that unifies all behaviour required by receipt to support full node operations.
pub trait FullReceipt: Receipt + MaybeCompact {}
impl<T> FullReceipt for T where T: Receipt + MaybeCompact {}
/// Abstraction of a receipt.
pub trait Receipt:
Send
+ Sync
+ Unpin
+ Clone
+ fmt::Debug
+ TxReceipt<Log = alloy_primitives::Log>
+ RlpEncodableReceipt
+ RlpDecodableReceipt
+ Encodable
+ Decodable
+ Eip2718EncodableReceipt
+ Typed2718
+ MaybeSerde
+ InMemorySize
+ MaybeSerdeBincodeCompat
{
}
// Blanket implementation for any type that satisfies all the supertrait bounds
impl<T> Receipt for T where
T: Send
+ Sync
+ Unpin
+ Clone
+ fmt::Debug
+ TxReceipt<Log = alloy_primitives::Log>
+ RlpEncodableReceipt
+ RlpDecodableReceipt
+ Encodable
+ Decodable
+ Eip2718EncodableReceipt
+ Typed2718
+ MaybeSerde
+ InMemorySize
+ MaybeSerdeBincodeCompat
{
}
/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used).
pub fn gas_spent_by_transactions<I, T>(receipts: I) -> Vec<(u64, u64)>
where
I: IntoIterator<Item = T>,
T: TxReceipt,
{
receipts
.into_iter()
.enumerate()
.map(|(id, receipt)| (id as u64, receipt.cumulative_gas_used()))
.collect()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/crypto.rs | crates/primitives-traits/src/crypto.rs | //! Crypto utilities.
pub use alloy_consensus::crypto::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/extended.rs | crates/primitives-traits/src/extended.rs | use crate::{
size::InMemorySize,
transaction::signed::{RecoveryError, SignedTransaction},
};
use alloc::vec::Vec;
use alloy_consensus::{transaction::SignerRecoverable, EthereumTxEnvelope, Transaction};
use alloy_eips::{
eip2718::{Eip2718Error, Eip2718Result, IsTyped2718},
eip2930::AccessList,
eip7702::SignedAuthorization,
Decodable2718, Encodable2718, Typed2718,
};
use alloy_primitives::{ChainId, TxHash};
use alloy_rlp::{BufMut, Decodable, Encodable, Result as RlpResult};
use revm_primitives::{Address, Bytes, TxKind, B256, U256};
macro_rules! delegate {
($self:expr => $tx:ident.$method:ident($($arg:expr),*)) => {
match $self {
Self::BuiltIn($tx) => $tx.$method($($arg),*),
Self::Other($tx) => $tx.$method($($arg),*),
}
};
}
/// An enum that combines two different transaction types.
///
/// This is intended to be used to extend existing presets, for example the ethereum or opstack
/// transaction types and receipts
///
/// Note: The [`Extended::Other`] variants must not overlap with the builtin one, transaction
/// types must be unique. For example if [`Extended::BuiltIn`] contains an `EIP-1559` type variant,
/// [`Extended::Other`] must not include that type.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub enum Extended<BuiltIn, Other> {
/// The builtin transaction type.
BuiltIn(BuiltIn),
/// The other transaction type.
Other(Other),
}
// Every accessor simply delegates to the wrapped variant: both `B` and `T`
// implement `Transaction`, so the enum is transparent with respect to the trait.
impl<B, T> Transaction for Extended<B, T>
where
    B: Transaction,
    T: Transaction,
{
    fn chain_id(&self) -> Option<ChainId> {
        delegate!(self => tx.chain_id())
    }
    fn nonce(&self) -> u64 {
        delegate!(self => tx.nonce())
    }
    fn gas_limit(&self) -> u64 {
        delegate!(self => tx.gas_limit())
    }
    fn gas_price(&self) -> Option<u128> {
        delegate!(self => tx.gas_price())
    }
    fn max_fee_per_gas(&self) -> u128 {
        delegate!(self => tx.max_fee_per_gas())
    }
    fn max_priority_fee_per_gas(&self) -> Option<u128> {
        delegate!(self => tx.max_priority_fee_per_gas())
    }
    fn max_fee_per_blob_gas(&self) -> Option<u128> {
        delegate!(self => tx.max_fee_per_blob_gas())
    }
    fn priority_fee_or_price(&self) -> u128 {
        delegate!(self => tx.priority_fee_or_price())
    }
    fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
        delegate!(self => tx.effective_gas_price(base_fee))
    }
    fn is_dynamic_fee(&self) -> bool {
        delegate!(self => tx.is_dynamic_fee())
    }
    fn kind(&self) -> TxKind {
        delegate!(self => tx.kind())
    }
    fn is_create(&self) -> bool {
        // Bug fix: this previously returned `false` unconditionally for the
        // `Other` variant, disagreeing with `kind()` (which delegates) whenever
        // a custom transaction is a contract creation. Delegate like every
        // other accessor so `is_create()` and `kind().is_create()` agree.
        delegate!(self => tx.is_create())
    }
    fn value(&self) -> U256 {
        delegate!(self => tx.value())
    }
    fn input(&self) -> &Bytes {
        delegate!(self => tx.input())
    }
    fn access_list(&self) -> Option<&AccessList> {
        delegate!(self => tx.access_list())
    }
    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
        delegate!(self => tx.blob_versioned_hashes())
    }
    fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
        delegate!(self => tx.authorization_list())
    }
}
impl<B, T> IsTyped2718 for Extended<B, T>
where
    B: IsTyped2718,
    T: IsTyped2718,
{
    /// A type id belongs to this combined set when either the builtin or the
    /// extension transaction set claims it.
    fn is_type(type_id: u8) -> bool {
        if B::is_type(type_id) {
            return true;
        }
        T::is_type(type_id)
    }
}
impl<B, T> InMemorySize for Extended<B, T>
where
    B: InMemorySize,
    T: InMemorySize,
{
    /// Heuristic in-memory size of whichever transaction is wrapped.
    fn size(&self) -> usize {
        match self {
            Self::BuiltIn(inner) => inner.size(),
            Self::Other(inner) => inner.size(),
        }
    }
}
impl<B, T> SignerRecoverable for Extended<B, T>
where
    B: SignedTransaction + IsTyped2718,
    T: SignedTransaction,
{
    /// Recovers the signer, enforcing EIP-2 (low-s) signature rules.
    fn recover_signer(&self) -> Result<Address, RecoveryError> {
        match self {
            Self::BuiltIn(inner) => inner.recover_signer(),
            Self::Other(inner) => inner.recover_signer(),
        }
    }
    /// Recovers the signer without the EIP-2 low-s check (pre-homestead txs).
    fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
        match self {
            Self::BuiltIn(inner) => inner.recover_signer_unchecked(),
            Self::Other(inner) => inner.recover_signer_unchecked(),
        }
    }
    /// Unchecked recovery that reuses `buf` as scratch space for encoding.
    fn recover_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Result<Address, RecoveryError> {
        match self {
            Self::BuiltIn(inner) => inner.recover_unchecked_with_buf(buf),
            Self::Other(inner) => inner.recover_unchecked_with_buf(buf),
        }
    }
}
impl<B, T> SignedTransaction for Extended<B, T>
where
    B: SignedTransaction + IsTyped2718,
    T: SignedTransaction,
{
    /// Returns the hash of the wrapped signed transaction.
    fn tx_hash(&self) -> &TxHash {
        match self {
            Self::Other(inner) => inner.tx_hash(),
            Self::BuiltIn(inner) => inner.tx_hash(),
        }
    }
}
impl<B, T> Typed2718 for Extended<B, T>
where
    B: Typed2718,
    T: Typed2718,
{
    /// EIP-2718 type byte of the wrapped transaction.
    fn ty(&self) -> u8 {
        match self {
            Self::Other(inner) => inner.ty(),
            Self::BuiltIn(inner) => inner.ty(),
        }
    }
}
impl<B, T> Decodable2718 for Extended<B, T>
where
    B: Decodable2718 + IsTyped2718,
    T: Decodable2718,
{
    /// Routes typed decoding by the EIP-2718 type byte: types claimed by the
    /// builtin set decode as `BuiltIn`, everything else is attempted as `Other`.
    fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
        if B::is_type(ty) {
            let envelope = B::typed_decode(ty, buf)?;
            Ok(Self::BuiltIn(envelope))
        } else {
            let other = T::typed_decode(ty, buf)?;
            Ok(Self::Other(other))
        }
    }
    /// Fallback (legacy, untyped) payloads are always decoded as the builtin
    /// type; the extension type is never attempted here.
    fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> {
        if buf.is_empty() {
            return Err(Eip2718Error::RlpError(alloy_rlp::Error::InputTooShort));
        }
        B::fallback_decode(buf).map(Self::BuiltIn)
    }
}
impl<B, T> Encodable2718 for Extended<B, T>
where
    B: Encodable2718,
    T: Encodable2718,
{
    /// Length of the EIP-2718 encoding of the wrapped transaction.
    fn encode_2718_len(&self) -> usize {
        match self {
            Self::Other(inner) => inner.encode_2718_len(),
            Self::BuiltIn(inner) => inner.encode_2718_len(),
        }
    }
    /// Writes the EIP-2718 encoding of the wrapped transaction into `out`.
    fn encode_2718(&self, out: &mut dyn BufMut) {
        match self {
            Self::Other(inner) => inner.encode_2718(out),
            Self::BuiltIn(inner) => inner.encode_2718(out),
        }
    }
}
impl<B, T> Encodable for Extended<B, T>
where
    B: Encodable,
    T: Encodable,
{
    /// RLP-encodes the wrapped transaction into `out`.
    fn encode(&self, out: &mut dyn BufMut) {
        match self {
            Self::Other(inner) => inner.encode(out),
            Self::BuiltIn(inner) => inner.encode(out),
        }
    }
    /// RLP-encoded length of the wrapped transaction.
    fn length(&self) -> usize {
        match self {
            Self::Other(inner) => inner.length(),
            Self::BuiltIn(inner) => inner.length(),
        }
    }
}
impl<B, T> Decodable for Extended<B, T>
where
    B: Decodable,
    T: Decodable,
{
    /// Decodes by first attempting the builtin representation; on failure the
    /// buffer is rewound and the extension type is tried instead.
    fn decode(buf: &mut &[u8]) -> RlpResult<Self> {
        let snapshot = *buf;
        if let Ok(tx) = B::decode(buf) {
            return Ok(Self::BuiltIn(tx));
        }
        // The failed attempt may have consumed bytes — rewind before retrying.
        *buf = snapshot;
        T::decode(buf).map(Self::Other)
    }
}
impl<Eip4844, Tx> From<EthereumTxEnvelope<Eip4844>> for Extended<EthereumTxEnvelope<Eip4844>, Tx> {
    /// Wraps a standard Ethereum envelope as the builtin variant.
    fn from(value: EthereumTxEnvelope<Eip4844>) -> Self {
        Self::BuiltIn(value)
    }
}
#[cfg(feature = "op")]
mod op {
    //! Optimism-specific trait impls and conversions for [`Extended`].
    use crate::Extended;
    use alloy_consensus::error::ValueError;
    use alloy_primitives::{Sealed, Signature, B256};
    use op_alloy_consensus::{OpPooledTransaction, OpTransaction, OpTxEnvelope, TxDeposit};
    impl<B, T> OpTransaction for Extended<B, T>
    where
        B: OpTransaction,
        T: OpTransaction,
    {
        /// Whether the wrapped transaction is an OP deposit transaction.
        fn is_deposit(&self) -> bool {
            match self {
                Self::BuiltIn(b) => b.is_deposit(),
                Self::Other(t) => t.is_deposit(),
            }
        }
        /// Returns the sealed deposit payload, if the wrapped tx is a deposit.
        fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> {
            match self {
                Self::BuiltIn(b) => b.as_deposit(),
                Self::Other(t) => t.as_deposit(),
            }
        }
    }
    /// Converts the builtin side envelope -> pooled; the extension side passes through.
    impl<Tx> TryFrom<Extended<OpTxEnvelope, Tx>> for Extended<OpPooledTransaction, Tx> {
        type Error = <OpPooledTransaction as TryFrom<OpTxEnvelope>>::Error;
        fn try_from(value: Extended<OpTxEnvelope, Tx>) -> Result<Self, Self::Error> {
            match value {
                Extended::BuiltIn(tx) => {
                    let converted_tx: OpPooledTransaction = tx.try_into()?;
                    Ok(Self::BuiltIn(converted_tx))
                }
                Extended::Other(tx) => Ok(Self::Other(tx)),
            }
        }
    }
    impl<Tx> From<OpPooledTransaction> for Extended<OpTxEnvelope, Tx> {
        /// Pooled transactions are a subset of the envelope, so this is infallible.
        fn from(tx: OpPooledTransaction) -> Self {
            Self::BuiltIn(tx.into())
        }
    }
    impl<Tx> From<Extended<OpPooledTransaction, Tx>> for Extended<OpTxEnvelope, Tx> {
        /// Variant-wise widening from the pooled type to the full envelope.
        fn from(tx: Extended<OpPooledTransaction, Tx>) -> Self {
            match tx {
                Extended::BuiltIn(tx) => Self::BuiltIn(tx.into()),
                Extended::Other(tx) => Self::Other(tx),
            }
        }
    }
    impl<Tx> TryFrom<Extended<OpTxEnvelope, Tx>> for OpPooledTransaction {
        type Error = ValueError<OpTxEnvelope>;
        /// Only the builtin variant can become a pooled tx; `Other` always errors.
        fn try_from(_tx: Extended<OpTxEnvelope, Tx>) -> Result<Self, Self::Error> {
            match _tx {
                Extended::BuiltIn(inner) => inner.try_into(),
                // NOTE(review): `ValueError` requires an `OpTxEnvelope` payload, so a
                // dummy legacy envelope (signature decoded from 65 zero bytes) is
                // fabricated just to carry the error message. The custom tx itself is
                // dropped here — confirm a cheaper/clearer placeholder isn't possible.
                Extended::Other(_tx) => Err(ValueError::new(
                    OpTxEnvelope::Legacy(alloy_consensus::Signed::new_unchecked(
                        alloy_consensus::TxLegacy::default(),
                        Signature::decode_rlp_vrs(&mut &[0u8; 65][..], |_| Ok(false)).unwrap(),
                        B256::default(),
                    )),
                    "Cannot convert custom transaction to OpPooledTransaction",
                )),
            }
        }
    }
    impl<Tx> From<OpTxEnvelope> for Extended<OpTxEnvelope, Tx> {
        /// Wraps an OP envelope as the builtin variant.
        fn from(value: OpTxEnvelope) -> Self {
            Self::BuiltIn(value)
        }
    }
}
#[cfg(feature = "serde-bincode-compat")]
mod serde_bincode_compat {
    use super::*;
    use crate::serde_bincode_compat::SerdeBincodeCompat;
    /// Bincode-friendly mirror of [`Extended`], holding each side's compact repr.
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[derive(Debug)]
    pub enum ExtendedRepr<'a, B: SerdeBincodeCompat, T: SerdeBincodeCompat> {
        /// Repr of the builtin variant.
        BuiltIn(B::BincodeRepr<'a>),
        /// Repr of the extension variant.
        Other(T::BincodeRepr<'a>),
    }
    impl<B, T> SerdeBincodeCompat for Extended<B, T>
    where
        B: SerdeBincodeCompat + core::fmt::Debug,
        T: SerdeBincodeCompat + core::fmt::Debug,
    {
        type BincodeRepr<'a> = ExtendedRepr<'a, B, T>;
        /// Borrowing conversion into the bincode-compatible representation.
        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            match self {
                Self::BuiltIn(tx) => ExtendedRepr::BuiltIn(tx.as_repr()),
                Self::Other(tx) => ExtendedRepr::Other(tx.as_repr()),
            }
        }
        /// Rebuilds the [`Extended`] value from its representation.
        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            match repr {
                ExtendedRepr::BuiltIn(tx_repr) => Self::BuiltIn(B::from_repr(tx_repr)),
                ExtendedRepr::Other(tx_repr) => Self::Other(T::from_repr(tx_repr)),
            }
        }
    }
}
#[cfg(feature = "reth-codec")]
use alloy_primitives::bytes::Buf;
#[cfg(feature = "reth-codec")]
impl<B, T> reth_codecs::Compact for Extended<B, T>
where
    B: Transaction + IsTyped2718 + reth_codecs::Compact,
    T: Transaction + reth_codecs::Compact,
{
    /// Encodes as one EIP-2718 type byte followed by the inner compact payload.
    fn to_compact<Buf>(&self, buf: &mut Buf) -> usize
    where
        Buf: alloy_rlp::bytes::BufMut + AsMut<[u8]>,
    {
        buf.put_u8(self.ty());
        // NOTE(review): the returned length covers only the inner payload and
        // not the type byte written above, even though that byte is consumed by
        // `from_compact`. Confirm against the `Compact` contract (total bytes
        // written would be `1 + inner`).
        match self {
            Self::BuiltIn(tx) => tx.to_compact(buf),
            Self::Other(tx) => tx.to_compact(buf),
        }
    }
    /// Reads the leading type byte and dispatches to whichever side claims it;
    /// unclaimed types fall through to the extension type.
    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
        let type_byte = buf.get_u8();
        if <B as IsTyped2718>::is_type(type_byte) {
            let (tx, remaining) = B::from_compact(buf, len);
            return (Self::BuiltIn(tx), remaining);
        }
        let (tx, remaining) = T::from_compact(buf, len);
        (Self::Other(tx), remaining)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/account.rs | crates/primitives-traits/src/account.rs | use alloy_consensus::constants::KECCAK_EMPTY;
use alloy_primitives::{keccak256, Bytes, B256, U256};
use alloy_trie::TrieAccount;
use derive_more::Deref;
use revm_bytecode::{Bytecode as RevmBytecode, BytecodeDecodeError};
use revm_state::AccountInfo;
use seismic_alloy_genesis::GenesisAccount;
#[cfg(any(test, feature = "reth-codec"))]
/// Identifiers used in [`Compact`](reth_codecs::Compact) encoding of [`Bytecode`].
pub mod compact_ids {
    /// Identifier for legacy raw bytecode.
    pub const LEGACY_RAW_BYTECODE_ID: u8 = 0;
    /// Identifier for removed bytecode variant.
    ///
    /// Kept only so stale database entries can be recognized (and rejected).
    pub const REMOVED_BYTECODE_ID: u8 = 1;
    /// Identifier for [`LegacyAnalyzed`](revm_bytecode::Bytecode::LegacyAnalyzed).
    pub const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2;
    /// Identifier for [`Eip7702`](revm_bytecode::Bytecode::Eip7702).
    ///
    /// NOTE(review): id `3` is skipped (presumably a removed variant, like `1`) —
    /// confirm before ever reusing it.
    pub const EIP7702_BYTECODE_ID: u8 = 4;
}
/// An Ethereum account.
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
pub struct Account {
    /// Account nonce.
    pub nonce: u64,
    /// Account balance.
    pub balance: U256,
    /// Hash of the account's bytecode.
    ///
    /// `None` means the account has no code; `Some(KECCAK_EMPTY)` is treated
    /// equivalently by [`Account::is_empty`].
    pub bytecode_hash: Option<B256>,
}
impl Account {
    /// Whether the account has bytecode.
    pub const fn has_bytecode(&self) -> bool {
        self.bytecode_hash.is_some()
    }
    /// After `SpuriousDragon` empty account is defined as account with nonce == 0 && balance == 0
    /// && bytecode = None (or hash is [`KECCAK_EMPTY`]).
    pub fn is_empty(&self) -> bool {
        let code_is_empty = match self.bytecode_hash {
            None => true,
            Some(hash) => hash == KECCAK_EMPTY,
        };
        self.nonce == 0 && self.balance.is_zero() && code_is_empty
    }
    /// Returns an account bytecode's hash.
    /// In case of no bytecode, returns [`KECCAK_EMPTY`].
    pub fn get_bytecode_hash(&self) -> B256 {
        match self.bytecode_hash {
            Some(hash) => hash,
            None => KECCAK_EMPTY,
        }
    }
    /// Converts the account into a trie account with the given storage root.
    pub fn into_trie_account(self, storage_root: B256) -> TrieAccount {
        let Self { nonce, balance, bytecode_hash } = self;
        let code_hash = bytecode_hash.unwrap_or(KECCAK_EMPTY);
        TrieAccount { nonce, balance, storage_root, code_hash }
    }
    /// Extracts the account information from a [`revm_state::Account`].
    ///
    /// An empty code hash is normalized to `bytecode_hash: None`.
    pub fn from_revm_account(revm_account: &revm_state::Account) -> Self {
        let info = &revm_account.info;
        Self {
            balance: info.balance,
            nonce: info.nonce,
            bytecode_hash: (info.code_hash != revm_primitives::KECCAK_EMPTY)
                .then_some(info.code_hash),
        }
    }
}
impl From<revm_state::Account> for Account {
    /// Owned conversion; delegates to [`Account::from_revm_account`].
    fn from(value: revm_state::Account) -> Self {
        Self::from_revm_account(&value)
    }
}
/// Bytecode for an account.
///
/// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Debug, Clone, Default, PartialEq, Eq, Deref)]
pub struct Bytecode(pub RevmBytecode);
impl Bytecode {
    /// Create new bytecode from raw bytes.
    ///
    /// No analysis will be performed.
    ///
    /// # Panics
    ///
    /// Panics if bytecode is EOF and has incorrect format.
    pub fn new_raw(bytes: Bytes) -> Self {
        Self(RevmBytecode::new_raw(bytes))
    }
    /// Creates a new raw [`revm_bytecode::Bytecode`].
    ///
    /// Returns an error on incorrect Bytecode format.
    ///
    /// Prefer this over [`Self::new_raw`] when the input is untrusted.
    #[inline]
    pub fn new_raw_checked(bytecode: Bytes) -> Result<Self, BytecodeDecodeError> {
        RevmBytecode::new_raw_checked(bytecode).map(Self)
    }
}
#[cfg(any(test, feature = "reth-codec"))]
impl reth_codecs::Compact for Bytecode {
    /// On-disk layout: `[len: u32 BE][raw bytecode][variant id: u8][variant payload]`.
    ///
    /// Returns the total number of bytes written
    /// (`4 + bytecode.len() + variant payload length`).
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        use compact_ids::{EIP7702_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID};
        // Raw code bytes to persist; this match is exhaustive over the revm
        // variants available in this build.
        let bytecode = match &self.0 {
            RevmBytecode::LegacyAnalyzed(analyzed) => analyzed.bytecode(),
            RevmBytecode::Eip7702(eip7702) => eip7702.raw(),
        };
        buf.put_u32(bytecode.len() as u32);
        buf.put_slice(bytecode.as_ref());
        let len = match &self.0 {
            // [`REMOVED_BYTECODE_ID`] has been removed.
            RevmBytecode::LegacyAnalyzed(analyzed) => {
                buf.put_u8(LEGACY_ANALYZED_BYTECODE_ID);
                // Original (pre-padding) length plus the raw jump-table bits.
                buf.put_u64(analyzed.original_len() as u64);
                let map = analyzed.jump_table().as_slice();
                buf.put_slice(map);
                1 + 8 + map.len()
            }
            RevmBytecode::Eip7702(_) => {
                // Fully reconstructible from the raw bytes; only the tag is stored.
                buf.put_u8(EIP7702_BYTECODE_ID);
                1
            }
        };
        len + bytecode.len() + 4
    }
    // # Panics
    //
    // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the
    // database.
    fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) {
        use byteorder::ReadBytesExt;
        use bytes::Buf;
        use compact_ids::*;
        // Mirrors the layout written by `to_compact` above.
        let len = buf.read_u32::<byteorder::BigEndian>().expect("could not read bytecode length")
            as usize;
        let bytes = Bytes::from(buf.copy_to_bytes(len));
        let variant = buf.read_u8().expect("could not read bytecode variant");
        let decoded = match variant {
            LEGACY_RAW_BYTECODE_ID => Self(RevmBytecode::new_raw(bytes)),
            REMOVED_BYTECODE_ID => {
                unreachable!("Junk data in database: checked Bytecode variant was removed")
            }
            LEGACY_ANALYZED_BYTECODE_ID => {
                let original_len = buf.read_u64::<byteorder::BigEndian>().unwrap() as usize;
                // When saving jumptable, its length is getting aligned to u8 boundary. Thus, we
                // need to re-calculate the internal length of bitvec and truncate it when loading
                // jumptables to avoid inconsistencies during `Compact` roundtrip.
                let jump_table_len = if buf.len() * 8 >= bytes.len() {
                    // Use length of padded bytecode if we can fit it
                    bytes.len()
                } else {
                    // Otherwise, use original_len
                    original_len
                };
                Self(RevmBytecode::new_analyzed(
                    bytes,
                    original_len,
                    revm_bytecode::JumpTable::from_slice(buf, jump_table_len),
                ))
            }
            EIP7702_BYTECODE_ID => {
                // EIP-7702 bytecode objects will be decoded from the raw bytecode
                Self(RevmBytecode::new_raw(bytes))
            }
            _ => unreachable!("Junk data in database: unknown Bytecode variant"),
        };
        // Note: the remainder after the jump table is intentionally discarded.
        (decoded, &[])
    }
}
impl From<&GenesisAccount> for Account {
    /// Builds an [`Account`] from genesis data; the bytecode hash is the
    /// keccak256 of the genesis code, when code is present.
    fn from(genesis: &GenesisAccount) -> Self {
        let bytecode_hash = genesis.code.as_ref().map(keccak256);
        Self { nonce: genesis.nonce.unwrap_or(0), balance: genesis.balance, bytecode_hash }
    }
}
impl From<AccountInfo> for Account {
    /// Converts an owned revm [`AccountInfo`] into an [`Account`].
    ///
    /// Delegates to the `From<&AccountInfo>` impl so the mapping logic
    /// (empty code hash becomes `None`) lives in exactly one place instead of
    /// being duplicated.
    fn from(revm_acc: AccountInfo) -> Self {
        Self::from(&revm_acc)
    }
}
impl From<&AccountInfo> for Account {
    /// Borrowed conversion from revm's [`AccountInfo`].
    ///
    /// An empty code hash maps to `bytecode_hash: None`.
    fn from(revm_acc: &AccountInfo) -> Self {
        Self {
            balance: revm_acc.balance,
            nonce: revm_acc.nonce,
            bytecode_hash: (!revm_acc.is_empty_code_hash()).then_some(revm_acc.code_hash),
        }
    }
}
impl From<Account> for AccountInfo {
fn from(reth_acc: Account) -> Self {
Self {
balance: reth_acc.balance,
nonce: reth_acc.nonce,
code_hash: reth_acc.bytecode_hash.unwrap_or(KECCAK_EMPTY),
code: None,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::{hex_literal::hex, B256, U256};
    use reth_codecs::Compact;
    use revm_bytecode::{JumpTable, LegacyAnalyzedBytecode};
    // Compact encoding of `Account` grows by one byte per non-default field set.
    #[test]
    fn test_account() {
        let mut buf = vec![];
        let mut acc = Account::default();
        let len = acc.to_compact(&mut buf);
        assert_eq!(len, 2);
        acc.balance = U256::from(2);
        let len = acc.to_compact(&mut buf);
        assert_eq!(len, 3);
        acc.nonce = 2;
        let len = acc.to_compact(&mut buf);
        assert_eq!(len, 4);
    }
    #[test]
    fn test_empty_account() {
        let mut acc = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None };
        // Nonce 0, balance 0, and bytecode hash set to None is considered empty.
        assert!(acc.is_empty());
        acc.bytecode_hash = Some(KECCAK_EMPTY);
        // Nonce 0, balance 0, and bytecode hash set to KECCAK_EMPTY is considered empty.
        assert!(acc.is_empty());
        acc.balance = U256::from(2);
        // Non-zero balance makes it non-empty.
        assert!(!acc.is_empty());
        acc.balance = U256::ZERO;
        acc.nonce = 10;
        // Non-zero nonce makes it non-empty.
        assert!(!acc.is_empty());
        acc.nonce = 0;
        acc.bytecode_hash = Some(B256::from(U256::ZERO));
        // Non-empty bytecode hash makes it non-empty.
        assert!(!acc.is_empty());
    }
    // NOTE(review): this test is `#[ignore]`d — the expected lengths appear to
    // predate a format change; confirm the expected values before re-enabling.
    #[test]
    #[ignore]
    fn test_bytecode() {
        let mut buf = vec![];
        let bytecode = Bytecode::new_raw(Bytes::default());
        let len = bytecode.to_compact(&mut buf);
        assert_eq!(len, 14);
        let mut buf = vec![];
        let bytecode = Bytecode::new_raw(Bytes::from(&hex!("ffff")));
        let len = bytecode.to_compact(&mut buf);
        assert_eq!(len, 17);
        let mut buf = vec![];
        let bytecode = Bytecode(RevmBytecode::LegacyAnalyzed(LegacyAnalyzedBytecode::new(
            Bytes::from(&hex!("ff00")),
            2,
            JumpTable::from_slice(&[0], 2),
        )));
        let len = bytecode.to_compact(&mut buf);
        assert_eq!(len, 16);
        let (decoded, remainder) = Bytecode::from_compact(&buf, len);
        assert_eq!(decoded, bytecode);
        assert!(remainder.is_empty());
    }
    #[test]
    fn test_account_has_bytecode() {
        // Account with no bytecode (None)
        let acc_no_bytecode = Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None };
        assert!(!acc_no_bytecode.has_bytecode(), "Account should not have bytecode");
        // Account with bytecode hash set to KECCAK_EMPTY (should have bytecode)
        let acc_empty_bytecode =
            Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) };
        assert!(acc_empty_bytecode.has_bytecode(), "Account should have bytecode");
        // Account with a non-empty bytecode hash
        let acc_with_bytecode = Account {
            nonce: 1,
            balance: U256::from(1000),
            bytecode_hash: Some(B256::from_slice(&[0x11u8; 32])),
        };
        assert!(acc_with_bytecode.has_bytecode(), "Account should have bytecode");
    }
    #[test]
    fn test_account_get_bytecode_hash() {
        // Account with no bytecode (should return KECCAK_EMPTY)
        let acc_no_bytecode = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None };
        assert_eq!(acc_no_bytecode.get_bytecode_hash(), KECCAK_EMPTY, "Should return KECCAK_EMPTY");
        // Account with bytecode hash set to KECCAK_EMPTY
        let acc_empty_bytecode =
            Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) };
        assert_eq!(
            acc_empty_bytecode.get_bytecode_hash(),
            KECCAK_EMPTY,
            "Should return KECCAK_EMPTY"
        );
        // Account with a valid bytecode hash
        let bytecode_hash = B256::from_slice(&[0x11u8; 32]);
        let acc_with_bytecode =
            Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(bytecode_hash) };
        assert_eq!(
            acc_with_bytecode.get_bytecode_hash(),
            bytecode_hash,
            "Should return the bytecode hash"
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/size.rs | crates/primitives-traits/src/size.rs | use alloc::vec::Vec;
use alloy_consensus::{
transaction::TxEip4844Sidecar, EthereumTxEnvelope, Header, TxEip1559, TxEip2930, TxEip4844,
TxEip4844Variant, TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType,
};
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::{Signature, TxHash, B256};
use revm_primitives::Log;
/// Trait for calculating a heuristic for the in-memory size of a struct.
///
/// Results are approximate (heap capacities vs. contents vary by impl) and are
/// intended for cache accounting, not exact measurement.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait InMemorySize {
    /// Returns a heuristic for the in-memory size of a struct.
    fn size(&self) -> usize;
}
impl<T: InMemorySize> InMemorySize for alloy_consensus::Signed<T> {
    /// Inner tx + signature + one `B256` (presumably the envelope's stored
    /// hash — confirm against the `Signed` layout).
    fn size(&self) -> usize {
        T::size(self.tx()) + self.signature().size() + core::mem::size_of::<B256>()
    }
}
/// Implement `InMemorySize` for a type with `size_of`
macro_rules! impl_in_mem_size_size_of {
    ($($ty:ty),*) => {
        $(
            impl InMemorySize for $ty {
                #[inline]
                fn size(&self) -> usize {
                    core::mem::size_of::<Self>()
                }
            }
        )*
    };
}
impl_in_mem_size_size_of!(Signature, TxHash, TxType);
/// Implement `InMemorySize` for a type with a native `size` method.
macro_rules! impl_in_mem_size {
    ($($ty:ty),*) => {
        $(
            impl InMemorySize for $ty {
                #[inline]
                fn size(&self) -> usize {
                    Self::size(self)
                }
            }
        )*
    };
}
impl_in_mem_size!(
    Header,
    TxLegacy,
    TxEip2930,
    TxEip1559,
    TxEip7702,
    TxEip4844,
    seismic_alloy_consensus::TxSeismic
);
// The 4844 variants are generic over the sidecar type, so the macro above
// cannot be used; spelled out manually instead.
impl<T: TxEip4844Sidecar> InMemorySize for TxEip4844Variant<T> {
    #[inline]
    fn size(&self) -> usize {
        Self::size(self)
    }
}
impl<T: TxEip4844Sidecar> InMemorySize for TxEip4844WithSidecar<T> {
    #[inline]
    fn size(&self) -> usize {
        Self::size(self)
    }
}
#[cfg(feature = "op")]
impl_in_mem_size_size_of!(op_alloy_consensus::OpTxType, seismic_alloy_consensus::SeismicTxType);
impl InMemorySize for alloy_consensus::Receipt {
    /// Heuristic size: status + cumulative gas + the log *slots*.
    ///
    /// Only `logs.capacity() * size_of::<Log>()` is counted — heap data owned
    /// by each `Log` (topics, data bytes) is not included.
    fn size(&self) -> usize {
        let Self { status, cumulative_gas_used, logs } = self;
        core::mem::size_of_val(status) +
            core::mem::size_of_val(cumulative_gas_used) +
            logs.capacity() * core::mem::size_of::<Log>()
    }
}
impl<T: InMemorySize, H: InMemorySize> InMemorySize for EthereumTxEnvelope<T> {
    /// Delegates to the concrete transaction variant's `size`.
    fn size(&self) -> usize {
        match self {
            Self::Legacy(tx) => tx.size(),
            Self::Eip2930(tx) => tx.size(),
            Self::Eip1559(tx) => tx.size(),
            Self::Eip4844(tx) => tx.size(),
            Self::Eip7702(tx) => tx.size(),
        }
    }
}
impl<T: InMemorySize, H: InMemorySize> InMemorySize for alloy_consensus::BlockBody<T, H> {
    /// Calculates a heuristic for the in-memory size of the block body
    /// (transactions + ommers + withdrawals, including vector capacities).
    #[inline]
    fn size(&self) -> usize {
        self.transactions.iter().map(T::size).sum::<usize>() +
            self.transactions.capacity() * core::mem::size_of::<T>() +
            self.ommers.iter().map(H::size).sum::<usize>() +
            self.ommers.capacity() * core::mem::size_of::<Header>() +
            self.withdrawals
                .as_ref()
                .map_or(core::mem::size_of::<Option<Withdrawals>>(), Withdrawals::total_size)
    }
}
impl<T: InMemorySize, H: InMemorySize> InMemorySize for alloy_consensus::Block<T, H> {
    /// Header size plus body size.
    #[inline]
    fn size(&self) -> usize {
        self.header.size() + self.body.size()
    }
}
impl<T: InMemorySize> InMemorySize for Vec<T> {
    /// Sums the heuristic sizes of all elements.
    ///
    /// Note: This does not track additional capacity
    fn size(&self) -> usize {
        self.iter().fold(0usize, |total, item| total + item.size())
    }
}
impl InMemorySize for u64 {
    /// A `u64` occupies exactly its own width.
    fn size(&self) -> usize {
        core::mem::size_of::<u64>()
    }
}
mod seismic {
    //! [`InMemorySize`] impls for the seismic transaction types.
    use super::*;
    impl InMemorySize for seismic_alloy_consensus::SeismicTypedTransaction {
        /// Delegates to the concrete variant's `size`.
        fn size(&self) -> usize {
            match self {
                Self::Legacy(tx) => tx.size(),
                Self::Eip2930(tx) => tx.size(),
                Self::Eip1559(tx) => tx.size(),
                Self::Eip4844(tx) => tx.size(),
                Self::Eip7702(tx) => tx.size(),
                Self::Seismic(tx) => tx.size(),
            }
        }
    }
    impl InMemorySize for seismic_alloy_consensus::SeismicTxEnvelope {
        /// Delegates to the concrete variant's `size`.
        fn size(&self) -> usize {
            match self {
                Self::Legacy(tx) => tx.size(),
                Self::Eip2930(tx) => tx.size(),
                Self::Eip1559(tx) => tx.size(),
                Self::Eip4844(tx) => tx.size(),
                Self::Eip7702(tx) => tx.size(),
                Self::Seismic(tx) => tx.size(),
            }
        }
    }
}
/// Implementation for optimism types
#[cfg(feature = "op")]
mod op {
    use super::*;
    impl InMemorySize for op_alloy_consensus::OpDepositReceipt {
        /// Inner receipt size plus the deposit-specific metadata fields.
        fn size(&self) -> usize {
            let Self { inner, deposit_nonce, deposit_receipt_version } = self;
            inner.size() +
                core::mem::size_of_val(deposit_nonce) +
                core::mem::size_of_val(deposit_receipt_version)
        }
    }
    impl InMemorySize for op_alloy_consensus::OpTypedTransaction {
        /// Delegates to the concrete variant's `size`.
        fn size(&self) -> usize {
            match self {
                Self::Legacy(tx) => tx.size(),
                Self::Eip2930(tx) => tx.size(),
                Self::Eip1559(tx) => tx.size(),
                Self::Eip7702(tx) => tx.size(),
                Self::Deposit(tx) => tx.size(),
            }
        }
    }
    impl InMemorySize for op_alloy_consensus::OpPooledTransaction {
        /// Delegates to the concrete variant's `size` (no deposit variant in the pool).
        fn size(&self) -> usize {
            match self {
                Self::Legacy(tx) => tx.size(),
                Self::Eip2930(tx) => tx.size(),
                Self::Eip1559(tx) => tx.size(),
                Self::Eip7702(tx) => tx.size(),
            }
        }
    }
    impl InMemorySize for op_alloy_consensus::OpTxEnvelope {
        /// Delegates to the concrete variant's `size`.
        fn size(&self) -> usize {
            match self {
                Self::Legacy(tx) => tx.size(),
                Self::Eip2930(tx) => tx.size(),
                Self::Eip1559(tx) => tx.size(),
                Self::Eip7702(tx) => tx.size(),
                Self::Deposit(tx) => tx.size(),
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // ensures we don't have any recursion in the `InMemorySize` impls
    #[test]
    fn no_in_memory_no_recursion() {
        // Calling `size()` on a default value must terminate — guards against
        // accidental mutual recursion in the macro-generated impls.
        fn assert_no_recursion<T: InMemorySize + Default>() {
            let _ = T::default().size();
        }
        assert_no_recursion::<Header>();
        assert_no_recursion::<TxLegacy>();
        assert_no_recursion::<TxEip2930>();
        assert_no_recursion::<TxEip1559>();
        assert_no_recursion::<TxEip7702>();
        assert_no_recursion::<TxEip4844>();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/signature.rs | crates/primitives-traits/src/transaction/signature.rs | //! Signature types and helpers
/// Re-exported signature type
pub use alloy_primitives::Signature;
#[cfg(test)]
mod tests {
    use crate::crypto::secp256k1::recover_signer;
    use alloy_primitives::{address, b256, Signature, U256};
    use std::str::FromStr;
    // Recovers the signer from a fixed (r, s, parity) signature and message
    // hash, and checks it matches the expected address.
    #[test]
    fn test_recover_signer() {
        let signature = Signature::new(
            U256::from_str(
                "18515461264373351373200002665853028612451056578545711640558177340181847433846",
            )
            .unwrap(),
            U256::from_str(
                "46948507304638947509940763649030358759909902576025900602547168820602576006531",
            )
            .unwrap(),
            false,
        );
        let hash = b256!("0xdaf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53");
        let signer = recover_signer(&signature, hash).unwrap();
        let expected = address!("0x9d8a62f656a8d1615c1294fd71e9cfb3e4855a4f");
        assert_eq!(expected, signer);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/execute.rs | crates/primitives-traits/src/transaction/execute.rs | //! Abstraction of an executable transaction.
use alloy_primitives::Address;
/// Loads transaction into execution environment.
pub trait FillTxEnv<TxEnv> {
    /// Fills `TxEnv` with an [`Address`] and transaction.
    ///
    /// Implementations populate `tx_env` from `self` together with the given
    /// `sender` address (typically the recovered signer).
    fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/error.rs | crates/primitives-traits/src/transaction/error.rs | //! Various error variants that can happen when working with transactions.
use crate::GotExpectedBoxed;
use alloy_primitives::U256;
/// Represents error variants that can happen when trying to validate a transaction.
#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)]
pub enum InvalidTransactionError {
/// The sender does not have enough funds to cover the transaction fees
#[error(
"sender does not have enough funds ({}) to cover transaction fees: {}", _0.got, _0.expected
)]
InsufficientFunds(GotExpectedBoxed<U256>),
/// The nonce is lower than the account's nonce, or there is a nonce gap present.
///
/// This is a consensus error.
#[error("transaction nonce is not consistent: next nonce {state}, tx nonce {tx}")]
NonceNotConsistent {
/// The nonce of the transaction.
tx: u64,
/// The current state of the nonce in the local chain.
state: u64,
},
/// The transaction is before Spurious Dragon and has a chain ID.
#[error("transactions before Spurious Dragon should not have a chain ID")]
OldLegacyChainId,
/// The chain ID in the transaction does not match the current network configuration.
#[error("transaction's chain ID does not match")]
ChainIdMismatch,
/// The transaction requires EIP-2930 which is not enabled currently.
#[error("EIP-2930 transactions are disabled")]
Eip2930Disabled,
/// The transaction requires EIP-1559 which is not enabled currently.
#[error("EIP-1559 transactions are disabled")]
Eip1559Disabled,
/// The transaction requires EIP-4844 which is not enabled currently.
#[error("EIP-4844 transactions are disabled")]
Eip4844Disabled,
/// The transaction requires EIP-7702 which is not enabled currently.
#[error("EIP-7702 transactions are disabled")]
Eip7702Disabled,
/// Thrown if a transaction is not supported in the current network configuration.
#[error("transaction type not supported")]
TxTypeNotSupported,
/// The calculated gas of the transaction exceeds `u64::MAX`.
#[error("gas overflow (maximum of u64)")]
GasUintOverflow,
/// The transaction is specified to use less gas than required to start the invocation.
#[error("intrinsic gas too low")]
GasTooLow,
/// The transaction gas exceeds the limit
#[error("intrinsic gas too high")]
GasTooHigh,
/// Thrown to ensure no one is able to specify a transaction with a tip higher than the total
/// fee cap.
#[error("max priority fee per gas higher than max fee per gas")]
TipAboveFeeCap,
/// Thrown post London if the transaction's fee is less than the base fee of the block.
#[error("max fee per gas less than block base fee")]
FeeCapTooLow,
/// Thrown if the sender of a transaction is a contract.
#[error("transaction signer has bytecode set")]
SignerAccountHasBytecode,
/// Thrown post Osaka if gas limit is too high.
#[error("gas limit too high")]
GasLimitTooHigh,
/// Failed to decrypt calldata of seismic tx
#[error("Failed to decrypt seismic tx")]
FailedToDecryptSeismicTx,
}
/// Represents error variants that can happen when trying to convert a transaction to pooled
/// transaction.
#[derive(Debug, Clone, Eq, PartialEq, derive_more::Display, derive_more::Error)]
pub enum TransactionConversionError {
    /// This error variant is used when a transaction cannot be converted into a pooled transaction
    /// because it is not supported for P2P network.
    #[display("Transaction is not supported for p2p")]
    UnsupportedForP2P,
}
/// Represents error variants than can happen when trying to convert a recovered transaction.
#[derive(Debug, Clone, Eq, PartialEq, thiserror::Error)]
pub enum TryFromRecoveredTransactionError {
    /// Thrown if the transaction type is unsupported.
    ///
    /// Carries the raw EIP-2718 type byte that was not recognized.
    #[error("Unsupported transaction type: {_0}")]
    UnsupportedTransactionType(u8),
    /// This error variant is used when a blob sidecar is missing.
    #[error("Blob sidecar missing for an EIP-4844 transaction")]
    BlobSidecarMissing,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/access_list.rs | crates/primitives-traits/src/transaction/access_list.rs | //! [EIP-2930](https://eips.ethereum.org/EIPS/eip-2930): Access List types
#[cfg(test)]
mod tests {
    use alloy_eips::eip2930::{AccessList, AccessListItem};
    use alloy_primitives::{Address, B256};
    use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper};
    use proptest::proptest;
    use proptest_arbitrary_interop::arb;
    use reth_codecs::{add_arbitrary_tests, Compact};
    use serde::{Deserialize, Serialize};
    /// This type is kept for compatibility tests after the codec support was added to alloy-eips
    /// `AccessList` type natively
    #[derive(
        Clone,
        Debug,
        PartialEq,
        Eq,
        Hash,
        Default,
        RlpDecodableWrapper,
        RlpEncodableWrapper,
        Serialize,
        Deserialize,
        Compact,
    )]
    #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
    #[add_arbitrary_tests(compact, rlp)]
    struct RethAccessList(Vec<RethAccessListItem>);
    impl PartialEq<AccessList> for RethAccessList {
        fn eq(&self, other: &AccessList) -> bool {
            self.0.iter().zip(other.iter()).all(|(a, b)| a == b)
        }
    }
    // Mirror of `alloy_eips::eip2930::AccessListItem`, kept to verify that the
    // compact encoding of the historical reth type matches alloy's natively.
    #[derive(
        Clone,
        Debug,
        PartialEq,
        Eq,
        Hash,
        Default,
        RlpDecodable,
        RlpEncodable,
        Serialize,
        Deserialize,
        Compact,
    )]
    #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
    #[add_arbitrary_tests(compact, rlp)]
    #[serde(rename_all = "camelCase")]
    struct RethAccessListItem {
        /// Account address that would be loaded at the start of execution
        address: Address,
        /// The storage keys to be loaded at the start of execution.
        ///
        /// Each key is a 32-byte value representing a specific storage slot.
        storage_keys: Vec<B256>,
    }
    impl PartialEq<AccessListItem> for RethAccessListItem {
        fn eq(&self, other: &AccessListItem) -> bool {
            self.address == other.address && self.storage_keys == other.storage_keys
        }
    }
    proptest!(
        // Property: compact roundtrip between the legacy reth type and alloy's
        // native `AccessList` produces identical bytes and lengths.
        #[test]
        fn test_roundtrip_accesslist_compat(access_list in arb::<RethAccessList>()) {
            // Convert access_list to buffer and then create alloy_access_list from buffer and
            // compare
            let mut compacted_reth_access_list = Vec::<u8>::new();
            let len = access_list.to_compact(&mut compacted_reth_access_list);
            // decode the compacted buffer to AccessList
            let alloy_access_list = AccessList::from_compact(&compacted_reth_access_list, len).0;
            assert_eq!(access_list, alloy_access_list);
            let mut compacted_alloy_access_list = Vec::<u8>::new();
            let alloy_len = alloy_access_list.to_compact(&mut compacted_alloy_access_list);
            assert_eq!(len, alloy_len);
            assert_eq!(compacted_reth_access_list, compacted_alloy_access_list);
        }
    );
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/mod.rs | crates/primitives-traits/src/transaction/mod.rs | //! Transaction abstraction
//!
//! This module provides traits for working with blockchain transactions:
//! - [`Transaction`] - Basic transaction interface
//! - [`signed::SignedTransaction`] - Transaction with signature and recovery methods
//! - [`FullTransaction`] - Transaction with database encoding support
//!
//! # Transaction Recovery
//!
//! Transaction senders are not stored directly but recovered from signatures.
//! Use `recover_signer` for post-EIP-2 transactions or `recover_signer_unchecked`
//! for historical transactions.
pub mod execute;
pub mod signature;
pub mod signed;
pub mod error;
pub mod recover;
pub use alloy_consensus::transaction::{SignerRecoverable, TransactionInfo, TransactionMeta};
use crate::{InMemorySize, MaybeCompact, MaybeSerde};
use core::{fmt, hash::Hash};
#[cfg(test)]
mod access_list;
/// Helper trait that unifies all behaviour required by transaction to support full node operations.
pub trait FullTransaction: Transaction + MaybeCompact {}
impl<T> FullTransaction for T where T: Transaction + MaybeCompact {}
/// Abstraction of a transaction.
pub trait Transaction:
Send
+ Sync
+ Unpin
+ Clone
+ fmt::Debug
+ Eq
+ PartialEq
+ Hash
+ alloy_consensus::Transaction
+ InMemorySize
+ MaybeSerde
{
}
impl<T> Transaction for T where
T: Send
+ Sync
+ Unpin
+ Clone
+ fmt::Debug
+ Eq
+ PartialEq
+ Hash
+ alloy_consensus::Transaction
+ InMemorySize
+ MaybeSerde
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/signed.rs | crates/primitives-traits/src/transaction/signed.rs | //! API of a signed transaction.
use crate::{InMemorySize, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat};
use alloc::fmt;
use alloy_consensus::{
transaction::{Recovered, RlpEcdsaEncodableTx, SignerRecoverable},
EthereumTxEnvelope, SignableTransaction,
};
use alloy_eips::eip2718::{Decodable2718, Encodable2718};
use alloy_primitives::{keccak256, Address, Signature, TxHash, B256};
use alloy_rlp::{Decodable, Encodable};
use core::hash::Hash;
pub use alloy_consensus::crypto::RecoveryError;
/// Helper trait that unifies all behaviour required by block to support full node operations.
pub trait FullSignedTx: SignedTransaction + MaybeCompact + MaybeSerdeBincodeCompat {}
impl<T> FullSignedTx for T where T: SignedTransaction + MaybeCompact + MaybeSerdeBincodeCompat {}
/// A signed transaction.
///
/// # Recovery Methods
///
/// This trait provides two types of recovery methods:
/// - Standard methods (e.g., `try_recover`) - enforce EIP-2 low-s signature requirement
/// - Unchecked methods (e.g., `try_recover_unchecked`) - skip EIP-2 validation for pre-EIP-2
/// transactions
///
/// Use unchecked methods only when dealing with historical pre-EIP-2 transactions.
#[auto_impl::auto_impl(&, Arc)]
pub trait SignedTransaction:
Send
+ Sync
+ Unpin
+ Clone
+ fmt::Debug
+ PartialEq
+ Eq
+ Hash
+ Encodable
+ Decodable
+ Encodable2718
+ Decodable2718
+ alloy_consensus::Transaction
+ MaybeSerde
+ InMemorySize
+ SignerRecoverable
{
/// Returns reference to transaction hash.
fn tx_hash(&self) -> &TxHash;
/// Returns whether this transaction type can be __broadcasted__ as full transaction over the
/// network.
///
/// Some transactions are not broadcastable as objects and only allowed to be broadcasted as
/// hashes, e.g. because they missing context (e.g. blob sidecar).
fn is_broadcastable_in_full(&self) -> bool {
// EIP-4844 transactions are not broadcastable in full, only hashes are allowed.
!self.is_eip4844()
}
/// Recover signer from signature and hash.
///
/// Returns an error if the transaction's signature is invalid.
fn try_recover(&self) -> Result<Address, RecoveryError> {
self.recover_signer()
}
/// Recover signer from signature and hash _without ensuring that the signature has a low `s`
/// value_.
///
/// Returns an error if the transaction's signature is invalid.
fn try_recover_unchecked(&self) -> Result<Address, RecoveryError> {
self.recover_signer_unchecked()
}
/// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with
/// tx type.
fn recalculate_hash(&self) -> B256 {
keccak256(self.encoded_2718())
}
/// Tries to recover signer and return [`Recovered`] by cloning the type.
#[auto_impl(keep_default_for(&, Arc))]
fn try_clone_into_recovered(&self) -> Result<Recovered<Self>, RecoveryError> {
self.recover_signer().map(|signer| Recovered::new_unchecked(self.clone(), signer))
}
/// Tries to recover signer and return [`Recovered`] by cloning the type.
#[auto_impl(keep_default_for(&, Arc))]
fn try_clone_into_recovered_unchecked(&self) -> Result<Recovered<Self>, RecoveryError> {
self.recover_signer_unchecked().map(|signer| Recovered::new_unchecked(self.clone(), signer))
}
/// Tries to recover signer and return [`Recovered`].
///
/// Returns `Err(Self)` if the transaction's signature is invalid, see also
/// [`SignerRecoverable::recover_signer`].
#[auto_impl(keep_default_for(&, Arc))]
fn try_into_recovered(self) -> Result<Recovered<Self>, Self> {
match self.recover_signer() {
Ok(signer) => Ok(Recovered::new_unchecked(self, signer)),
Err(_) => Err(self),
}
}
/// Consumes the type, recover signer and return [`Recovered`] _without
/// ensuring that the signature has a low `s` value_ (EIP-2).
///
/// Returns `RecoveryError` if the transaction's signature is invalid.
#[deprecated(note = "Use try_into_recovered_unchecked instead")]
#[auto_impl(keep_default_for(&, Arc))]
fn into_recovered_unchecked(self) -> Result<Recovered<Self>, RecoveryError> {
self.recover_signer_unchecked().map(|signer| Recovered::new_unchecked(self, signer))
}
/// Returns the [`Recovered`] transaction with the given sender.
///
/// Note: assumes the given signer is the signer of this transaction.
#[auto_impl(keep_default_for(&, Arc))]
fn with_signer(self, signer: Address) -> Recovered<Self> {
Recovered::new_unchecked(self, signer)
}
/// Returns the [`Recovered`] transaction with the given signer, using a reference to self.
///
/// Note: assumes the given signer is the signer of this transaction.
#[auto_impl(keep_default_for(&, Arc))]
fn with_signer_ref(&self, signer: Address) -> Recovered<&Self> {
Recovered::new_unchecked(self, signer)
}
}
impl<T> SignedTransaction for EthereumTxEnvelope<T>
where
T: RlpEcdsaEncodableTx + SignableTransaction<Signature> + Unpin,
Self: Clone + PartialEq + Eq + Decodable + Decodable2718 + MaybeSerde + InMemorySize,
{
fn tx_hash(&self) -> &TxHash {
match self {
Self::Legacy(tx) => tx.hash(),
Self::Eip2930(tx) => tx.hash(),
Self::Eip1559(tx) => tx.hash(),
Self::Eip7702(tx) => tx.hash(),
Self::Eip4844(tx) => tx.hash(),
}
}
}
impl SignedTransaction for seismic_alloy_consensus::SeismicTxEnvelope {
fn tx_hash(&self) -> &TxHash {
match self {
Self::Legacy(tx) => tx.hash(),
Self::Eip2930(tx) => tx.hash(),
Self::Eip1559(tx) => tx.hash(),
Self::Eip4844(tx) => tx.hash(),
Self::Eip7702(tx) => tx.hash(),
Self::Seismic(tx) => tx.hash(),
}
}
}
#[cfg(feature = "op")]
mod op {
use super::*;
use op_alloy_consensus::{OpPooledTransaction, OpTxEnvelope};
impl SignedTransaction for OpPooledTransaction {
fn tx_hash(&self) -> &TxHash {
match self {
Self::Legacy(tx) => tx.hash(),
Self::Eip2930(tx) => tx.hash(),
Self::Eip1559(tx) => tx.hash(),
Self::Eip7702(tx) => tx.hash(),
}
}
}
impl SignedTransaction for OpTxEnvelope {
fn tx_hash(&self) -> &TxHash {
match self {
Self::Legacy(tx) => tx.hash(),
Self::Eip2930(tx) => tx.hash(),
Self::Eip1559(tx) => tx.hash(),
Self::Eip7702(tx) => tx.hash(),
Self::Deposit(tx) => tx.hash_ref(),
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/transaction/recover.rs | crates/primitives-traits/src/transaction/recover.rs | //! Helpers for recovering signers from a set of transactions
#[cfg(feature = "rayon")]
pub use rayon::*;
#[cfg(not(feature = "rayon"))]
pub use iter::*;
#[cfg(feature = "rayon")]
mod rayon {
use crate::{transaction::signed::RecoveryError, SignedTransaction};
use alloc::vec::Vec;
use alloy_primitives::Address;
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
/// Recovers a list of signers from a transaction list iterator.
///
/// Returns `None`, if some transaction's signature is invalid
pub fn recover_signers<'a, I, T>(txes: I) -> Result<Vec<Address>, RecoveryError>
where
T: SignedTransaction,
I: IntoParallelIterator<Item = &'a T> + IntoIterator<Item = &'a T> + Send,
{
txes.into_par_iter().map(|tx| tx.recover_signer()).collect()
}
/// Recovers a list of signers from a transaction list iterator _without ensuring that the
/// signature has a low `s` value_.
///
/// Returns `None`, if some transaction's signature is invalid.
pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Result<Vec<Address>, RecoveryError>
where
T: SignedTransaction,
I: IntoParallelIterator<Item = &'a T> + IntoIterator<Item = &'a T> + Send,
{
txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect()
}
}
#[cfg(not(feature = "rayon"))]
mod iter {
use crate::{transaction::signed::RecoveryError, SignedTransaction};
use alloc::vec::Vec;
use alloy_primitives::Address;
/// Recovers a list of signers from a transaction list iterator.
///
/// Returns `Err(RecoveryError)`, if some transaction's signature is invalid
pub fn recover_signers<'a, I, T>(txes: I) -> Result<Vec<Address>, RecoveryError>
where
T: SignedTransaction,
I: IntoIterator<Item = &'a T>,
{
txes.into_iter().map(|tx| tx.recover_signer()).collect()
}
/// Recovers a list of signers from a transaction list iterator _without ensuring that the
/// signature has a low `s` value_.
///
/// Returns `Err(RecoveryError)`, if some transaction's signature is invalid.
pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Result<Vec<Address>, RecoveryError>
where
T: SignedTransaction,
I: IntoIterator<Item = &'a T>,
{
txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/block/recovered.rs | crates/primitives-traits/src/block/recovered.rs | //! Recovered Block variant.
use crate::{
block::{error::SealedBlockRecoveryError, SealedBlock},
transaction::signed::{RecoveryError, SignedTransaction},
Block, BlockBody, InMemorySize, SealedHeader,
};
use alloc::vec::Vec;
use alloy_consensus::{
transaction::{Recovered, TransactionMeta},
BlockHeader,
};
use alloy_eips::{eip1898::BlockWithParent, BlockNumHash, Encodable2718};
use alloy_primitives::{
Address, BlockHash, BlockNumber, Bloom, Bytes, Sealed, TxHash, B256, B64, U256,
};
use derive_more::Deref;
/// A block with senders recovered from the block's transactions.
///
/// This type represents a [`SealedBlock`] where all transaction senders have been
/// recovered and verified. Recovery is an expensive operation that extracts the
/// sender address from each transaction's signature.
///
/// # Construction
///
/// - [`RecoveredBlock::new`] / [`RecoveredBlock::new_unhashed`] - Create with pre-recovered senders
/// (unchecked)
/// - [`RecoveredBlock::try_new`] / [`RecoveredBlock::try_new_unhashed`] - Create with validation
/// - [`RecoveredBlock::try_recover`] - Recover from a block
/// - [`RecoveredBlock::try_recover_sealed`] - Recover from a sealed block
///
/// # Performance
///
/// Sender recovery is computationally expensive. Cache recovered blocks when possible
/// to avoid repeated recovery operations.
///
/// ## Sealing
///
/// This type uses lazy sealing to avoid hashing the header until it is needed:
///
/// [`RecoveredBlock::new_unhashed`] creates a recovered block without hashing the header.
/// [`RecoveredBlock::new`] creates a recovered block with the corresponding block hash.
///
/// ## Recovery
///
/// Sender recovery is fallible and can fail if any of the transactions fail to recover the sender.
/// A [`SealedBlock`] can be upgraded to a [`RecoveredBlock`] using the
/// [`RecoveredBlock::try_recover`] or [`SealedBlock::try_recover`] method.
#[derive(Debug, Clone, Deref)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct RecoveredBlock<B: Block> {
/// Block
#[deref]
#[cfg_attr(
feature = "serde",
serde(bound = "SealedBlock<B>: serde::Serialize + serde::de::DeserializeOwned")
)]
block: SealedBlock<B>,
/// List of senders that match the transactions in the block
senders: Vec<Address>,
}
impl<B: Block> RecoveredBlock<B> {
/// Creates a new recovered block instance with the given senders as provided and the block
/// hash.
///
/// Note: This expects that the given senders match the transactions in the block.
pub fn new(block: B, senders: Vec<Address>, hash: BlockHash) -> Self {
Self { block: SealedBlock::new_unchecked(block, hash), senders }
}
/// Creates a new recovered block instance with the given senders as provided.
///
/// Note: This expects that the given senders match the transactions in the block.
pub fn new_unhashed(block: B, senders: Vec<Address>) -> Self {
Self { block: SealedBlock::new_unhashed(block), senders }
}
/// Returns the recovered senders.
pub fn senders(&self) -> &[Address] {
&self.senders
}
/// Returns an iterator over the recovered senders.
pub fn senders_iter(&self) -> impl Iterator<Item = &Address> {
self.senders.iter()
}
/// Consumes the type and returns the inner block.
pub fn into_block(self) -> B {
self.block.into_block()
}
/// Returns a reference to the sealed block.
pub const fn sealed_block(&self) -> &SealedBlock<B> {
&self.block
}
/// Creates a new recovered block instance with the given [`SealedBlock`] and senders as
/// provided
pub const fn new_sealed(block: SealedBlock<B>, senders: Vec<Address>) -> Self {
Self { block, senders }
}
/// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to
/// the number of transactions in the block and recovers the senders from the transactions, if
/// not using [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction)
/// to recover the senders.
pub fn try_new(
block: B,
senders: Vec<Address>,
hash: BlockHash,
) -> Result<Self, SealedBlockRecoveryError<B>> {
let senders = if block.body().transaction_count() == senders.len() {
senders
} else {
let Ok(senders) = block.body().try_recover_signers() else {
return Err(SealedBlockRecoveryError::new(SealedBlock::new_unchecked(block, hash)));
};
senders
};
Ok(Self::new(block, senders, hash))
}
/// A safer variant of [`Self::new`] that checks if the number of senders is equal to
/// the number of transactions in the block and recovers the senders from the transactions, if
/// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction)
/// to recover the senders.
pub fn try_new_unchecked(
block: B,
senders: Vec<Address>,
hash: BlockHash,
) -> Result<Self, SealedBlockRecoveryError<B>> {
let senders = if block.body().transaction_count() == senders.len() {
senders
} else {
let Ok(senders) = block.body().try_recover_signers_unchecked() else {
return Err(SealedBlockRecoveryError::new(SealedBlock::new_unchecked(block, hash)));
};
senders
};
Ok(Self::new(block, senders, hash))
}
/// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to
/// the number of transactions in the block and recovers the senders from the transactions, if
/// not using [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction)
/// to recover the senders.
pub fn try_new_unhashed(block: B, senders: Vec<Address>) -> Result<Self, RecoveryError> {
let senders = if block.body().transaction_count() == senders.len() {
senders
} else {
block.body().try_recover_signers()?
};
Ok(Self::new_unhashed(block, senders))
}
/// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to
/// the number of transactions in the block and recovers the senders from the transactions, if
/// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction)
/// to recover the senders.
pub fn try_new_unhashed_unchecked(
block: B,
senders: Vec<Address>,
) -> Result<Self, RecoveryError> {
let senders = if block.body().transaction_count() == senders.len() {
senders
} else {
block.body().try_recover_signers_unchecked()?
};
Ok(Self::new_unhashed(block, senders))
}
/// Recovers the senders from the transactions in the block using
/// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction).
///
/// Returns an error if any of the transactions fail to recover the sender.
pub fn try_recover(block: B) -> Result<Self, RecoveryError> {
let senders = block.body().try_recover_signers()?;
Ok(Self::new_unhashed(block, senders))
}
/// Recovers the senders from the transactions in the block using
/// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction).
///
/// Returns an error if any of the transactions fail to recover the sender.
pub fn try_recover_unchecked(block: B) -> Result<Self, RecoveryError> {
let senders = block.body().try_recover_signers_unchecked()?;
Ok(Self::new_unhashed(block, senders))
}
/// Recovers the senders from the transactions in the block using
/// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction).
///
/// Returns an error if any of the transactions fail to recover the sender.
pub fn try_recover_sealed(block: SealedBlock<B>) -> Result<Self, SealedBlockRecoveryError<B>> {
let Ok(senders) = block.body().try_recover_signers() else {
return Err(SealedBlockRecoveryError::new(block));
};
let (block, hash) = block.split();
Ok(Self::new(block, senders, hash))
}
/// Recovers the senders from the transactions in the sealed block using
/// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction).
///
/// Returns an error if any of the transactions fail to recover the sender.
pub fn try_recover_sealed_unchecked(
block: SealedBlock<B>,
) -> Result<Self, SealedBlockRecoveryError<B>> {
let Ok(senders) = block.body().try_recover_signers_unchecked() else {
return Err(SealedBlockRecoveryError::new(block));
};
let (block, hash) = block.split();
Ok(Self::new(block, senders, hash))
}
/// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to
/// the number of transactions in the block and recovers the senders from the transactions, if
/// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction)
/// to recover the senders.
///
/// Returns an error if any of the transactions fail to recover the sender.
pub fn try_recover_sealed_with_senders(
block: SealedBlock<B>,
senders: Vec<Address>,
) -> Result<Self, SealedBlockRecoveryError<B>> {
let (block, hash) = block.split();
Self::try_new(block, senders, hash)
}
/// A safer variant of [`Self::new`] that checks if the number of senders is equal to
/// the number of transactions in the block and recovers the senders from the transactions, if
/// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction)
/// to recover the senders.
pub fn try_recover_sealed_with_senders_unchecked(
block: SealedBlock<B>,
senders: Vec<Address>,
) -> Result<Self, SealedBlockRecoveryError<B>> {
let (block, hash) = block.split();
Self::try_new_unchecked(block, senders, hash)
}
/// Returns the block hash.
pub fn hash_ref(&self) -> &BlockHash {
self.block.hash_ref()
}
/// Returns a copy of the block hash.
pub fn hash(&self) -> BlockHash {
*self.hash_ref()
}
/// Return the number hash tuple.
pub fn num_hash(&self) -> BlockNumHash {
BlockNumHash::new(self.header().number(), self.hash())
}
/// Return a [`BlockWithParent`] for this header.
pub fn block_with_parent(&self) -> BlockWithParent {
BlockWithParent { parent: self.header().parent_hash(), block: self.num_hash() }
}
/// Clone the header.
pub fn clone_header(&self) -> B::Header {
self.header().clone()
}
/// Clones the internal header and returns a [`SealedHeader`] sealed with the hash.
pub fn clone_sealed_header(&self) -> SealedHeader<B::Header> {
SealedHeader::new(self.clone_header(), self.hash())
}
/// Clones the wrapped block and returns the [`SealedBlock`] sealed with the hash.
pub fn clone_sealed_block(&self) -> SealedBlock<B> {
self.block.clone()
}
/// Consumes the block and returns the block's header.
pub fn into_header(self) -> B::Header {
self.block.into_header()
}
/// Consumes the block and returns the block's body.
pub fn into_body(self) -> B::Body {
self.block.into_body()
}
/// Consumes the block and returns the [`SealedBlock`] and drops the recovered senders.
pub fn into_sealed_block(self) -> SealedBlock<B> {
self.block
}
/// Consumes the type and returns its components.
pub fn split_sealed(self) -> (SealedBlock<B>, Vec<Address>) {
(self.block, self.senders)
}
/// Consumes the type and returns its components.
#[doc(alias = "into_components")]
pub fn split(self) -> (B, Vec<Address>) {
(self.block.into_block(), self.senders)
}
/// Returns the `Recovered<&T>` transaction at the given index.
pub fn recovered_transaction(
&self,
idx: usize,
) -> Option<Recovered<&<B::Body as BlockBody>::Transaction>> {
let sender = self.senders.get(idx).copied()?;
self.block.body().transactions().get(idx).map(|tx| Recovered::new_unchecked(tx, sender))
}
/// Finds a transaction by hash and returns it with its index and block context.
pub fn find_indexed(&self, tx_hash: TxHash) -> Option<IndexedTx<'_, B>> {
self.body()
.transactions_iter()
.enumerate()
.find(|(_, tx)| tx.trie_hash() == tx_hash)
.map(|(index, tx)| IndexedTx { block: self, tx, index })
}
/// Returns an iterator over all transactions and their sender.
#[inline]
pub fn transactions_with_sender(
&self,
) -> impl Iterator<Item = (&Address, &<B::Body as BlockBody>::Transaction)> + '_ {
self.senders.iter().zip(self.block.body().transactions())
}
/// Returns an iterator over cloned `Recovered<Transaction>`
#[inline]
pub fn clone_transactions_recovered(
&self,
) -> impl Iterator<Item = Recovered<<B::Body as BlockBody>::Transaction>> + '_ {
self.transactions_with_sender()
.map(|(sender, tx)| Recovered::new_unchecked(tx.clone(), *sender))
}
/// Returns an iterator over `Recovered<&Transaction>`
#[inline]
pub fn transactions_recovered(
&self,
) -> impl Iterator<Item = Recovered<&'_ <B::Body as BlockBody>::Transaction>> + '_ {
self.transactions_with_sender().map(|(sender, tx)| Recovered::new_unchecked(tx, *sender))
}
/// Consumes the type and returns an iterator over all [`Recovered`] transactions in the block.
#[inline]
pub fn into_transactions_recovered(
self,
) -> impl Iterator<Item = Recovered<<B::Body as BlockBody>::Transaction>> {
self.block
.split()
.0
.into_body()
.into_transactions()
.into_iter()
.zip(self.senders)
.map(|(tx, sender)| tx.with_signer(sender))
}
/// Consumes the block and returns the transactions of the block.
#[inline]
pub fn into_transactions(self) -> Vec<<B::Body as BlockBody>::Transaction> {
self.block.split().0.into_body().into_transactions()
}
}
impl<B: Block> BlockHeader for RecoveredBlock<B> {
fn parent_hash(&self) -> B256 {
self.header().parent_hash()
}
fn ommers_hash(&self) -> B256 {
self.header().ommers_hash()
}
fn beneficiary(&self) -> Address {
self.header().beneficiary()
}
fn state_root(&self) -> B256 {
self.header().state_root()
}
fn transactions_root(&self) -> B256 {
self.header().transactions_root()
}
fn receipts_root(&self) -> B256 {
self.header().receipts_root()
}
fn withdrawals_root(&self) -> Option<B256> {
self.header().withdrawals_root()
}
fn logs_bloom(&self) -> Bloom {
self.header().logs_bloom()
}
fn difficulty(&self) -> U256 {
self.header().difficulty()
}
fn number(&self) -> BlockNumber {
self.header().number()
}
fn gas_limit(&self) -> u64 {
self.header().gas_limit()
}
fn gas_used(&self) -> u64 {
self.header().gas_used()
}
fn timestamp(&self) -> u64 {
self.header().timestamp()
}
fn mix_hash(&self) -> Option<B256> {
self.header().mix_hash()
}
fn nonce(&self) -> Option<B64> {
self.header().nonce()
}
fn base_fee_per_gas(&self) -> Option<u64> {
self.header().base_fee_per_gas()
}
fn blob_gas_used(&self) -> Option<u64> {
self.header().blob_gas_used()
}
fn excess_blob_gas(&self) -> Option<u64> {
self.header().excess_blob_gas()
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.header().parent_beacon_block_root()
}
fn requests_hash(&self) -> Option<B256> {
self.header().requests_hash()
}
fn extra_data(&self) -> &Bytes {
self.header().extra_data()
}
}
impl<B: Block> Eq for RecoveredBlock<B> {}
impl<B: Block> PartialEq for RecoveredBlock<B> {
fn eq(&self, other: &Self) -> bool {
self.hash_ref().eq(other.hash_ref()) &&
self.block.eq(&other.block) &&
self.senders.eq(&other.senders)
}
}
impl<B: Block + Default> Default for RecoveredBlock<B> {
#[inline]
fn default() -> Self {
Self::new_unhashed(B::default(), Default::default())
}
}
impl<B: Block> InMemorySize for RecoveredBlock<B> {
#[inline]
fn size(&self) -> usize {
self.block.size() + self.senders.len() * core::mem::size_of::<Address>()
}
}
impl<B: Block> From<RecoveredBlock<B>> for Sealed<B> {
fn from(value: RecoveredBlock<B>) -> Self {
value.block.into()
}
}
/// Converts a block with recovered transactions into a [`RecoveredBlock`].
///
/// This implementation takes an `alloy_consensus::Block` where transactions are of type
/// `Recovered<T>` (transactions with their recovered senders) and converts it into a
/// [`RecoveredBlock`] which stores transactions and senders separately for efficiency.
impl<T, H> From<alloy_consensus::Block<Recovered<T>, H>>
for RecoveredBlock<alloy_consensus::Block<T, H>>
where
T: SignedTransaction,
H: crate::block::header::BlockHeader,
{
fn from(block: alloy_consensus::Block<Recovered<T>, H>) -> Self {
let header = block.header;
// Split the recovered transactions into transactions and senders
let (transactions, senders): (Vec<T>, Vec<Address>) = block
.body
.transactions
.into_iter()
.map(|recovered| {
let (tx, sender) = recovered.into_parts();
(tx, sender)
})
.unzip();
// Reconstruct the block with regular transactions
let body = alloy_consensus::BlockBody {
transactions,
ommers: block.body.ommers,
withdrawals: block.body.withdrawals,
};
let block = alloy_consensus::Block::new(header, body);
Self::new_unhashed(block, senders)
}
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a, B> arbitrary::Arbitrary<'a> for RecoveredBlock<B>
where
B: Block + arbitrary::Arbitrary<'a>,
{
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let block = B::arbitrary(u)?;
Ok(Self::try_recover(block).unwrap())
}
}
#[cfg(any(test, feature = "test-utils"))]
impl<B: Block> RecoveredBlock<B> {
/// Returns a mutable reference to the recovered senders.
pub const fn senders_mut(&mut self) -> &mut Vec<Address> {
&mut self.senders
}
/// Appends the sender to the list of senders.
pub fn push_sender(&mut self, sender: Address) {
self.senders.push(sender);
}
}
#[cfg(any(test, feature = "test-utils"))]
impl<B> core::ops::DerefMut for RecoveredBlock<B>
where
B: Block,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.block
}
}
#[cfg(any(test, feature = "test-utils"))]
impl<B: crate::test_utils::TestBlock> RecoveredBlock<B> {
/// Updates the block header.
pub fn set_header(&mut self, header: B::Header) {
*self.header_mut() = header
}
/// Updates the block hash.
pub fn set_hash(&mut self, hash: BlockHash) {
self.block.set_hash(hash)
}
/// Returns a mutable reference to the header.
pub const fn header_mut(&mut self) -> &mut B::Header {
self.block.header_mut()
}
/// Returns a mutable reference to the body.
pub const fn block_mut(&mut self) -> &mut B::Body {
self.block.body_mut()
}
/// Updates the parent block hash.
pub fn set_parent_hash(&mut self, hash: BlockHash) {
self.block.set_parent_hash(hash);
}
/// Updates the block number.
pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) {
self.block.set_block_number(number);
}
/// Updates the block state root.
pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) {
self.block.set_state_root(state_root);
}
/// Updates the block difficulty.
pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) {
self.block.set_difficulty(difficulty);
}
}
/// Transaction with its index and block reference for efficient metadata access.
#[derive(Debug)]
pub struct IndexedTx<'a, B: Block> {
/// Recovered block containing the transaction
block: &'a RecoveredBlock<B>,
/// Transaction matching the hash
tx: &'a <B::Body as BlockBody>::Transaction,
/// Index of the transaction in the block
index: usize,
}
impl<'a, B: Block> IndexedTx<'a, B> {
/// Returns the transaction.
pub const fn tx(&self) -> &<B::Body as BlockBody>::Transaction {
self.tx
}
/// Returns the transaction hash.
pub fn tx_hash(&self) -> TxHash {
self.tx.trie_hash()
}
/// Returns the block hash.
pub fn block_hash(&self) -> B256 {
self.block.hash()
}
/// Returns the index of the transaction in the block.
pub const fn index(&self) -> usize {
self.index
}
/// Builds a [`TransactionMeta`] for the indexed transaction.
pub fn meta(&self) -> TransactionMeta {
TransactionMeta {
tx_hash: self.tx.trie_hash(),
index: self.index as u64,
block_hash: self.block.hash(),
block_number: self.block.number(),
base_fee: self.block.base_fee_per_gas(),
timestamp: self.block.timestamp(),
excess_blob_gas: self.block.excess_blob_gas(),
}
}
}
#[cfg(feature = "rpc-compat")]
mod rpc_compat {
use super::{
Block as BlockTrait, BlockBody as BlockBodyTrait, RecoveredBlock, SignedTransaction,
};
use crate::{block::error::BlockRecoveryError, SealedHeader};
use alloc::vec::Vec;
use alloy_consensus::{
transaction::Recovered, Block as CBlock, BlockBody, BlockHeader, Sealable,
};
use alloy_rpc_types_eth::{Block, BlockTransactions, BlockTransactionsKind, TransactionInfo};
    impl<B> RecoveredBlock<B>
    where
        B: BlockTrait,
    {
        /// Converts the block into an RPC [`Block`] with the given [`BlockTransactionsKind`].
        ///
        /// The `tx_resp_builder` closure transforms each transaction into the desired response
        /// type.
        ///
        /// `header_builder` transforms the block header into RPC representation. It takes the
        /// consensus header and RLP length of the block which is a common dependency of RPC
        /// headers.
        pub fn into_rpc_block<T, RpcH, F, E>(
            self,
            kind: BlockTransactionsKind,
            tx_resp_builder: F,
            header_builder: impl FnOnce(SealedHeader<B::Header>, usize) -> Result<RpcH, E>,
        ) -> Result<Block<T, RpcH>, E>
        where
            F: Fn(
                Recovered<<<B as BlockTrait>::Body as BlockBodyTrait>::Transaction>,
                TransactionInfo,
            ) -> Result<T, E>,
        {
            // Hashes-only skips building per-transaction responses entirely.
            match kind {
                BlockTransactionsKind::Hashes => self.into_rpc_block_with_tx_hashes(header_builder),
                BlockTransactionsKind::Full => {
                    self.into_rpc_block_full(tx_resp_builder, header_builder)
                }
            }
        }
        /// Converts the block to an RPC [`Block`] without consuming self.
        ///
        /// For transaction hashes, only necessary parts are cloned for efficiency.
        /// For full transactions, the entire block is cloned.
        ///
        /// The `tx_resp_builder` closure transforms each transaction into the desired response
        /// type.
        ///
        /// `header_builder` transforms the block header into RPC representation. It takes the
        /// consensus header and RLP length of the block which is a common dependency of RPC
        /// headers.
        pub fn clone_into_rpc_block<T, RpcH, F, E>(
            &self,
            kind: BlockTransactionsKind,
            tx_resp_builder: F,
            header_builder: impl FnOnce(SealedHeader<B::Header>, usize) -> Result<RpcH, E>,
        ) -> Result<Block<T, RpcH>, E>
        where
            F: Fn(
                Recovered<<<B as BlockTrait>::Body as BlockBodyTrait>::Transaction>,
                TransactionInfo,
            ) -> Result<T, E>,
        {
            match kind {
                BlockTransactionsKind::Hashes => self.to_rpc_block_with_tx_hashes(header_builder),
                // No by-reference path for full responses: clone the whole block first.
                BlockTransactionsKind::Full => {
                    self.clone().into_rpc_block_full(tx_resp_builder, header_builder)
                }
            }
        }
        /// Creates an RPC [`Block`] with transaction hashes from a reference.
        ///
        /// Returns [`BlockTransactions::Hashes`] containing only transaction hashes.
        /// Efficiently clones only necessary parts, not the entire block.
        pub fn to_rpc_block_with_tx_hashes<T, RpcH, E>(
            &self,
            header_builder: impl FnOnce(SealedHeader<B::Header>, usize) -> Result<RpcH, E>,
        ) -> Result<Block<T, RpcH>, E> {
            // Only the hashes, sealed header and withdrawals are copied; transactions stay put.
            let transactions = self.body().transaction_hashes_iter().copied().collect();
            let rlp_length = self.rlp_length();
            let header = self.clone_sealed_header();
            let withdrawals = self.body().withdrawals().cloned();
            let transactions = BlockTransactions::Hashes(transactions);
            let uncles =
                self.body().ommers().unwrap_or(&[]).iter().map(|h| h.hash_slow()).collect();
            let header = header_builder(header, rlp_length)?;
            Ok(Block { header, uncles, transactions, withdrawals })
        }
        /// Converts the block into an RPC [`Block`] with transaction hashes.
        ///
        /// Consumes self and returns [`BlockTransactions::Hashes`] containing only transaction
        /// hashes.
        pub fn into_rpc_block_with_tx_hashes<T, E, RpcHeader>(
            self,
            f: impl FnOnce(SealedHeader<B::Header>, usize) -> Result<RpcHeader, E>,
        ) -> Result<Block<T, RpcHeader>, E> {
            // Collect hashes and RLP length before the block is torn apart below.
            let transactions = self.body().transaction_hashes_iter().copied().collect();
            let rlp_length = self.rlp_length();
            let (header, body) = self.into_sealed_block().split_sealed_header_body();
            let BlockBody { ommers, withdrawals, .. } = body.into_ethereum_body();
            let transactions = BlockTransactions::Hashes(transactions);
            let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect();
            let header = f(header, rlp_length)?;
            Ok(Block { header, uncles, transactions, withdrawals })
        }
        /// Converts the block into an RPC [`Block`] with full transaction objects.
        ///
        /// Returns [`BlockTransactions::Full`] with complete transaction data.
        /// The `tx_resp_builder` closure transforms each transaction with its metadata.
        pub fn into_rpc_block_full<T, RpcHeader, F, E>(
            self,
            tx_resp_builder: F,
            header_builder: impl FnOnce(SealedHeader<B::Header>, usize) -> Result<RpcHeader, E>,
        ) -> Result<Block<T, RpcHeader>, E>
        where
            F: Fn(
                Recovered<<<B as BlockTrait>::Body as BlockBodyTrait>::Transaction>,
                TransactionInfo,
            ) -> Result<T, E>,
        {
            // Capture header-derived metadata before the block is split apart.
            let block_number = self.header().number();
            let base_fee = self.header().base_fee_per_gas();
            let block_length = self.rlp_length();
            let block_hash = Some(self.hash());
            let (block, senders) = self.split_sealed();
            let (header, body) = block.split_sealed_header_body();
            let BlockBody { transactions, ommers, withdrawals } = body.into_ethereum_body();
            // Pair each transaction with its already-recovered sender and positional metadata;
            // the first builder error short-circuits the whole conversion.
            let transactions = transactions
                .into_iter()
                .zip(senders)
                .enumerate()
                .map(|(idx, (tx, sender))| {
                    let tx_info = TransactionInfo {
                        hash: Some(*tx.tx_hash()),
                        block_hash,
                        block_number: Some(block_number),
                        base_fee,
                        index: Some(idx as u64),
                    };
                    // Senders were recovered upfront, so the unchecked pairing is sound here.
                    tx_resp_builder(Recovered::new_unchecked(tx, sender), tx_info)
                })
                .collect::<Result<Vec<_>, E>>()?;
            let transactions = BlockTransactions::Full(transactions);
            let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect();
            let header = header_builder(header, block_length)?;
            let block = Block { header, uncles, transactions, withdrawals };
            Ok(block)
        }
    }
impl<T> RecoveredBlock<CBlock<T>>
where
T: SignedTransaction,
{
/// Creates a `RecoveredBlock` from an RPC block.
///
/// Converts the RPC block to consensus format and recovers transaction senders.
/// Works with any transaction type `U` that can be converted to `T`.
///
/// # Examples
/// ```ignore
/// let rpc_block: alloy_rpc_types_eth::Block = get_rpc_block();
/// let recovered = RecoveredBlock::from_rpc_block(rpc_block)?;
/// ```
pub fn from_rpc_block<U>(
block: alloy_rpc_types_eth::Block<U>,
) -> Result<Self, BlockRecoveryError<alloy_consensus::Block<T>>>
where
T: From<U>,
{
// Convert to consensus block and then convert transactions
let consensus_block = block.into_consensus().convert_transactions();
// Try to recover the block
consensus_block.try_into_recovered()
}
}
    impl<T, U> TryFrom<alloy_rpc_types_eth::Block<U>> for RecoveredBlock<CBlock<T>>
    where
        T: SignedTransaction + From<U>,
    {
        type Error = BlockRecoveryError<alloy_consensus::Block<T>>;
        /// Delegates to [`RecoveredBlock::from_rpc_block`]; errors if sender recovery fails.
        fn try_from(block: alloy_rpc_types_eth::Block<U>) -> Result<Self, Self::Error> {
            Self::from_rpc_block(block)
        }
    }
}
/// Bincode-compatible [`RecoveredBlock`] serde implementation.
#[cfg(feature = "serde-bincode-compat")]
pub(super) mod serde_bincode_compat {
use crate::{
serde_bincode_compat::{self, SerdeBincodeCompat},
Block,
};
use alloc::{borrow::Cow, vec::Vec};
use alloy_primitives::Address;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::{DeserializeAs, SerializeAs};
    /// Bincode-compatible [`super::RecoveredBlock`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_primitives_traits::{
    ///     block::RecoveredBlock,
    ///     serde_bincode_compat::{self, SerdeBincodeCompat},
    ///     Block,
    /// };
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data<T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static> {
    ///     #[serde_as(as = "serde_bincode_compat::RecoveredBlock<'_, T>")]
    ///     block: RecoveredBlock<T>,
    /// }
    /// ```
    #[derive(derive_more::Debug, Serialize, Deserialize)]
    pub struct RecoveredBlock<
        'a,
        T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
    > {
        // The sealed block in its bincode-compatible wrapper representation.
        #[serde(
            bound = "serde_bincode_compat::SealedBlock<'a, T>: Serialize + serde::de::DeserializeOwned"
        )]
        block: serde_bincode_compat::SealedBlock<'a, T>,
        // Recovered sender addresses; borrowed when built from a `&RecoveredBlock` (see the
        // `From` impl below in the original file), presumably owned after deserialization —
        // TODO(review): confirm.
        #[expect(clippy::owned_cow)]
        senders: Cow<'a, Vec<Address>>,
    }
    impl<'a, T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static>
        From<&'a super::RecoveredBlock<T>> for RecoveredBlock<'a, T>
    {
        /// Borrows the block and its senders into the bincode-compatible wrapper — no cloning.
        fn from(value: &'a super::RecoveredBlock<T>) -> Self {
            Self { block: (&value.block).into(), senders: Cow::Borrowed(&value.senders) }
        }
    }
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/block/header.rs | crates/primitives-traits/src/block/header.rs | //! Block header data primitive.
use crate::{InMemorySize, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat};
use alloy_primitives::Sealable;
use core::{fmt, hash::Hash};
/// Re-exported alias
pub use alloy_consensus::BlockHeader as AlloyBlockHeader;
/// Helper trait that unifies all behaviour required by block header to support full node
/// operations.
pub trait FullBlockHeader: BlockHeader + MaybeCompact {}
// Blanket impl: any type satisfying the component bounds is automatically a `FullBlockHeader`.
impl<T> FullBlockHeader for T where T: BlockHeader + MaybeCompact {}
/// Abstraction of a block header.
pub trait BlockHeader:
    Send
    + Sync
    + Unpin
    + Clone
    + Hash
    + Default
    + fmt::Debug
    + PartialEq
    + Eq
    + alloy_rlp::Encodable
    + alloy_rlp::Decodable
    + alloy_consensus::BlockHeader
    + Sealable
    + InMemorySize
    + MaybeSerde
    + MaybeSerdeBincodeCompat
    + AsRef<Self>
    + 'static
{
    /// Returns the timestamp in seconds, assuming the timestamp is in milliseconds.
    ///
    /// With the `timestamp-in-seconds` feature enabled the raw timestamp is returned unchanged;
    /// otherwise it is treated as milliseconds and divided by 1000 (integer division, so
    /// sub-second precision is truncated).
    fn timestamp_seconds(&self) -> u64 {
        if cfg!(feature = "timestamp-in-seconds") {
            self.timestamp()
        } else {
            self.timestamp() / 1000
        }
    }
}
// `alloy_consensus::Header` already satisfies every supertrait bound, so the impl body is empty.
impl BlockHeader for alloy_consensus::Header {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/block/error.rs | crates/primitives-traits/src/block/error.rs | //! Error types for the `block` module.
use crate::transaction::signed::RecoveryError;
/// Type alias for [`BlockRecoveryError`] with a [`SealedBlock`](crate::SealedBlock) value.
///
/// Handy when recovery consumes an already-sealed block and must hand it back on failure.
pub type SealedBlockRecoveryError<B> = BlockRecoveryError<crate::SealedBlock<B>>;
/// Error when recovering a block from [`SealedBlock`](crate::SealedBlock) to
/// [`RecoveredBlock`](crate::RecoveredBlock).
///
/// This error is returned when the block recovery fails and contains the erroneous block, because
/// recovering a block takes ownership of the block.
///
/// The wrapped value is public so callers can reclaim the block without an extra clone.
#[derive(Debug, Clone, thiserror::Error)]
#[error("Failed to recover the block")]
pub struct BlockRecoveryError<T>(pub T);
impl<T> BlockRecoveryError<T> {
/// Create a new error.
pub const fn new(inner: T) -> Self {
Self(inner)
}
/// Unwrap the error and return the original value.
pub fn into_inner(self) -> T {
self.0
}
}
impl<T> From<BlockRecoveryError<T>> for RecoveryError
where
    T: core::fmt::Debug + Send + Sync + 'static,
{
    /// Converts a block-level recovery failure into the generic [`RecoveryError`], keeping the
    /// original error (and the block it carries) attached as the error source.
    fn from(err: BlockRecoveryError<T>) -> Self {
        Self::from_source(err)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/block/mod.rs | crates/primitives-traits/src/block/mod.rs | //! Block abstraction.
//!
//! This module provides the core block types and transformations:
//!
//! ```rust
//! # use reth_primitives_traits::{Block, SealedBlock, RecoveredBlock};
//! # fn example<B: Block + 'static>(block: B) -> Result<(), Box<dyn std::error::Error>>
//! # where B::Body: reth_primitives_traits::BlockBody<Transaction: reth_primitives_traits::SignedTransaction> {
//! // Basic block flow
//! let block: B = block;
//!
//! // Seal (compute hash)
//! let sealed: SealedBlock<B> = block.seal();
//!
//! // Recover senders
//! let recovered: RecoveredBlock<B> = sealed.try_recover()?;
//!
//! // Access components
//! let senders = recovered.senders();
//! let hash = recovered.hash();
//! # Ok(())
//! # }
//! ```
pub(crate) mod sealed;
pub use sealed::SealedBlock;
pub(crate) mod recovered;
pub use recovered::RecoveredBlock;
pub mod body;
pub mod error;
pub mod header;
use alloc::{fmt, vec::Vec};
use alloy_primitives::{Address, B256};
use alloy_rlp::{Decodable, Encodable};
use crate::{
block::error::BlockRecoveryError, transaction::signed::RecoveryError, BlockBody, BlockHeader,
FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde, SealedHeader, SignedTransaction,
};
/// Bincode-compatible header type serde implementations.
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
    // Re-export the block wrappers so downstream code can import them from one flat path.
    pub use super::{
        recovered::serde_bincode_compat::RecoveredBlock, sealed::serde_bincode_compat::SealedBlock,
    };
}
/// Helper trait that unifies all behaviour required by block to support full node operations.
pub trait FullBlock:
    Block<Header: FullBlockHeader, Body: FullBlockBody> + alloy_rlp::Encodable + alloy_rlp::Decodable
{
}
// Blanket impl: every type meeting the combined bounds is automatically a `FullBlock`.
impl<T> FullBlock for T where
    T: Block<Header: FullBlockHeader, Body: FullBlockBody>
        + alloy_rlp::Encodable
        + alloy_rlp::Decodable
{
}
/// Helper type alias to access [`BlockBody::Transaction`] given a [`Block`].
pub type BlockTx<B> = <<B as Block>::Body as BlockBody>::Transaction;
/// Abstraction of block data type.
///
/// This type defines the structure of a block in the blockchain.
/// A [`Block`] is composed of a header and a body.
/// It is expected that a block can always be completely reconstructed from its header and body.
pub trait Block:
    Send
    + Sync
    + Unpin
    + Clone
    + Default
    + fmt::Debug
    + PartialEq
    + Eq
    + InMemorySize
    + MaybeSerde
    + Encodable
    + Decodable
{
    /// Header part of the block.
    type Header: BlockHeader;
    /// The block's body contains the transactions in the block and additional data, e.g.
    /// withdrawals in ethereum.
    type Body: BlockBody<OmmerHeader = Self::Header>;
    /// Create new block instance.
    fn new(header: Self::Header, body: Self::Body) -> Self;
    /// Create new a sealed block instance from a sealed header and the block body.
    fn new_sealed(header: SealedHeader<Self::Header>, body: Self::Body) -> SealedBlock<Self> {
        SealedBlock::from_sealed_parts(header, body)
    }
    /// Seal the block with a known hash.
    ///
    /// WARNING: This method does not perform validation whether the hash is correct.
    fn seal_unchecked(self, hash: B256) -> SealedBlock<Self> {
        SealedBlock::new_unchecked(self, hash)
    }
    /// Creates the [`SealedBlock`] from the block's parts without calculating the hash upfront.
    fn seal(self) -> SealedBlock<Self> {
        SealedBlock::new_unhashed(self)
    }
    /// Calculate the header hash and seal the block so that it can't be changed.
    fn seal_slow(self) -> SealedBlock<Self> {
        SealedBlock::seal_slow(self)
    }
    /// Returns reference to block header.
    fn header(&self) -> &Self::Header;
    /// Returns reference to block body.
    fn body(&self) -> &Self::Body;
    /// Splits the block into its header and body.
    fn split(self) -> (Self::Header, Self::Body);
    /// Returns a tuple of references to the block's header and body.
    fn split_ref(&self) -> (&Self::Header, &Self::Body) {
        (self.header(), self.body())
    }
    /// Consumes the block and returns the header.
    fn into_header(self) -> Self::Header {
        self.split().0
    }
    /// Consumes the block and returns the body.
    fn into_body(self) -> Self::Body {
        self.split().1
    }
    /// Returns the rlp length of the block with the given header and body.
    fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize;
    /// Expensive operation that recovers transaction signer.
    fn recover_signers(&self) -> Result<Vec<Address>, RecoveryError>
    where
        <Self::Body as BlockBody>::Transaction: SignedTransaction,
    {
        self.body().recover_signers()
    }
    /// Transform the block into a [`RecoveredBlock`] using the given senders.
    ///
    /// If the number of senders does not match the number of transactions in the block, this falls
    /// back to manually recovery, but _without ensuring that the signature has a low `s` value_.
    ///
    /// Returns the block as error if a signature is invalid.
    fn try_into_recovered_unchecked(
        self,
        senders: Vec<Address>,
    ) -> Result<RecoveredBlock<Self>, BlockRecoveryError<Self>>
    where
        <Self::Body as BlockBody>::Transaction: SignedTransaction,
    {
        // Lengths match: trust the provided senders as-is, no recovery work needed.
        let senders = if self.body().transactions().len() == senders.len() {
            senders
        } else {
            // Fall back to recovery if lengths don't match
            let Ok(senders) = self.body().recover_signers_unchecked() else {
                return Err(BlockRecoveryError::new(self))
            };
            senders
        };
        Ok(RecoveredBlock::new_unhashed(self, senders))
    }
    /// Transform the block into a [`RecoveredBlock`] using the given signers.
    ///
    /// Note: This method assumes the signers are correct and does not validate them.
    fn into_recovered_with_signers(self, signers: Vec<Address>) -> RecoveredBlock<Self>
    where
        <Self::Body as BlockBody>::Transaction: SignedTransaction,
    {
        RecoveredBlock::new_unhashed(self, signers)
    }
    /// **Expensive**. Transform into a [`RecoveredBlock`] by recovering senders in the contained
    /// transactions.
    ///
    /// Returns the block as error if a signature is invalid.
    fn try_into_recovered(self) -> Result<RecoveredBlock<Self>, BlockRecoveryError<Self>>
    where
        <Self::Body as BlockBody>::Transaction: SignedTransaction,
    {
        // On failure the whole block is handed back inside the error, since `self` was consumed.
        let Ok(signers) = self.body().recover_signers() else {
            return Err(BlockRecoveryError::new(self))
        };
        Ok(RecoveredBlock::new_unhashed(self, signers))
    }
    /// A Convenience function to convert this type into the regular ethereum block that
    /// consists of:
    ///
    /// - Header
    ///
    /// And the ethereum block body [`alloy_consensus::BlockBody`], see also
    /// [`BlockBody::into_ethereum_body`].
    /// - Transactions
    /// - Withdrawals
    /// - Ommers
    ///
    /// Note: This conversion can be incomplete. It is not expected that this `Block` is the same as
    /// [`alloy_consensus::Block`] only that it can be converted into it which is useful for
    /// the `eth_` RPC namespace (e.g. RPC block).
    fn into_ethereum_block(
        self,
    ) -> alloy_consensus::Block<<Self::Body as BlockBody>::Transaction, Self::Header> {
        let (header, body) = self.split();
        alloy_consensus::Block::new(header, body.into_ethereum_body())
    }
}
impl<T, H> Block for alloy_consensus::Block<T, H>
where
    T: SignedTransaction,
    H: BlockHeader,
{
    type Header = H;
    type Body = alloy_consensus::BlockBody<T, H>;
    fn new(header: Self::Header, body: Self::Body) -> Self {
        Self { header, body }
    }
    fn header(&self) -> &Self::Header {
        &self.header
    }
    fn body(&self) -> &Self::Body {
        &self.body
    }
    fn split(self) -> (Self::Header, Self::Body) {
        (self.header, self.body)
    }
    fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize {
        // Delegate to the inherent helper on `alloy_consensus::Block`.
        Self::rlp_length_for(header, body)
    }
    fn into_ethereum_block(self) -> Self {
        // Already the canonical ethereum block type — identity conversion.
        self
    }
}
/// An extension trait for [`Block`]s that allows for mutable access to the block's internals.
///
/// This allows for modifying the block's header and body for testing purposes.
#[cfg(any(test, feature = "test-utils"))]
pub trait TestBlock: Block<Header: crate::test_utils::TestHeader> {
    /// Returns mutable reference to block body.
    fn body_mut(&mut self) -> &mut Self::Body;
    /// Returns mutable reference to block header.
    fn header_mut(&mut self) -> &mut Self::Header;
    /// Updates the block header.
    fn set_header(&mut self, header: Self::Header);
    // The setters below all delegate to the `TestHeader` helpers on the mutable header.
    /// Updates the parent block hash.
    fn set_parent_hash(&mut self, hash: alloy_primitives::BlockHash) {
        crate::header::test_utils::TestHeader::set_parent_hash(self.header_mut(), hash);
    }
    /// Updates the block number.
    fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) {
        crate::header::test_utils::TestHeader::set_block_number(self.header_mut(), number);
    }
    /// Updates the block state root.
    fn set_state_root(&mut self, state_root: alloy_primitives::B256) {
        crate::header::test_utils::TestHeader::set_state_root(self.header_mut(), state_root);
    }
    /// Updates the block difficulty.
    fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) {
        crate::header::test_utils::TestHeader::set_difficulty(self.header_mut(), difficulty);
    }
}
#[cfg(any(test, feature = "test-utils"))]
impl<T, H> TestBlock for alloy_consensus::Block<T, H>
where
    T: SignedTransaction,
    H: crate::test_utils::TestHeader,
{
    /// Direct mutable access to the body field.
    fn body_mut(&mut self) -> &mut Self::Body {
        &mut self.body
    }
    /// Direct mutable access to the header field.
    fn header_mut(&mut self) -> &mut Self::Header {
        &mut self.header
    }
    /// Replaces the header wholesale.
    fn set_header(&mut self, header: Self::Header) {
        self.header = header
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/block/body.rs | crates/primitives-traits/src/block/body.rs | //! Block body abstraction.
use crate::{
transaction::signed::RecoveryError, BlockHeader, FullSignedTx, InMemorySize, MaybeSerde,
MaybeSerdeBincodeCompat, SignedTransaction,
};
use alloc::{fmt, vec::Vec};
use alloy_consensus::{Transaction, Typed2718};
use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals};
use alloy_primitives::{Address, Bytes, B256};
/// Helper trait that unifies all behaviour required by a block body to support full node
/// operations.
pub trait FullBlockBody: BlockBody<Transaction: FullSignedTx> + MaybeSerdeBincodeCompat {}
// Blanket impl: any type satisfying the component bounds is automatically a `FullBlockBody`.
impl<T> FullBlockBody for T where T: BlockBody<Transaction: FullSignedTx> + MaybeSerdeBincodeCompat {}
/// Abstraction for block's body.
///
/// This type is a container for everything that is included in a block except the header.
/// For ethereum this includes transactions, ommers, and withdrawals.
pub trait BlockBody:
    Send
    + Sync
    + Unpin
    + Clone
    + Default
    + fmt::Debug
    + PartialEq
    + Eq
    + alloy_rlp::Encodable
    + alloy_rlp::Decodable
    + InMemorySize
    + MaybeSerde
    + 'static
{
    /// Ordered list of signed transactions as committed in the block.
    type Transaction: SignedTransaction;
    /// Ommer header type.
    type OmmerHeader: BlockHeader;
    /// Returns reference to transactions in the block.
    fn transactions(&self) -> &[Self::Transaction];
    /// A Convenience function to convert this type into the regular ethereum block body that
    /// consists of:
    ///
    /// - Transactions
    /// - Withdrawals
    /// - Ommers
    ///
    /// Note: This conversion can be incomplete. It is not expected that this `Body` is the same as
    /// [`alloy_consensus::BlockBody`] only that it can be converted into it which is useful for
    /// the `eth_` RPC namespace (e.g. RPC block).
    fn into_ethereum_body(self)
        -> alloy_consensus::BlockBody<Self::Transaction, Self::OmmerHeader>;
    /// Returns an iterator over the transactions in the block.
    fn transactions_iter(&self) -> impl Iterator<Item = &Self::Transaction> + '_ {
        self.transactions().iter()
    }
    /// Returns the transaction with the matching hash.
    ///
    /// This is a convenience function for `transactions_iter().find()` — a linear scan.
    fn transaction_by_hash(&self, hash: &B256) -> Option<&Self::Transaction> {
        self.transactions_iter().find(|tx| tx.tx_hash() == hash)
    }
    /// Returns true if the block body contains a transaction with the given hash.
    ///
    /// This is a convenience function for `transaction_by_hash().is_some()`
    fn contains_transaction(&self, hash: &B256) -> bool {
        self.transaction_by_hash(hash).is_some()
    }
    /// Clones the transactions in the block.
    ///
    /// This is a convenience function for `transactions().to_vec()`
    fn clone_transactions(&self) -> Vec<Self::Transaction> {
        self.transactions().to_vec()
    }
    /// Returns an iterator over all transaction hashes in the block body.
    fn transaction_hashes_iter(&self) -> impl Iterator<Item = &B256> + '_ {
        self.transactions_iter().map(|tx| tx.tx_hash())
    }
    /// Returns the number of the transactions in the block.
    fn transaction_count(&self) -> usize {
        self.transactions().len()
    }
    /// Consume the block body and return a [`Vec`] of transactions.
    fn into_transactions(self) -> Vec<Self::Transaction>;
    /// Returns `true` if the block body contains a transaction of the given type.
    fn contains_transaction_type(&self, tx_type: u8) -> bool {
        self.transactions_iter().any(|tx| tx.is_type(tx_type))
    }
    /// Calculate the transaction root for the block body.
    fn calculate_tx_root(&self) -> B256 {
        alloy_consensus::proofs::calculate_transaction_root(self.transactions())
    }
    /// Returns block withdrawals if any.
    fn withdrawals(&self) -> Option<&Withdrawals>;
    /// Calculate the withdrawals root for the block body.
    ///
    /// Returns `None` if there are no withdrawals in the block.
    fn calculate_withdrawals_root(&self) -> Option<B256> {
        self.withdrawals().map(|withdrawals| {
            alloy_consensus::proofs::calculate_withdrawals_root(withdrawals.as_slice())
        })
    }
    /// Returns block ommers if any.
    fn ommers(&self) -> Option<&[Self::OmmerHeader]>;
    /// Calculate the ommers root for the block body.
    ///
    /// Returns `None` if there are no ommers in the block.
    fn calculate_ommers_root(&self) -> Option<B256> {
        self.ommers().map(alloy_consensus::proofs::calculate_ommers_root)
    }
    /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block.
    fn blob_gas_used(&self) -> u64 {
        // Non-4844 transactions yield `None` from `blob_gas_used` and are skipped.
        self.transactions_iter().filter_map(|tx| tx.blob_gas_used()).sum()
    }
    /// Returns an iterator over all blob versioned hashes in the block body.
    fn blob_versioned_hashes_iter(&self) -> impl Iterator<Item = &B256> + '_ {
        self.transactions_iter().filter_map(|tx| tx.blob_versioned_hashes()).flatten()
    }
    /// Returns an iterator over the encoded 2718 transactions.
    ///
    /// This is also known as `raw transactions`.
    ///
    /// See also [`Encodable2718`].
    #[doc(alias = "raw_transactions_iter")]
    fn encoded_2718_transactions_iter(&self) -> impl Iterator<Item = Vec<u8>> + '_ {
        self.transactions_iter().map(|tx| tx.encoded_2718())
    }
    /// Returns a vector of encoded 2718 transactions.
    ///
    /// This is also known as `raw transactions`.
    ///
    /// See also [`Encodable2718`].
    #[doc(alias = "raw_transactions")]
    fn encoded_2718_transactions(&self) -> Vec<Bytes> {
        self.encoded_2718_transactions_iter().map(Into::into).collect()
    }
    /// Recover signer addresses for all transactions in the block body.
    fn recover_signers(&self) -> Result<Vec<Address>, RecoveryError>
    where
        Self::Transaction: SignedTransaction,
    {
        crate::transaction::recover::recover_signers(self.transactions())
    }
    /// Recover signer addresses for all transactions in the block body.
    ///
    /// Returns an error if some transaction's signature is invalid.
    fn try_recover_signers(&self) -> Result<Vec<Address>, RecoveryError>
    where
        Self::Transaction: SignedTransaction,
    {
        self.recover_signers()
    }
    /// Recover signer addresses for all transactions in the block body _without ensuring that the
    /// signature has a low `s` value_.
    ///
    /// Returns `RecoveryError`, if some transaction's signature is invalid.
    fn recover_signers_unchecked(&self) -> Result<Vec<Address>, RecoveryError>
    where
        Self::Transaction: SignedTransaction,
    {
        crate::transaction::recover::recover_signers_unchecked(self.transactions())
    }
    /// Recover signer addresses for all transactions in the block body _without ensuring that the
    /// signature has a low `s` value_.
    ///
    /// Returns an error if some transaction's signature is invalid.
    fn try_recover_signers_unchecked(&self) -> Result<Vec<Address>, RecoveryError>
    where
        Self::Transaction: SignedTransaction,
    {
        self.recover_signers_unchecked()
    }
}
impl<T, H> BlockBody for alloy_consensus::BlockBody<T, H>
where
    T: SignedTransaction,
    H: BlockHeader,
{
    type Transaction = T;
    type OmmerHeader = H;
    fn transactions(&self) -> &[Self::Transaction] {
        &self.transactions
    }
    fn into_ethereum_body(self) -> Self {
        // Already the canonical ethereum body — identity conversion.
        self
    }
    fn into_transactions(self) -> Vec<Self::Transaction> {
        self.transactions
    }
    fn withdrawals(&self) -> Option<&Withdrawals> {
        self.withdrawals.as_ref()
    }
    fn ommers(&self) -> Option<&[Self::OmmerHeader]> {
        // The ethereum body always carries an ommers list (possibly empty), so this is `Some`.
        Some(&self.ommers)
    }
}
/// This is a helper alias to make it easy to refer to the inner `Transaction` associated type of a
/// given type that implements [`BlockBody`].
pub type BodyTx<N> = <N as BlockBody>::Transaction;
/// This is a helper alias to make it easy to refer to the inner `OmmerHeader` associated type of a
/// given type that implements [`BlockBody`].
pub type BodyOmmer<N> = <N as BlockBody>::OmmerHeader;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/block/sealed.rs | crates/primitives-traits/src/block/sealed.rs | //! Sealed block types
use crate::{
block::{error::BlockRecoveryError, RecoveredBlock},
transaction::signed::RecoveryError,
Block, BlockBody, GotExpected, InMemorySize, SealedHeader,
};
use alloc::vec::Vec;
use alloy_consensus::BlockHeader;
use alloy_eips::{eip1898::BlockWithParent, BlockNumHash};
use alloy_primitives::{Address, BlockHash, Sealable, Sealed, B256};
use alloy_rlp::{Decodable, Encodable};
use bytes::BufMut;
use core::ops::Deref;
/// Sealed full block composed of the block's header and body.
///
/// This type uses lazy sealing to avoid hashing the header until it is needed, see also
/// [`SealedHeader`].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct SealedBlock<B: Block> {
    /// Sealed header; its hash may be computed lazily on first access (see type docs above).
    header: SealedHeader<B::Header>,
    /// The block's body.
    body: B::Body,
}
impl<B: Block> SealedBlock<B> {
    /// Hashes the header and creates a sealed block.
    ///
    /// This calculates the header hash. To create a [`SealedBlock`] without calculating the hash
    /// upfront see [`SealedBlock::new_unhashed`]
    pub fn seal_slow(block: B) -> Self {
        let hash = block.header().hash_slow();
        Self::new_unchecked(block, hash)
    }
    /// Create a new sealed block instance using the block.
    ///
    /// Caution: This assumes the given hash is the block's hash.
    #[inline]
    pub fn new_unchecked(block: B, hash: BlockHash) -> Self {
        let (header, body) = block.split();
        Self { header: SealedHeader::new(header, hash), body }
    }
    /// Creates a `SealedBlock` from the block without the available hash; the hash is computed
    /// lazily when first requested.
    pub fn new_unhashed(block: B) -> Self {
        let (header, body) = block.split();
        Self { header: SealedHeader::new_unhashed(header), body }
    }
    /// Creates the [`SealedBlock`] from the block's parts by hashing the header.
    ///
    /// This calculates the header hash. To create a [`SealedBlock`] from its parts without
    /// calculating the hash upfront see [`SealedBlock::from_parts_unhashed`]
    pub fn seal_parts(header: B::Header, body: B::Body) -> Self {
        Self::seal_slow(B::new(header, body))
    }
    /// Creates the [`SealedBlock`] from the block's parts without calculating the hash upfront.
    pub fn from_parts_unhashed(header: B::Header, body: B::Body) -> Self {
        Self::new_unhashed(B::new(header, body))
    }
    /// Creates the [`SealedBlock`] from the block's parts and a hash that is trusted as-is.
    pub fn from_parts_unchecked(header: B::Header, body: B::Body, hash: BlockHash) -> Self {
        Self::new_unchecked(B::new(header, body), hash)
    }
    /// Creates the [`SealedBlock`] from the [`SealedHeader`] and the body.
    pub fn from_sealed_parts(header: SealedHeader<B::Header>, body: B::Body) -> Self {
        let (header, hash) = header.split();
        Self::from_parts_unchecked(header, body, hash)
    }
    /// Returns a reference to the block hash.
    #[inline]
    pub fn hash_ref(&self) -> &BlockHash {
        self.header.hash_ref()
    }
    /// Returns the block hash.
    ///
    /// May compute the hash on first access (lazy sealing, see type docs).
    #[inline]
    pub fn hash(&self) -> B256 {
        self.header.hash()
    }
    /// Consumes the type and returns its components.
    #[doc(alias = "into_components")]
    pub fn split(self) -> (B, BlockHash) {
        let (header, hash) = self.header.split();
        (B::new(header, self.body), hash)
    }
    /// Consumes the type and returns the block.
    pub fn into_block(self) -> B {
        self.unseal()
    }
    /// Consumes the type and returns the block, discarding the cached hash.
    pub fn unseal(self) -> B {
        let header = self.header.unseal();
        B::new(header, self.body)
    }
    /// Clones the wrapped block (header and body), dropping the seal.
    pub fn clone_block(&self) -> B {
        B::new(self.header.clone_header(), self.body.clone())
    }
    /// Converts this block into a [`RecoveredBlock`] with the given senders
    ///
    /// Note: This method assumes the senders are correct and does not validate them.
    pub const fn with_senders(self, senders: Vec<Address>) -> RecoveredBlock<B> {
        RecoveredBlock::new_sealed(self, senders)
    }
    /// Converts this block into a [`RecoveredBlock`] with the given senders if the number of
    /// senders is equal to the number of transactions in the block and recovers the senders from
    /// the transactions, if
    /// not using [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction)
    /// to recover the senders.
    ///
    /// Returns an error if any of the transactions fail to recover the sender.
    pub fn try_with_senders(
        self,
        senders: Vec<Address>,
    ) -> Result<RecoveredBlock<B>, BlockRecoveryError<Self>> {
        RecoveredBlock::try_recover_sealed_with_senders(self, senders)
    }
    /// Converts this block into a [`RecoveredBlock`] with the given senders if the number of
    /// senders is equal to the number of transactions in the block and recovers the senders from
    /// the transactions, if
    /// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction)
    /// to recover the senders.
    ///
    /// Returns an error if any of the transactions fail to recover the sender.
    pub fn try_with_senders_unchecked(
        self,
        senders: Vec<Address>,
    ) -> Result<RecoveredBlock<B>, BlockRecoveryError<Self>> {
        RecoveredBlock::try_recover_sealed_with_senders_unchecked(self, senders)
    }
    /// Recovers the senders from the transactions in the block using
    /// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction).
    ///
    /// Returns an error (carrying this block back) if any of the transactions fail to recover
    /// the sender.
    pub fn try_recover(self) -> Result<RecoveredBlock<B>, BlockRecoveryError<Self>> {
        RecoveredBlock::try_recover_sealed(self)
    }
    /// Recovers the senders from the transactions in the block using
    /// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction).
    ///
    /// Returns an error (carrying this block back) if any of the transactions fail to recover
    /// the sender.
    pub fn try_recover_unchecked(self) -> Result<RecoveredBlock<B>, BlockRecoveryError<Self>> {
        RecoveredBlock::try_recover_sealed_unchecked(self)
    }
/// Returns reference to block header.
pub const fn header(&self) -> &B::Header {
self.header.header()
}
/// Returns reference to block body.
pub const fn body(&self) -> &B::Body {
&self.body
}
/// Returns the length of the block.
pub fn rlp_length(&self) -> usize {
B::rlp_length(self.header(), self.body())
}
/// Recovers all senders from the transactions in the block.
///
/// Returns `None` if any of the transactions fail to recover the sender.
pub fn senders(&self) -> Result<Vec<Address>, RecoveryError> {
self.body().recover_signers()
}
/// Return the number hash tuple.
pub fn num_hash(&self) -> BlockNumHash {
BlockNumHash::new(self.number(), self.hash())
}
/// Return a [`BlockWithParent`] for this header.
pub fn block_with_parent(&self) -> BlockWithParent {
BlockWithParent { parent: self.parent_hash(), block: self.num_hash() }
}
/// Returns the timestamp in seconds, assuming the timestamp is in milliseconds.
pub fn timestamp_seconds(&self) -> u64 {
if cfg!(feature = "timestamp-in-seconds") {
self.timestamp()
} else {
self.timestamp() / 1000
}
}
/// Returns the Sealed header.
pub const fn sealed_header(&self) -> &SealedHeader<B::Header> {
&self.header
}
/// Returns the wrapped `SealedHeader<B::Header>` as `SealedHeader<&B::Header>`.
pub fn sealed_header_ref(&self) -> SealedHeader<&B::Header> {
SealedHeader::new(self.header(), self.hash())
}
/// Clones the wrapped header and returns a [`SealedHeader`] sealed with the hash.
pub fn clone_sealed_header(&self) -> SealedHeader<B::Header> {
self.header.clone()
}
/// Consumes the block and returns the sealed header.
pub fn into_sealed_header(self) -> SealedHeader<B::Header> {
self.header
}
/// Consumes the block and returns the header.
pub fn into_header(self) -> B::Header {
self.header.unseal()
}
/// Consumes the block and returns the body.
pub fn into_body(self) -> B::Body {
self.body
}
/// Splits the block into body and header into separate components
pub fn split_header_body(self) -> (B::Header, B::Body) {
let header = self.header.unseal();
(header, self.body)
}
/// Splits the block into body and header into separate components.
pub fn split_sealed_header_body(self) -> (SealedHeader<B::Header>, B::Body) {
(self.header, self.body)
}
/// Returns an iterator over all blob versioned hashes from the block body.
#[inline]
pub fn blob_versioned_hashes_iter(&self) -> impl Iterator<Item = &B256> + '_ {
self.body().blob_versioned_hashes_iter()
}
/// Returns the number of transactions in the block.
#[inline]
pub fn transaction_count(&self) -> usize {
self.body().transaction_count()
}
/// Ensures that the transaction root in the block header is valid.
///
/// The transaction root is the Keccak 256-bit hash of the root node of the trie structure
/// populated with each transaction in the transactions list portion of the block.
///
/// # Returns
///
/// Returns `Ok(())` if the calculated transaction root matches the one stored in the header,
/// indicating that the transactions in the block are correctly represented in the trie.
///
/// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected`
/// error containing the calculated and expected roots.
pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected<B256>> {
let calculated_root = self.body().calculate_tx_root();
if self.header().transactions_root() != calculated_root {
return Err(GotExpected {
got: calculated_root,
expected: self.header().transactions_root(),
})
}
Ok(())
}
}
impl<B> From<B> for SealedBlock<B>
where
    B: Block,
{
    /// Seals the given block via [`SealedBlock::seal_slow`].
    fn from(block: B) -> Self {
        Self::seal_slow(block)
    }
}
impl<B> Default for SealedBlock<B>
where
    B: Block + Default,
{
    fn default() -> Self {
        // Seals the default block value.
        Self::seal_slow(Default::default())
    }
}
impl<B: Block> InMemorySize for SealedBlock<B> {
    /// Heuristic in-memory size: body size plus sealed header size.
    #[inline]
    fn size(&self) -> usize {
        self.body.size() + self.header.size()
    }
}
impl<B: Block> Deref for SealedBlock<B> {
    // Dereferencing a sealed block yields its header.
    type Target = B::Header;
    fn deref(&self) -> &Self::Target {
        self.header()
    }
}
impl<B: Block> Encodable for SealedBlock<B> {
    fn encode(&self, out: &mut dyn BufMut) {
        // Encoding currently requires an owned, unsealed block, hence the clone.
        // TODO: https://github.com/paradigmxyz/reth/issues/18002
        self.clone().into_block().encode(out);
    }
}
impl<B: Block> Decodable for SealedBlock<B> {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Decode the raw block, then seal it with its computed hash.
        let block = B::decode(buf)?;
        Ok(Self::seal_slow(block))
    }
}
impl<B: Block> From<SealedBlock<B>> for Sealed<B> {
    fn from(value: SealedBlock<B>) -> Self {
        // `split` yields the (possibly lazily computed) hash, so no re-hashing is needed.
        let (block, hash) = value.split();
        Self::new_unchecked(block, hash)
    }
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlock<B>
where
    B: Block + arbitrary::Arbitrary<'a>,
{
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Generate an arbitrary block and seal it with its real hash.
        let block = B::arbitrary(u)?;
        Ok(Self::seal_slow(block))
    }
}
#[cfg(any(test, feature = "test-utils"))]
impl<B: crate::test_utils::TestBlock> SealedBlock<B> {
    /// Returns a mutable reference to the header.
    pub const fn header_mut(&mut self) -> &mut B::Header {
        self.header.header_mut()
    }
    /// Updates the block hash without touching the header.
    pub fn set_hash(&mut self, hash: BlockHash) {
        self.header.set_hash(hash)
    }
    /// Returns a mutable reference to the body.
    pub const fn body_mut(&mut self) -> &mut B::Body {
        &mut self.body
    }
    /// Updates the parent block hash.
    pub fn set_parent_hash(&mut self, hash: BlockHash) {
        self.header.set_parent_hash(hash)
    }
    /// Updates the block number.
    pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) {
        self.header.set_block_number(number)
    }
    /// Updates the block state root.
    pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) {
        self.header.set_state_root(state_root)
    }
    /// Updates the block difficulty.
    pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) {
        self.header.set_difficulty(difficulty)
    }
}
/// Bincode-compatible [`SealedBlock`] serde implementation.
#[cfg(feature = "serde-bincode-compat")]
pub(super) mod serde_bincode_compat {
    use crate::{
        serde_bincode_compat::{self, BincodeReprFor, SerdeBincodeCompat},
        Block,
    };
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{serde_as, DeserializeAs, SerializeAs};
    /// Bincode-compatible [`super::SealedBlock`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_primitives_traits::{
    ///     block::SealedBlock,
    ///     serde_bincode_compat::{self, SerdeBincodeCompat},
    ///     Block,
    /// };
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data<T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static> {
    ///     #[serde_as(as = "serde_bincode_compat::SealedBlock<'_, T>")]
    ///     block: SealedBlock<T>,
    /// }
    /// ```
    #[serde_as]
    #[derive(derive_more::Debug, Serialize, Deserialize)]
    pub struct SealedBlock<
        'a,
        T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
    > {
        // Bincode representation of the sealed header (carries the block hash).
        #[serde(
            bound = "serde_bincode_compat::SealedHeader<'a, T::Header>: Serialize + serde::de::DeserializeOwned"
        )]
        header: serde_bincode_compat::SealedHeader<'a, T::Header>,
        // Bincode representation of the block body.
        body: BincodeReprFor<'a, T::Body>,
    }
    // Borrow a sealed block as its bincode representation.
    impl<'a, T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static>
        From<&'a super::SealedBlock<T>> for SealedBlock<'a, T>
    {
        fn from(value: &'a super::SealedBlock<T>) -> Self {
            Self { header: value.header.as_repr(), body: value.body.as_repr() }
        }
    }
    // Reconstruct a sealed block from its bincode representation.
    impl<'a, T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static>
        From<SealedBlock<'a, T>> for super::SealedBlock<T>
    {
        fn from(value: SealedBlock<'a, T>) -> Self {
            Self::from_sealed_parts(value.header.into(), SerdeBincodeCompat::from_repr(value.body))
        }
    }
    impl<T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static>
        SerializeAs<super::SealedBlock<T>> for SealedBlock<'_, T>
    {
        fn serialize_as<S>(source: &super::SealedBlock<T>, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            SealedBlock::from(source).serialize(serializer)
        }
    }
    impl<'de, T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static>
        DeserializeAs<'de, super::SealedBlock<T>> for SealedBlock<'de, T>
    {
        fn deserialize_as<D>(deserializer: D) -> Result<super::SealedBlock<T>, D::Error>
        where
            D: Deserializer<'de>,
        {
            SealedBlock::deserialize(deserializer).map(Into::into)
        }
    }
    impl<T: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static>
        SerdeBincodeCompat for super::SealedBlock<T>
    {
        type BincodeRepr<'a> = SealedBlock<'a, T>;
        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            self.into()
        }
        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            repr.into()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_rlp::{Decodable, Encodable};
    #[test]
    fn test_sealed_block_rlp_roundtrip() {
        // Create a sample block using alloy_consensus::Block
        let header = alloy_consensus::Header {
            parent_hash: B256::ZERO,
            ommers_hash: B256::ZERO,
            beneficiary: Address::ZERO,
            state_root: B256::ZERO,
            transactions_root: B256::ZERO,
            receipts_root: B256::ZERO,
            logs_bloom: Default::default(),
            difficulty: Default::default(),
            number: 42,
            gas_limit: 30_000_000,
            gas_used: 21_000,
            timestamp: 1_000_000,
            extra_data: Default::default(),
            mix_hash: B256::ZERO,
            nonce: Default::default(),
            base_fee_per_gas: Some(1_000_000_000),
            withdrawals_root: None,
            blob_gas_used: None,
            excess_blob_gas: None,
            parent_beacon_block_root: None,
            requests_hash: None,
        };
        // Create a simple transaction
        let tx = alloy_consensus::TxLegacy {
            chain_id: Some(1),
            nonce: 0,
            gas_price: 21_000_000_000,
            gas_limit: 21_000,
            to: alloy_primitives::TxKind::Call(Address::ZERO),
            value: alloy_primitives::U256::from(100),
            input: alloy_primitives::Bytes::default(),
        };
        // Wrap it in a signed envelope with a dummy signature and hash; the signature is
        // never recovered in this test, so a test signature suffices.
        let tx_signed =
            alloy_consensus::TxEnvelope::Legacy(alloy_consensus::Signed::new_unchecked(
                tx,
                alloy_primitives::Signature::test_signature(),
                B256::ZERO,
            ));
        // Create block body with the transaction
        let body = alloy_consensus::BlockBody {
            transactions: vec![tx_signed],
            ommers: vec![],
            withdrawals: Some(Default::default()),
        };
        // Create the block
        let block = alloy_consensus::Block::new(header, body);
        // Create a sealed block
        let sealed_block = SealedBlock::seal_slow(block);
        // Encode the sealed block
        let mut encoded = Vec::new();
        sealed_block.encode(&mut encoded);
        // Decode the sealed block
        let decoded = SealedBlock::<
            alloy_consensus::Block<alloy_consensus::TxEnvelope, alloy_consensus::Header>,
        >::decode(&mut encoded.as_slice())
        .expect("Failed to decode sealed block");
        // Verify the roundtrip: hash and key fields must survive encode/decode.
        assert_eq!(sealed_block.hash(), decoded.hash());
        assert_eq!(sealed_block.header().number, decoded.header().number);
        assert_eq!(sealed_block.header().state_root, decoded.header().state_root);
        assert_eq!(sealed_block.body().transactions.len(), decoded.body().transactions.len());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/header/test_utils.rs | crates/primitives-traits/src/header/test_utils.rs | //! Test utilities for the block header.
use crate::BlockHeader;
use alloy_consensus::Header;
use alloy_primitives::{BlockHash, BlockNumber, B256, U256};
use proptest::{arbitrary::any, prop_compose};
use proptest_arbitrary_interop::arb;
/// A helper trait for [`Header`]s that allows for mutable access to the headers values.
///
/// This allows for modifying the header for testing purposes.
pub trait TestHeader: BlockHeader {
    /// Updates the parent block hash.
    fn set_parent_hash(&mut self, hash: BlockHash);
    /// Updates the block number.
    fn set_block_number(&mut self, number: BlockNumber);
    /// Updates the block state root.
    fn set_state_root(&mut self, state_root: B256);
    /// Updates the block difficulty.
    fn set_difficulty(&mut self, difficulty: U256);
}
impl TestHeader for Header {
    // All setters write straight through to the public fields of `Header`.
    fn set_parent_hash(&mut self, hash: BlockHash) {
        self.parent_hash = hash
    }
    fn set_block_number(&mut self, number: BlockNumber) {
        self.number = number;
    }
    fn set_state_root(&mut self, state_root: B256) {
        self.state_root = state_root;
    }
    fn set_difficulty(&mut self, difficulty: U256) {
        self.difficulty = difficulty;
    }
}
/// Generates a header which is valid __with respect to past and future forks__. This means, for
/// example, that if the withdrawals root is present, the base fee per gas is also present.
///
/// If blob gas used were present, then the excess blob gas and parent beacon block root are also
/// present. In this example, the withdrawals root would also be present.
///
/// This __does not, and should not guarantee__ that the header is valid with respect to __anything
/// else__.
pub const fn generate_valid_header(
    mut header: Header,
    eip_4844_active: bool,
    blob_gas_used: u64,
    excess_blob_gas: u64,
    parent_beacon_block_root: B256,
) -> Header {
    // Withdrawals (EIP-4895) require EIP-1559: without a base fee, drop the withdrawals root.
    if header.base_fee_per_gas.is_none() {
        header.withdrawals_root = None;
    }
    // The blob fields (EIP-4844) and the parent beacon block root (EIP-4788) are either all
    // present or all absent, depending on whether EIP-4844 is active.
    header.blob_gas_used = if eip_4844_active { Some(blob_gas_used) } else { None };
    header.excess_blob_gas = if eip_4844_active { Some(excess_blob_gas) } else { None };
    header.parent_beacon_block_root =
        if eip_4844_active { Some(parent_beacon_block_root) } else { None };
    // Placeholder for future EIP adjustments
    header.requests_hash = None;
    header
}
prop_compose! {
    /// Generates a proptest strategy for constructing an instance of a header which is valid __with
    /// respect to past and future forks__.
    ///
    /// See docs for [generate_valid_header] for more information.
    pub fn valid_header_strategy()(
        header in arb::<Header>(),
        eip_4844_active in any::<bool>(),
        blob_gas_used in any::<u64>(),
        excess_blob_gas in any::<u64>(),
        parent_beacon_block_root in arb::<B256>()
    ) -> Header {
        // Post-process the arbitrary header so fork-dependent fields are mutually consistent.
        generate_valid_header(header, eip_4844_active, blob_gas_used, excess_blob_gas, parent_beacon_block_root)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/header/mod.rs | crates/primitives-traits/src/header/mod.rs | mod sealed;
pub use sealed::{Header, SealedHeader, SealedHeaderFor};
#[cfg(any(test, feature = "test-utils", feature = "arbitrary"))]
pub mod test_utils;
/// Bincode-compatible header type serde implementations.
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
pub use super::sealed::serde_bincode_compat::SealedHeader;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/header/sealed.rs | crates/primitives-traits/src/header/sealed.rs | use crate::{sync::OnceLock, InMemorySize, NodePrimitives};
pub use alloy_consensus::Header;
use alloy_consensus::Sealed;
use alloy_eips::{eip1898::BlockWithParent, BlockNumHash};
use alloy_primitives::{keccak256, BlockHash, Sealable};
use alloy_rlp::{Decodable, Encodable};
use bytes::BufMut;
use core::mem;
use derive_more::{AsRef, Deref};
/// Type alias for [`SealedHeader`] generic over the `BlockHeader` type of [`NodePrimitives`].
pub type SealedHeaderFor<N> = SealedHeader<<N as NodePrimitives>::BlockHeader>;
/// Seals the header with the block hash.
///
/// This type uses lazy sealing to avoid hashing the header until it is needed:
///
/// [`SealedHeader::new_unhashed`] creates a sealed header without hashing the header.
/// [`SealedHeader::new`] creates a sealed header with the corresponding block hash.
/// [`SealedHeader::hash`] computes the hash if it has not been computed yet.
///
/// Note: the cached hash is skipped during serde (de)serialization, so a deserialized
/// `SealedHeader` recomputes its hash lazily on first access.
#[derive(Debug, Clone, AsRef, Deref)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))]
pub struct SealedHeader<H = Header> {
    /// Block hash, computed lazily on first access.
    #[cfg_attr(feature = "serde", serde(skip))]
    hash: OnceLock<BlockHash>,
    /// Locked Header fields.
    #[as_ref]
    #[deref]
    header: H,
}
impl<H> SealedHeader<H> {
    /// Creates the sealed header without hashing the header.
    #[inline]
    pub fn new_unhashed(header: H) -> Self {
        Self { header, hash: Default::default() }
    }
    /// Creates the sealed header with the corresponding block hash.
    ///
    /// Note: the given hash is trusted as-is and not verified against the header.
    #[inline]
    pub fn new(header: H, hash: BlockHash) -> Self {
        Self { header, hash: hash.into() }
    }
    /// Returns the sealed Header fields.
    #[inline]
    pub const fn header(&self) -> &H {
        &self.header
    }
    /// Clone the header.
    pub fn clone_header(&self) -> H
    where
        H: Clone,
    {
        self.header.clone()
    }
    /// Consumes the type and returns the wrapped header.
    pub fn into_header(self) -> H {
        self.header
    }
    /// Consumes the type and returns the wrapped header.
    pub fn unseal(self) -> H {
        self.header
    }
    /// Converts from &`SealedHeader<H>` to `SealedHeader<&H>`.
    ///
    /// An already computed hash is carried over into the borrowed header.
    pub fn sealed_ref(&self) -> SealedHeader<&H> {
        SealedHeader { hash: self.hash.clone(), header: &self.header }
    }
}
impl<H: Sealable> SealedHeader<H> {
    /// Hashes the header and creates a sealed header.
    pub fn seal_slow(header: H) -> Self {
        let hash = header.hash_slow();
        Self::new(header, hash)
    }
    /// Returns the block hash.
    ///
    /// Note: if the hash has not been computed yet, this will compute the hash:
    /// [`Sealable::hash_slow`].
    pub fn hash_ref(&self) -> &BlockHash {
        self.hash.get_or_init(|| self.header.hash_slow())
    }
    /// Returns a copy of the block hash.
    pub fn hash(&self) -> BlockHash {
        *self.hash_ref()
    }
    /// This is the inverse of [`Self::seal_slow`] and returns the raw header and hash,
    /// computing the hash first if it has not been computed yet.
    pub fn split(self) -> (H, BlockHash) {
        let hash = self.hash();
        (self.header, hash)
    }
    /// Returns references to both the header and hash without taking ownership.
    pub fn split_ref(&self) -> (&H, &BlockHash) {
        (self.header(), self.hash_ref())
    }
}
impl<H: Sealable> SealedHeader<&H> {
    /// Converts a `SealedHeader<&H>` into an owned `SealedHeader<H>` by cloning the
    /// referenced header; the (possibly already computed) hash state is moved over.
    pub fn cloned(self) -> SealedHeader<H>
    where
        H: Clone,
    {
        SealedHeader { hash: self.hash, header: self.header.clone() }
    }
}
impl<H: alloy_consensus::BlockHeader + Sealable> SealedHeader<H> {
    /// Return the number hash tuple.
    pub fn num_hash(&self) -> BlockNumHash {
        BlockNumHash::new(self.number(), self.hash())
    }
    /// Return a [`BlockWithParent`] for this header.
    pub fn block_with_parent(&self) -> BlockWithParent {
        BlockWithParent { parent: self.parent_hash(), block: self.num_hash() }
    }
    /// Returns the header timestamp in seconds.
    ///
    /// With the `timestamp-in-seconds` feature enabled the raw timestamp is returned as-is;
    /// otherwise the raw timestamp is assumed to be in milliseconds and divided by 1000.
    pub fn timestamp_seconds(&self) -> u64 {
        if cfg!(feature = "timestamp-in-seconds") {
            self.header.timestamp()
        } else {
            self.header.timestamp() / 1000
        }
    }
}
impl<H: Sealable> Eq for SealedHeader<H> {}
impl<H: Sealable> PartialEq for SealedHeader<H> {
    /// Equality is defined solely by the block hash; this may compute the hash lazily.
    fn eq(&self, other: &Self) -> bool {
        self.hash() == other.hash()
    }
}
impl<H: Sealable> core::hash::Hash for SealedHeader<H> {
    /// Hashes only the block hash, consistent with the `PartialEq` implementation.
    fn hash<Ha: core::hash::Hasher>(&self, state: &mut Ha) {
        self.hash().hash(state)
    }
}
impl<H: InMemorySize> InMemorySize for SealedHeader<H> {
    /// Calculates a heuristic for the in-memory size of the [`SealedHeader`].
    #[inline]
    fn size(&self) -> usize {
        self.header.size() + mem::size_of::<BlockHash>()
    }
}
impl<H: Sealable + Default> Default for SealedHeader<H> {
    fn default() -> Self {
        // Eagerly hashes the default header via `seal_slow`.
        Self::seal_slow(H::default())
    }
}
impl Encodable for SealedHeader {
    fn encode(&self, out: &mut dyn BufMut) {
        // Only the header is RLP-encoded; the hash is derivable and not serialized.
        self.header.encode(out);
    }
}
impl Decodable for SealedHeader {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Work on a copy of the slice so the consumed byte range can be re-read for hashing.
        let b = &mut &**buf;
        let started_len = buf.len();
        // decode the header from temp buffer
        let header = Header::decode(b)?;
        // hash the consumed bytes, the rlp encoded header
        let consumed = started_len - b.len();
        let hash = keccak256(&buf[..consumed]);
        // update original buffer
        *buf = *b;
        Ok(Self::new(header, hash))
    }
}
impl<H: Sealable> From<SealedHeader<H>> for Sealed<H> {
    fn from(value: SealedHeader<H>) -> Self {
        // `split` yields the (possibly lazily computed) hash, so no re-hashing is needed.
        let (header, hash) = value.split();
        Self::new_unchecked(header, hash)
    }
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader<H>
where
    H: for<'b> arbitrary::Arbitrary<'b> + Sealable,
{
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        // Generate an arbitrary header and seal it with its real hash.
        let header = H::arbitrary(u)?;
        Ok(Self::seal_slow(header))
    }
}
#[cfg(any(test, feature = "test-utils"))]
impl<H: crate::test_utils::TestHeader> SealedHeader<H> {
    /// Updates the block header.
    ///
    /// Note: the cached hash is not invalidated; pair with [`Self::set_hash`] if needed.
    pub fn set_header(&mut self, header: H) {
        self.header = header
    }
    /// Updates the block hash without touching the header.
    pub fn set_hash(&mut self, hash: BlockHash) {
        self.hash = hash.into()
    }
    /// Returns a mutable reference to the header.
    pub const fn header_mut(&mut self) -> &mut H {
        &mut self.header
    }
    /// Updates the parent block hash.
    pub fn set_parent_hash(&mut self, hash: BlockHash) {
        self.header.set_parent_hash(hash);
    }
    /// Updates the block number.
    pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) {
        self.header.set_block_number(number);
    }
    /// Updates the block state root.
    pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) {
        self.header.set_state_root(state_root);
    }
    /// Updates the block difficulty.
    pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) {
        self.header.set_difficulty(difficulty);
    }
}
#[cfg(feature = "rpc-compat")]
mod rpc_compat {
    use super::*;
    impl<H> SealedHeader<H> {
        /// Converts this header into `alloy_rpc_types_eth::Header<H>`.
        ///
        /// Note: This does not set the total difficulty or size of the block.
        pub fn into_rpc_header(self) -> alloy_rpc_types_eth::Header<H>
        where
            H: Sealable,
        {
            alloy_rpc_types_eth::Header::from_sealed(self.into())
        }
        /// Converts an `alloy_rpc_types_eth::Header<H>` into a `SealedHeader<H>`.
        ///
        /// Note: the hash carried in the RPC header is trusted and not recomputed.
        pub fn from_rpc_header(header: alloy_rpc_types_eth::Header<H>) -> Self {
            Self::new(header.inner, header.hash)
        }
    }
    impl<H> From<alloy_rpc_types_eth::Header<H>> for SealedHeader<H> {
        fn from(value: alloy_rpc_types_eth::Header<H>) -> Self {
            Self::from_rpc_header(value)
        }
    }
}
/// Bincode-compatible [`SealedHeader`] serde implementation.
#[cfg(feature = "serde-bincode-compat")]
pub(super) mod serde_bincode_compat {
    use crate::serde_bincode_compat::SerdeBincodeCompat;
    use alloy_primitives::{BlockHash, Sealable};
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{DeserializeAs, SerializeAs};
    /// Bincode-compatible [`super::SealedHeader`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_primitives_traits::{serde_bincode_compat, SealedHeader};
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data {
    ///     #[serde_as(as = "serde_bincode_compat::SealedHeader")]
    ///     header: SealedHeader,
    /// }
    /// ```
    #[derive(derive_more::Debug, Serialize, Deserialize)]
    #[debug(bound(H::BincodeRepr<'a>: core::fmt::Debug))]
    pub struct SealedHeader<'a, H: Sealable + SerdeBincodeCompat = super::Header> {
        // The hash is stored explicitly here (plain `BlockHash`), unlike the lazy cache
        // used by `super::SealedHeader`.
        hash: BlockHash,
        // Borrowed bincode representation of the header.
        header: H::BincodeRepr<'a>,
    }
    impl<'a, H: Sealable + SerdeBincodeCompat> From<&'a super::SealedHeader<H>>
        for SealedHeader<'a, H>
    {
        fn from(value: &'a super::SealedHeader<H>) -> Self {
            // `value.hash()` computes the hash if it has not been cached yet.
            Self { hash: value.hash(), header: value.header.as_repr() }
        }
    }
    impl<'a, H: Sealable + SerdeBincodeCompat> From<SealedHeader<'a, H>> for super::SealedHeader<H> {
        fn from(value: SealedHeader<'a, H>) -> Self {
            Self::new(SerdeBincodeCompat::from_repr(value.header), value.hash)
        }
    }
    impl SerializeAs<super::SealedHeader> for SealedHeader<'_> {
        fn serialize_as<S>(source: &super::SealedHeader, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            SealedHeader::from(source).serialize(serializer)
        }
    }
    impl<'de> DeserializeAs<'de, super::SealedHeader> for SealedHeader<'de> {
        fn deserialize_as<D>(deserializer: D) -> Result<super::SealedHeader, D::Error>
        where
            D: Deserializer<'de>,
        {
            SealedHeader::deserialize(deserializer).map(Into::into)
        }
    }
    impl<H: Sealable + SerdeBincodeCompat> SerdeBincodeCompat for super::SealedHeader<H> {
        type BincodeRepr<'a> = SealedHeader<'a, H>;
        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            self.into()
        }
        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            repr.into()
        }
    }
    #[cfg(test)]
    mod tests {
        use super::super::{serde_bincode_compat, SealedHeader};
        use arbitrary::Arbitrary;
        use rand::Rng;
        use serde::{Deserialize, Serialize};
        use serde_with::serde_as;
        #[test]
        fn test_sealed_header_bincode_roundtrip() {
            #[serde_as]
            #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
            struct Data {
                #[serde_as(as = "serde_bincode_compat::SealedHeader")]
                transaction: SealedHeader,
            }
            // Build a random header from arbitrary bytes and roundtrip it through bincode.
            let mut bytes = [0u8; 1024];
            rand::rng().fill(&mut bytes[..]);
            let data = Data {
                transaction: SealedHeader::arbitrary(&mut arbitrary::Unstructured::new(&bytes))
                    .unwrap(),
            };
            let encoded = bincode::serialize(&data).unwrap();
            let decoded: Data = bincode::deserialize(&encoded).unwrap();
            assert_eq!(decoded, data);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/constants/gas_units.rs | crates/primitives-traits/src/constants/gas_units.rs | use alloc::string::String;
use core::time::Duration;
/// Represents one Kilogas, or `1_000` gas.
pub const KILOGAS: u64 = 1_000;
/// Represents one Megagas, or `1_000_000` gas.
pub const MEGAGAS: u64 = KILOGAS * 1_000;
/// Represents one Gigagas, or `1_000_000_000` gas.
pub const GIGAGAS: u64 = MEGAGAS * 1_000;
/// Returns a formatted gas throughput log, showing either:
/// * "Kgas/s", or 1,000 gas per second
/// * "Mgas/s", or 1,000,000 gas per second
/// * "Ggas/s", or 1,000,000,000 gas per second
///
/// Depending on the magnitude of the gas throughput.
pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String {
    let rate = gas as f64 / execution_duration.as_secs_f64();
    // Pick the largest unit that keeps the value below 1000 (or Ggas for anything bigger).
    let (value, unit) = if rate < MEGAGAS as f64 {
        (rate / KILOGAS as f64, "Kgas")
    } else if rate < GIGAGAS as f64 {
        (rate / MEGAGAS as f64, "Mgas")
    } else {
        (rate / GIGAGAS as f64, "Ggas")
    };
    format!("{value:.2}{unit}/second")
}
/// Returns a formatted gas log, showing either:
/// * "Kgas", or 1,000 gas
/// * "Mgas", or 1,000,000 gas
/// * "Ggas", or 1,000,000,000 gas
///
/// Depending on the magnitude of gas.
pub fn format_gas(gas: u64) -> String {
    let gas = gas as f64;
    // Same unit selection as `format_gas_throughput`, applied to an absolute amount.
    let (value, unit) = if gas < MEGAGAS as f64 {
        (gas / KILOGAS as f64, "Kgas")
    } else if gas < GIGAGAS as f64 {
        (gas / MEGAGAS as f64, "Mgas")
    } else {
        (gas / GIGAGAS as f64, "Ggas")
    };
    format!("{value:.2}{unit}")
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_gas_fmt() {
        // 888 gas = 0.888 Kgas, rounded to two decimals.
        let gas = 888;
        let gas_unit = format_gas(gas);
        assert_eq!(gas_unit, "0.89Kgas");
        let gas = 100_000;
        let gas_unit = format_gas(gas);
        assert_eq!(gas_unit, "100.00Kgas");
        let gas = 100_000_000;
        let gas_unit = format_gas(gas);
        assert_eq!(gas_unit, "100.00Mgas");
        let gas = 100_000_000_000;
        let gas_unit = format_gas(gas);
        assert_eq!(gas_unit, "100.00Ggas");
    }
    #[test]
    fn test_gas_throughput_fmt() {
        // With a one-second duration the throughput equals the raw gas amount.
        let duration = Duration::from_secs(1);
        let gas = 100_000;
        let throughput = format_gas_throughput(gas, duration);
        assert_eq!(throughput, "100.00Kgas/second");
        let gas = 100_000_000;
        let throughput = format_gas_throughput(gas, duration);
        assert_eq!(throughput, "100.00Mgas/second");
        let gas = 100_000_000_000;
        let throughput = format_gas_throughput(gas, duration);
        assert_eq!(throughput, "100.00Ggas/second");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/primitives-traits/src/constants/mod.rs | crates/primitives-traits/src/constants/mod.rs | //! Ethereum protocol-related constants
/// Gas units, for example [`GIGAGAS`].
pub mod gas_units;
pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS};
/// The client version: `reth/v{major}.{minor}.{patch}`
pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION"));
/// Minimum gas limit allowed for transactions.
pub const MINIMUM_GAS_LIMIT: u64 = 5000;
/// Maximum gas limit allowed for block.
/// In hex this number is `0x7fffffffffffffff` (i.e. `i64::MAX`).
pub const MAXIMUM_GAS_LIMIT_BLOCK: u64 = 2u64.pow(63) - 1;
/// The bound divisor of the gas limit, used in update calculations.
pub const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024;
/// Maximum transaction gas limit (2^24 = 16,777,216 gas) as defined by
/// [EIP-7825](https://eips.ethereum.org/EIPS/eip-7825) activated in `Osaka` hardfork.
pub const MAX_TX_GAS_LIMIT_OSAKA: u64 = 2u64.pow(24);
/// The number of blocks to unwind during a reorg that already became a part of canonical chain.
///
/// In reality, the node can end up in this particular situation very rarely. It would happen only
/// if the node process is abruptly terminated during ongoing reorg and doesn't boot back up for
/// long period of time.
///
/// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in
/// the database.
pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/genesis.rs | crates/genesis-builder/src/genesis.rs | use crate::{
error::{BuilderError, Result},
types::Genesis,
};
use std::{fs, path::Path};
/// Loads and deserializes a genesis JSON file from `path`.
///
/// Returns [`BuilderError::GenesisNotFound`] if the file does not exist; I/O and JSON
/// parse errors are propagated via `?`.
pub fn load_genesis(path: &Path) -> Result<Genesis> {
    if !path.exists() {
        return Err(BuilderError::GenesisNotFound(path.to_path_buf()));
    }
    Ok(serde_json::from_str(&fs::read_to_string(path)?)?)
}
/// Serializes `genesis` as pretty-printed JSON and writes it to `path`.
pub fn write_genesis(genesis: &Genesis, path: &Path) -> Result<()> {
    let pretty = serde_json::to_string_pretty(genesis)?;
    Ok(fs::write(path, pretty)?)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/builder.rs | crates/genesis-builder/src/builder.rs | use crate::{
artifact::ArtifactLoader,
error::{BuilderError, Result},
types::{Genesis, GenesisAccount, Manifest},
};
use alloy_primitives::{hex, Address};
use std::io::{self, Write};
use tracing::{info, warn};
/// Default nonce for the genesis file
///
/// NOTE(review): `add_contract` currently writes accounts with `nonce: None`; confirm this
/// constant is still consumed elsewhere.
pub const DEFAULT_NONCE: &str = "0x1";
/// Default balance for the genesis file
pub const DEFAULT_BALANCE: &str = "0x0";
/// Builder for constructing genesis files with contracts
#[derive(Debug)]
pub struct GenesisBuilder {
    /// Manifest containing the contracts to deploy
    manifest: Manifest,
    /// Genesis file to build
    genesis: Genesis,
    /// Loader for fetching contract artifacts
    loader: ArtifactLoader,
    /// Number of contracts added to the genesis file
    contracts_added: usize,
    /// Say "yes" to every overwrite question
    yes_overwrite: bool,
}
impl GenesisBuilder {
    /// Creates a new genesis builder from a manifest and an existing genesis.
    ///
    /// Returns an error if the artifact loader cannot be constructed.
    pub fn new(manifest: Manifest, genesis: Genesis, yes_overwrite: bool) -> Result<Self> {
        let loader = ArtifactLoader::new()?;
        Ok(Self { manifest, genesis, loader, contracts_added: 0, yes_overwrite })
    }
    /// Executes the build process: adds every contract listed in the manifest to the
    /// genesis allocation and returns the resulting genesis.
    pub fn build(mut self) -> Result<Genesis> {
        info!(
            "Building genesis with {} contracts from {}",
            self.manifest.contracts.len(),
            self.manifest.metadata.base_url()
        );
        // Clone the contract list up front so `add_contract` can borrow `self` mutably.
        let contracts = self.manifest.contracts.clone();
        for (name, config) in &contracts {
            self.add_contract(name, config)?;
        }
        info!("Added {} contracts to genesis", self.contracts_added);
        Ok(self.genesis)
    }
    /// Adds a single contract to the genesis allocation.
    ///
    /// Fetches the contract artifact from the manifest's base URL, parses the target
    /// address, and inserts the deployed bytecode. If the address is already allocated,
    /// the user is prompted (unless `yes_overwrite` is set) before overwriting; declining
    /// yields a [`BuilderError::AddressCollision`] error.
    fn add_contract(&mut self, name: &str, config: &crate::types::ContractConfig) -> Result<()> {
        let url = format!(
            "{}/{}",
            self.manifest.metadata.base_url().trim_end_matches('/'),
            config.artifact.trim_start_matches('/')
        );
        // Structured logging instead of a leftover debug `println!` to stdout.
        info!("Fetching artifact from {}", url);
        let artifact = self.loader.load_artifact(&url)?;
        let address = parse_address(&config.address)?;
        if self.genesis.alloc.contains_key(&address) {
            if !self.yes_overwrite && !overwrite_address(name, &config.address)? {
                return Err(BuilderError::AddressCollision(format!(
                    "{} ({})",
                    name, config.address
                )));
            }
            warn!("Overwriting existing contract at {}", config.address);
        }
        let account = GenesisAccount {
            code: Some(format!("0x{}", hex::encode(&artifact.deployed_bytecode))),
            balance: DEFAULT_BALANCE.to_string(),
            nonce: None,
            storage: Default::default(),
        };
        self.genesis.alloc.insert(address, account);
        self.contracts_added += 1;
        info!("Added {} @ {}", name, config.address);
        Ok(())
    }
}
/// Parse address from hex string
///
/// Accepts an optional `0x` prefix and left-pads short hex strings with
/// zeros up to the full 20-byte (40 hex character) address width.
fn parse_address(hex_str: &str) -> Result<Address> {
    let raw = hex_str.strip_prefix("0x").unwrap_or(hex_str);
    // Left-pad with zeros so abbreviated addresses like "0x1" are accepted.
    let full = if raw.len() < 40 { format!("{:0>40}", raw) } else { raw.to_string() };
    match hex::decode(&full) {
        // Exactly 20 decoded bytes is a valid address; anything else is not.
        Ok(bytes) if bytes.len() == 20 => Ok(Address::from_slice(&bytes)),
        _ => Err(BuilderError::InvalidAddress(raw.to_string())),
    }
}
/// Prompt the user to confirm overwriting an existing contract
///
/// Returns `Ok(true)` only when the user answers `y` or `yes`
/// (case-insensitive); any other input — including just Enter — declines.
fn overwrite_address(name: &str, address: &str) -> Result<bool> {
    print!(
        "Address collision: {} ({}) already exists in genesis. Overwrite? [y/N]: ",
        name, address
    );
    // `print!` does not flush; flush explicitly so the prompt is visible
    // before blocking on stdin.
    io::stdout().flush()?;

    let mut answer = String::new();
    io::stdin().read_line(&mut answer)?;
    Ok(matches!(answer.trim().to_lowercase().as_str(), "y" | "yes"))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/lib.rs | crates/genesis-builder/src/lib.rs | //! Genesis builder for constructing genesis files with contracts
/// Artifact loader for fetching contract artifacts
pub mod artifact;
/// Genesis builder for constructing genesis files
pub mod builder;
/// Error types
pub mod error;
/// Genesis file handling
pub mod genesis;
/// Manifest file handling
pub mod manifest;
/// Common types
pub mod types;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/artifact.rs | crates/genesis-builder/src/artifact.rs | use crate::{
error::{BuilderError, Result},
types::ContractArtifact,
};
use alloy_primitives::{hex, Bytes};
use serde_json::Value;
use std::time::Duration;
use tracing::info;
/// Default timeout for loading contract artifacts from remote
pub const DEFAULT_ARTIFACT_TIMEOUT: Duration = Duration::from_secs(30);
/// Client for loading contract artifacts
///
/// Thin wrapper around a blocking `reqwest` client used to fetch artifact
/// JSON (expected to carry a `deployedBytecode.object` field) from remote
/// URLs.
#[derive(Debug, Clone)]
pub struct ArtifactLoader {
    /// HTTP client
    client: reqwest::blocking::Client,
}
impl ArtifactLoader {
    /// Create a new artifact loader with default settings
    pub fn new() -> Result<Self> {
        Self::with_timeout(DEFAULT_ARTIFACT_TIMEOUT)
    }

    /// Create a new artifact loader with custom timeout
    pub fn with_timeout(timeout: Duration) -> Result<Self> {
        let client =
            reqwest::blocking::Client::builder().timeout(timeout).build().map_err(|e| {
                BuilderError::RemoteFetchFailed("client initialization".to_string(), e.to_string())
            })?;
        Ok(Self { client })
    }

    /// Load contract artifact from a remote URL
    ///
    /// # Errors
    /// Fails when the request errors or returns a non-success HTTP status,
    /// when the body is not valid JSON, or when the artifact is missing a
    /// `deployedBytecode.object` hex string.
    pub fn load_artifact(&self, url: &str) -> Result<ContractArtifact> {
        info!("Fetching {}", url);
        let data = self.fetch_remote(url)?;
        let json: Value = serde_json::from_slice(&data)?;
        let deployed_bytecode = Self::extract_bytecode(&json, url)?;
        let name = Self::extract_name(&json, url);
        Ok(ContractArtifact { name, deployed_bytecode })
    }

    /// Fetch artifact bytes via HTTP
    fn fetch_remote(&self, url: &str) -> Result<Vec<u8>> {
        let response = self
            .client
            .get(url)
            .send()
            .map_err(|e| BuilderError::RemoteFetchFailed(url.to_string(), e.to_string()))?
            // Reject non-2xx responses up front; otherwise an HTML/JSON error
            // page (e.g. a 404) would be handed to the JSON parser and surface
            // as a confusing parse or missing-bytecode error.
            .error_for_status()
            .map_err(|e| BuilderError::RemoteFetchFailed(url.to_string(), e.to_string()))?;
        let data = response
            .bytes()
            .map_err(|e| BuilderError::RemoteFetchFailed(url.to_string(), e.to_string()))?
            .to_vec();
        Ok(data)
    }

    /// Extract bytecode from artifact JSON (`deployedBytecode.object`)
    fn extract_bytecode(json: &Value, url: &str) -> Result<Bytes> {
        if let Some(hex) = json
            .get("deployedBytecode")
            .and_then(|deployed| deployed.get("object"))
            .and_then(|object| object.as_str())
        {
            return Self::parse_hex_bytecode(hex, url);
        }
        Err(BuilderError::MissingBytecode(url.to_string()))
    }

    /// Parse hex string into bytecode
    fn parse_hex_bytecode(hex: &str, url: &str) -> Result<Bytes> {
        hex::decode(hex)
            .map(Bytes::from)
            .map_err(|_| BuilderError::InvalidHex(format!("Invalid hex in bytecode: {}", url)))
    }

    /// Extract contract name from artifact JSON or URL
    fn extract_name(json: &Value, url: &str) -> String {
        if let Some(name) = json.get("contractName").and_then(|n| n.as_str()) {
            return name.to_string();
        }
        // Fallback to the URL's filename, stripping a `.json` suffix; yields
        // "unknown" when the last path segment does not end in `.json`.
        // (`rsplit(..).next()` replaces clippy-flagged `split(..).last()`.)
        url.rsplit('/').next().and_then(|s| s.strip_suffix(".json")).unwrap_or("unknown").to_string()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/manifest.rs | crates/genesis-builder/src/manifest.rs | use crate::{
error::{BuilderError, Result},
types::Manifest,
};
use std::{fs, path::Path};
/// Load and parse the genesis contracts manifest from a TOML file
///
/// Returns a dedicated `ManifestNotFound` error when the file is absent and
/// validates the parsed manifest before returning it.
pub fn load_manifest(path: &Path) -> Result<Manifest> {
    // Surface a specific "not found" error rather than a generic IO error.
    if !path.exists() {
        return Err(BuilderError::ManifestNotFound(path.to_path_buf()));
    }
    let raw = fs::read_to_string(path)?;
    let parsed: Manifest = toml::from_str(&raw)?;
    // Reject structurally invalid manifests before handing them to callers.
    validate_manifest(&parsed)?;
    Ok(parsed)
}
/// Validate the addresses in the manifest
///
/// Only checks that each address carries a `0x` prefix; full hex validation
/// happens later when the address is actually parsed.
fn validate_addresses(manifest: &Manifest) -> Result<()> {
    // Report the first contract whose address lacks the required prefix.
    match manifest.contracts.iter().find(|(_, config)| !config.address.starts_with("0x")) {
        Some((name, _)) => Err(BuilderError::InvalidAddress(format!(
            "{}: address must start with 0x",
            name
        ))),
        None => Ok(()),
    }
}
/// Validate the manifest structure
///
/// Ensures at least one contract is defined and that every contract address
/// passes the basic prefix check.
fn validate_manifest(manifest: &Manifest) -> Result<()> {
    // An empty contract set is almost certainly a configuration mistake.
    if manifest.contracts.is_empty() {
        return Err(BuilderError::NoContractsDefined);
    }
    // Cheap address sanity checks before any further processing.
    validate_addresses(manifest)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.