repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ethereum/payload/src/validator.rs | crates/ethereum/payload/src/validator.rs | //! Validates execution payload wrt Ethereum consensus rules
use alloy_consensus::Block;
use alloy_rpc_types_engine::{ExecutionData, PayloadError};
use reth_chainspec::EthereumHardforks;
use reth_payload_validator::{cancun, prague, shanghai};
use reth_primitives_traits::{Block as _, SealedBlock, SignedTransaction};
use std::sync::Arc;
/// Execution payload validator.
///
/// Holds a shared chain spec so payloads can be validated against the hardfork
/// rules that are active at the block's timestamp.
#[derive(Clone, Debug)]
pub struct EthereumExecutionPayloadValidator<ChainSpec> {
    /// Chain spec to validate against.
    chain_spec: Arc<ChainSpec>,
}
impl<ChainSpec> EthereumExecutionPayloadValidator<ChainSpec> {
    /// Create a new validator backed by the given chain spec.
    pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
        Self { chain_spec }
    }
    /// Returns the chain spec used by the validator.
    #[inline]
    pub const fn chain_spec(&self) -> &Arc<ChainSpec> {
        &self.chain_spec
    }
}
impl<ChainSpec: EthereumHardforks> EthereumExecutionPayloadValidator<ChainSpec> {
    /// Ensures that the given payload does not violate any consensus rules that concern the
    /// block's layout.
    ///
    /// Delegates to the free function [`ensure_well_formed_payload`]; see its documentation
    /// for the full list of checks performed.
    pub fn ensure_well_formed_payload<T: SignedTransaction>(
        &self,
        payload: ExecutionData,
    ) -> Result<SealedBlock<Block<T>>, PayloadError> {
        ensure_well_formed_payload(&self.chain_spec, payload)
    }
}
/// Ensures that the given payload does not violate any consensus rules that concern the block's
/// layout, like:
/// - missing or invalid base fee
/// - invalid extra data
/// - invalid transactions
/// - incorrect hash
/// - the versioned hashes passed with the payload do not exactly match transaction versioned
///   hashes
/// - the block does not contain blob transactions if it is pre-cancun
///
/// The checks are done in the order that conforms with the engine-API specification.
///
/// This is intended to be invoked after receiving the payload from the CLI.
/// The additional [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields) are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#engine_newpayloadv3>
///
/// If [`CancunPayloadFields`](alloy_rpc_types_engine::CancunPayloadFields) are present in the
/// sidecar, the versioned hashes in the block are validated against them. If the payload fields
/// are not provided but versioned hashes exist in the block, this is considered an error:
/// [`PayloadError::InvalidVersionedHashes`].
///
/// This validates versioned hashes according to the Engine API Cancun spec:
/// <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#specification>
pub fn ensure_well_formed_payload<ChainSpec, T>(
    chain_spec: ChainSpec,
    payload: ExecutionData,
) -> Result<SealedBlock<Block<T>>, PayloadError>
where
    ChainSpec: EthereumHardforks,
    T: SignedTransaction,
{
    let ExecutionData { payload, sidecar } = payload;
    let payload_hash = payload.block_hash();
    // Decode the payload (together with sidecar fields) into a block, then seal it.
    let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow();
    // The hash advertised by the payload must match the hash of the decoded block.
    let computed_hash = sealed_block.hash();
    if computed_hash != payload_hash {
        return Err(PayloadError::BlockHash { execution: computed_hash, consensus: payload_hash })
    }
    // Fork-dependent field checks, in the order mandated by the engine-API spec.
    let timestamp = sealed_block.timestamp_seconds();
    shanghai::ensure_well_formed_fields(
        sealed_block.body(),
        chain_spec.is_shanghai_active_at_timestamp(timestamp),
    )?;
    cancun::ensure_well_formed_fields(
        &sealed_block,
        sidecar.cancun(),
        chain_spec.is_cancun_active_at_timestamp(timestamp),
    )?;
    prague::ensure_well_formed_fields(
        sealed_block.body(),
        sidecar.prague(),
        chain_spec.is_prague_active_at_timestamp(timestamp),
    )?;
    Ok(sealed_block)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/build.rs | docs/vocs/docs/snippets/sources/exex/remote/build.rs | fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::compile_protos("proto/exex.proto")?;
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/lib.rs | docs/vocs/docs/snippets/sources/exex/remote/src/lib.rs | pub mod proto {
tonic::include_proto!("exex");
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/exex_4.rs | docs/vocs/docs/snippets/sources/exex/remote/src/exex_4.rs | use futures_util::TryStreamExt;
use remote_exex::proto::{
self,
remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
};
use reth::{builder::NodeTypes, primitives::EthPrimitives};
use reth_exex::{ExExContext, ExExEvent, ExExNotification};
use reth_node_api::FullNodeComponents;
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::info;
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{transport::Server, Request, Response, Status};
/// gRPC service that fans ExEx notifications out to remote subscribers.
struct ExExService {
    /// Broadcast sender shared with the ExEx; each subscriber calls `subscribe()` on it.
    notifications: Arc<broadcast::Sender<ExExNotification>>,
}
#[tonic::async_trait]
impl RemoteExEx for ExExService {
    /// Stream of serialized notifications delivered to one subscriber.
    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
    /// Subscribes the caller to all future ExEx notifications.
    async fn subscribe(
        &self,
        _request: Request<proto::SubscribeRequest>,
    ) -> Result<Response<Self::SubscribeStream>, Status> {
        // Bounded channel bridging the broadcast receiver to the gRPC response stream.
        let (tx, rx) = mpsc::channel(1);
        let mut notifications = self.notifications.subscribe();
        // Forward every broadcast notification to this subscriber, bincode-encoded.
        tokio::spawn(async move {
            while let Ok(notification) = notifications.recv().await {
                let proto_notification = proto::ExExNotification {
                    // Serialize by reference (`&notification`); the source text was
                    // mojibake (`¬ification`) from an `&not` HTML-entity decode.
                    data: bincode::serialize(&notification).expect("failed to serialize"),
                };
                tx.send(Ok(proto_notification))
                    .await
                    .expect("failed to send notification to client");
                info!("Notification sent to the gRPC client");
            }
        });
        Ok(Response::new(ReceiverStream::new(rx)))
    }
}
// ANCHOR: snippet
/// ExEx entry point: acks committed heights and rebroadcasts notifications to gRPC clients.
#[expect(dead_code)]
async fn remote_exex<Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>>(
    mut ctx: ExExContext<Node>,
    notifications: Arc<broadcast::Sender<ExExNotification>>,
) -> eyre::Result<()> {
    while let Some(notification) = ctx.notifications.try_next().await? {
        if let Some(committed_chain) = notification.committed_chain() {
            // Tell the node this ExEx is done with everything up to this height.
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
        }
        info!("Notification sent to the gRPC server");
        // A send error only means no subscriber is currently listening; ignore it.
        let _ = notifications.send(notification);
    }
    Ok(())
}
// ANCHOR_END: snippet
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(|builder, _| async move {
        // Broadcast channel shared between the ExEx producer and the gRPC fan-out service.
        let notifications = Arc::new(broadcast::channel(1).0);
        let server = Server::builder()
            .add_service(RemoteExExServer::new(ExExService {
                notifications: notifications.clone(),
            }))
            .serve("[::1]:10000".parse().unwrap());
        let handle = builder.node(EthereumNode::default()).launch().await?;
        // Run the gRPC server on the node's task executor, alongside the node itself.
        handle.node.task_executor.spawn_critical("gRPC server", async move {
            server.await.expect("failed to start gRPC server")
        });
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/exex_1.rs | docs/vocs/docs/snippets/sources/exex/remote/src/exex_1.rs | use remote_exex::proto::{
self,
remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
};
use reth_node_ethereum::EthereumNode;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{transport::Server, Request, Response, Status};
/// Minimal gRPC service stub; its `subscribe()` returns a stream that never yields.
struct ExExService {}
#[tonic::async_trait]
impl RemoteExEx for ExExService {
    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
    async fn subscribe(
        &self,
        _request: Request<proto::SubscribeRequest>,
    ) -> Result<Response<Self::SubscribeStream>, Status> {
        // The sender is dropped immediately, so the returned stream ends right away.
        let (_tx, rx) = mpsc::channel(1);
        Ok(Response::new(ReceiverStream::new(rx)))
    }
}
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(|builder, _| async move {
        // Serve the stub ExEx gRPC service next to a stock Ethereum node.
        let server = Server::builder()
            .add_service(RemoteExExServer::new(ExExService {}))
            .serve("[::1]:10000".parse().unwrap());
        let handle = builder.node(EthereumNode::default()).launch().await?;
        handle.node.task_executor.spawn_critical("gRPC server", async move {
            server.await.expect("failed to start gRPC server")
        });
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/exex_2.rs | docs/vocs/docs/snippets/sources/exex/remote/src/exex_2.rs | use remote_exex::proto::{
self,
remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
};
use reth_exex::ExExNotification;
use reth_node_ethereum::EthereumNode;
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{transport::Server, Request, Response, Status};
/// gRPC service holding the broadcast handle; not consumed yet at this step of the tutorial.
#[expect(dead_code)]
struct ExExService {
    notifications: Arc<broadcast::Sender<ExExNotification>>,
}
#[tonic::async_trait]
impl RemoteExEx for ExExService {
    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
    async fn subscribe(
        &self,
        _request: Request<proto::SubscribeRequest>,
    ) -> Result<Response<Self::SubscribeStream>, Status> {
        // Still a stub: the sender is dropped, so the stream terminates immediately.
        let (_tx, rx) = mpsc::channel(1);
        Ok(Response::new(ReceiverStream::new(rx)))
    }
}
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(|builder, _| async move {
        // Create the broadcast channel and hand its sender to the gRPC service.
        let notifications = Arc::new(broadcast::channel(1).0);
        let server = Server::builder()
            .add_service(RemoteExExServer::new(ExExService {
                notifications: notifications.clone(),
            }))
            .serve("[::1]:10000".parse().unwrap());
        let handle = builder.node(EthereumNode::default()).launch().await?;
        handle.node.task_executor.spawn_critical("gRPC server", async move {
            server.await.expect("failed to start gRPC server")
        });
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/consumer.rs | docs/vocs/docs/snippets/sources/exex/remote/src/consumer.rs | use remote_exex::proto::{remote_ex_ex_client::RemoteExExClient, SubscribeRequest};
use reth_exex::ExExNotification;
use reth_tracing::{tracing::info, RethTracer, Tracer};
#[tokio::main]
async fn main() -> eyre::Result<()> {
    let _ = RethTracer::new().init()?;
    // Lift the message-size caps: serialized chain notifications can be very large.
    let mut client = RemoteExExClient::connect("http://[::1]:10000")
        .await?
        .max_encoding_message_size(usize::MAX)
        .max_decoding_message_size(usize::MAX);
    let mut stream = client.subscribe(SubscribeRequest {}).await?.into_inner();
    while let Some(notification) = stream.message().await? {
        // Decode the bincode payload back into an `ExExNotification`.
        // `&notification.data` restores the reference lost to `&not` mojibake in the source.
        let notification: ExExNotification = bincode::deserialize(&notification.data)?;
        match notification {
            ExExNotification::ChainCommitted { new } => {
                info!(committed_chain = ?new.range(), "Received commit");
            }
            ExExNotification::ChainReorged { old, new } => {
                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
            }
            ExExNotification::ChainReverted { old } => {
                info!(reverted_chain = ?old.range(), "Received revert");
            }
        };
    }
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/exex.rs | docs/vocs/docs/snippets/sources/exex/remote/src/exex.rs | use futures_util::TryStreamExt;
use remote_exex::proto::{
self,
remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
};
use reth::{builder::NodeTypes, primitives::EthPrimitives};
use reth_exex::{ExExContext, ExExEvent, ExExNotification};
use reth_node_api::FullNodeComponents;
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::info;
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{transport::Server, Request, Response, Status};
/// gRPC service that fans ExEx notifications out to remote subscribers.
struct ExExService {
    /// Broadcast sender shared with the ExEx; each subscriber calls `subscribe()` on it.
    notifications: Arc<broadcast::Sender<ExExNotification>>,
}
#[tonic::async_trait]
impl RemoteExEx for ExExService {
    /// Stream of serialized notifications delivered to one subscriber.
    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
    /// Subscribes the caller to all future ExEx notifications.
    async fn subscribe(
        &self,
        _request: Request<proto::SubscribeRequest>,
    ) -> Result<Response<Self::SubscribeStream>, Status> {
        // Bounded channel bridging the broadcast receiver to the gRPC response stream.
        let (tx, rx) = mpsc::channel(1);
        let mut notifications = self.notifications.subscribe();
        // Forward every broadcast notification to this subscriber, bincode-encoded.
        tokio::spawn(async move {
            while let Ok(notification) = notifications.recv().await {
                let proto_notification = proto::ExExNotification {
                    // Serialize by reference (`&notification`); the source text was
                    // mojibake (`¬ification`) from an `&not` HTML-entity decode.
                    data: bincode::serialize(&notification).expect("failed to serialize"),
                };
                tx.send(Ok(proto_notification))
                    .await
                    .expect("failed to send notification to client");
                info!("Notification sent to the gRPC client");
            }
        });
        Ok(Response::new(ReceiverStream::new(rx)))
    }
}
/// ExEx entry point: acks committed heights and rebroadcasts notifications to gRPC clients.
async fn remote_exex<Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>>(
    mut ctx: ExExContext<Node>,
    notifications: Arc<broadcast::Sender<ExExNotification>>,
) -> eyre::Result<()> {
    while let Some(notification) = ctx.notifications.try_next().await? {
        if let Some(committed_chain) = notification.committed_chain() {
            // Tell the node this ExEx is done with everything up to this height.
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
        }
        info!("Notification sent to the gRPC server");
        // A send error only means no subscriber is currently listening; ignore it.
        let _ = notifications.send(notification);
    }
    Ok(())
}
// ANCHOR: snippet
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(|builder, _| async move {
        // Channel shared by the ExEx (producer) and the gRPC service (fan-out).
        let notifications = Arc::new(broadcast::channel(1).0);
        let server = Server::builder()
            .add_service(RemoteExExServer::new(ExExService {
                notifications: notifications.clone(),
            }))
            .serve("[::1]:10000".parse().unwrap());
        let handle = builder
            .node(EthereumNode::default())
            // The closure resolves to the ExEx future itself, which the node then polls.
            .install_exex("remote-exex", |ctx| async move { Ok(remote_exex(ctx, notifications)) })
            .launch()
            .await?;
        handle.node.task_executor.spawn_critical("gRPC server", async move {
            server.await.expect("failed to start gRPC server")
        });
        handle.wait_for_node_exit().await
    })
}
// ANCHOR_END: snippet
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/remote/src/exex_3.rs | docs/vocs/docs/snippets/sources/exex/remote/src/exex_3.rs | use remote_exex::proto::{
self,
remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
};
use reth_exex::ExExNotification;
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::info;
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{transport::Server, Request, Response, Status};
/// gRPC service that fans ExEx notifications out to remote subscribers.
struct ExExService {
    /// Broadcast sender shared with the ExEx; each subscriber calls `subscribe()` on it.
    notifications: Arc<broadcast::Sender<ExExNotification>>,
}
// ANCHOR: snippet
#[tonic::async_trait]
impl RemoteExEx for ExExService {
    /// Stream of serialized notifications delivered to one subscriber.
    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
    /// Subscribes the caller to all future ExEx notifications.
    async fn subscribe(
        &self,
        _request: Request<proto::SubscribeRequest>,
    ) -> Result<Response<Self::SubscribeStream>, Status> {
        // Bounded channel bridging the broadcast receiver to the gRPC response stream.
        let (tx, rx) = mpsc::channel(1);
        let mut notifications = self.notifications.subscribe();
        // Forward every broadcast notification to this subscriber, bincode-encoded.
        tokio::spawn(async move {
            while let Ok(notification) = notifications.recv().await {
                let proto_notification = proto::ExExNotification {
                    // Serialize by reference (`&notification`); the source text was
                    // mojibake (`¬ification`) from an `&not` HTML-entity decode.
                    data: bincode::serialize(&notification).expect("failed to serialize"),
                };
                tx.send(Ok(proto_notification))
                    .await
                    .expect("failed to send notification to client");
                info!("Notification sent to the gRPC client");
            }
        });
        Ok(Response::new(ReceiverStream::new(rx)))
    }
}
// ANCHOR_END: snippet
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(|builder, _| async move {
        // Broadcast channel whose sender is handed to the gRPC service.
        let notifications = Arc::new(broadcast::channel(1).0);
        let server = Server::builder()
            .add_service(RemoteExExServer::new(ExExService {
                notifications: notifications.clone(),
            }))
            .serve("[::1]:10000".parse().unwrap());
        let handle = builder.node(EthereumNode::default()).launch().await?;
        handle.node.task_executor.spawn_critical("gRPC server", async move {
            server.await.expect("failed to start gRPC server")
        });
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/1.rs | docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/1.rs | use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use futures_util::{FutureExt, TryStreamExt};
use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives};
use reth_exex::{ExExContext, ExExEvent, ExExNotification};
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::info;
/// ExEx future that logs every notification it receives and acks committed heights.
struct MyExEx<Node: FullNodeComponents> {
    ctx: ExExContext<Node>,
}
impl<Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>> Future
    for MyExEx<Node>
{
    type Output = eyre::Result<()>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        // Drain all currently available notifications; `ready!` suspends when none are pending.
        while let Some(notification) = ready!(this.ctx.notifications.try_next().poll_unpin(cx))? {
            // Borrow (`&notification`) so the value can still be consumed below;
            // the source text was mojibake (`¬ification`) from an `&not` entity decode.
            match &notification {
                ExExNotification::ChainCommitted { new } => {
                    info!(committed_chain = ?new.range(), "Received commit");
                }
                ExExNotification::ChainReorged { old, new } => {
                    info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
                }
                ExExNotification::ChainReverted { old } => {
                    info!(reverted_chain = ?old.range(), "Received revert");
                }
            };
            if let Some(committed_chain) = notification.committed_chain() {
                // Ack the height so the node can prune data below it.
                this.ctx
                    .events
                    .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
            }
        }
        Poll::Ready(Ok(()))
    }
}
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(async move |builder, _| {
        let handle = builder
            .node(EthereumNode::default())
            // The installed closure returns the ExEx future; the node drives it.
            .install_exex("my-exex", async move |ctx| Ok(MyExEx { ctx }))
            .launch()
            .await?;
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/2.rs | docs/vocs/docs/snippets/sources/exex/tracking-state/src/bin/2.rs | use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use alloy_primitives::BlockNumber;
use futures_util::{FutureExt, TryStreamExt};
use reth::{
api::{BlockBody, FullNodeComponents},
builder::NodeTypes,
primitives::EthPrimitives,
};
use reth_exex::{ExExContext, ExExEvent};
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::info;
/// ExEx that tracks the first committed block and a running committed-transaction count.
struct MyExEx<Node: FullNodeComponents> {
    ctx: ExExContext<Node>,
    /// First block that was committed since the start of the ExEx.
    first_block: Option<BlockNumber>,
    /// Total number of transactions committed.
    transactions: u64,
}
impl<Node: FullNodeComponents> MyExEx<Node> {
    /// Creates the ExEx with zeroed counters.
    fn new(ctx: ExExContext<Node>) -> Self {
        Self { ctx, first_block: None, transactions: 0 }
    }
}
impl<Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>> Future
    for MyExEx<Node>
{
    type Output = eyre::Result<()>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        // Drain all currently available notifications; `ready!` suspends when none are pending.
        while let Some(notification) = ready!(this.ctx.notifications.try_next().poll_unpin(cx))? {
            if let Some(reverted_chain) = notification.reverted_chain() {
                // Reorg/revert: subtract the transactions of the unwound blocks.
                this.transactions = this.transactions.saturating_sub(
                    reverted_chain.blocks_iter().map(|b| b.body().transaction_count() as u64).sum(),
                );
            }
            if let Some(committed_chain) = notification.committed_chain() {
                // Remember the very first committed block number.
                this.first_block.get_or_insert(committed_chain.first().number);
                this.transactions += committed_chain
                    .blocks_iter()
                    .map(|b| b.body().transaction_count() as u64)
                    .sum::<u64>();
                // Ack the height so the node can prune data below it.
                this.ctx
                    .events
                    .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
            }
            if let Some(first_block) = this.first_block {
                info!(%first_block, transactions = %this.transactions, "Total number of transactions");
            }
        }
        Poll::Ready(Ok(()))
    }
}
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(async move |builder, _| {
        let handle = builder
            .node(EthereumNode::default())
            // Install the counting ExEx; its future is polled by the node.
            .install_exex("my-exex", async move |ctx| Ok(MyExEx::new(ctx)))
            .launch()
            .await?;
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/1.rs | docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/1.rs | use reth_node_ethereum::EthereumNode;
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(async move |builder, _| {
        // Launch a stock Ethereum node and block until it shuts down.
        let handle = builder.node(EthereumNode::default()).launch().await?;
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/2.rs | docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/2.rs | use reth::api::FullNodeComponents;
use reth_exex::ExExContext;
use reth_node_ethereum::EthereumNode;
/// A no-op ExEx: never processes notifications and never exits.
async fn my_exex<Node: FullNodeComponents>(mut _ctx: ExExContext<Node>) -> eyre::Result<()> {
    #[expect(clippy::empty_loop)]
    loop {}
}
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(async move |builder, _| {
        let handle = builder
            .node(EthereumNode::default())
            // Register the ExEx under a stable identifier before launching.
            .install_exex("my-exex", async move |ctx| Ok(my_exex(ctx)))
            .launch()
            .await?;
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/3.rs | docs/vocs/docs/snippets/sources/exex/hello-world/src/bin/3.rs | use futures_util::TryStreamExt;
use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives};
use reth_exex::{ExExContext, ExExEvent, ExExNotification};
use reth_node_ethereum::EthereumNode;
use reth_tracing::tracing::info;
/// ExEx that logs every notification and acks committed heights back to the node.
async fn my_exex<Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>>(
    mut ctx: ExExContext<Node>,
) -> eyre::Result<()> {
    while let Some(notification) = ctx.notifications.try_next().await? {
        // Borrow (`&notification`) so the value can still be consumed below;
        // the source text was mojibake (`¬ification`) from an `&not` entity decode.
        match &notification {
            ExExNotification::ChainCommitted { new } => {
                info!(committed_chain = ?new.range(), "Received commit");
            }
            ExExNotification::ChainReorged { old, new } => {
                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
            }
            ExExNotification::ChainReverted { old } => {
                info!(reverted_chain = ?old.range(), "Received revert");
            }
        };
        if let Some(committed_chain) = notification.committed_chain() {
            // Ack the height so the node can prune data below it.
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
        }
    }
    Ok(())
}
fn main() -> eyre::Result<()> {
    reth::cli::Cli::parse_args().run(async move |builder, _| {
        let handle = builder
            .node(EthereumNode::default())
            // Register the logging ExEx before launching the node.
            .install_exex("my-exex", async move |ctx| Ok(my_exex(ctx)))
            .launch()
            .await?;
        handle.wait_for_node_exit().await
    })
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/docs/cli/help.rs | docs/cli/help.rs | #!/usr/bin/env -S cargo +nightly -Zscript
---
[package]
edition = "2021"
[dependencies]
clap = { version = "4", features = ["derive"] }
regex = "1"
---
use clap::Parser;
use regex::Regex;
use std::{
borrow::Cow,
fmt, fs, io,
iter::once,
path::{Path, PathBuf},
process::{Command, Stdio},
str,
sync::LazyLock,
};
/// Contents of the generated `README.mdx` landing page (written when `--readme` is passed).
const README: &str = r#"import Summary from './SUMMARY.mdx';
# CLI Reference
Automatically-generated CLI reference from `--help` output.
<Summary />
"#;
/// When `true`, trailing whitespace is stripped from every generated markdown line.
const TRIM_LINE_END_MARKDOWN: bool = true;
/// Lazy static regex to avoid recompiling the same regex pattern multiple times.
///
/// Expands to a `&'static Regex` that is compiled once on first use.
macro_rules! regex {
    ($re:expr) => {{
        static RE: LazyLock<Regex> =
            LazyLock::new(|| Regex::new($re).expect("Failed to compile regex pattern"));
        &*RE
    }};
}
/// Generate markdown files from help output of commands
#[derive(Parser, Debug)]
#[command(about, long_about = None)]
struct Args {
    /// Root directory
    #[arg(long, default_value_t = String::from("."))]
    root_dir: String,
    /// Indentation for the root SUMMARY.mdx file
    #[arg(long, default_value_t = 2)]
    root_indentation: usize,
    /// Output directory
    #[arg(long)]
    out_dir: PathBuf,
    /// Whether to add a README.md file
    #[arg(long)]
    readme: bool,
    /// Whether to update the root SUMMARY.mdx file
    #[arg(long)]
    root_summary: bool,
    /// Print verbose output
    #[arg(short, long)]
    verbose: bool,
    /// Commands to generate markdown for.
    ///
    /// Each value is a path to a binary whose `--help` output is crawled recursively.
    #[arg(required = true, num_args = 1..)]
    commands: Vec<PathBuf>,
}
/// Writes `content` to `file_path`, optionally normalizing trailing whitespace
/// on every line (controlled by [`TRIM_LINE_END_MARKDOWN`]).
fn write_file(file_path: &Path, content: &str) -> io::Result<()> {
    let body = if TRIM_LINE_END_MARKDOWN {
        let trimmed: Vec<&str> = content.lines().map(str::trim_end).collect();
        trimmed.join("\n")
    } else {
        content.to_string()
    };
    fs::write(file_path, body)
}
fn main() -> io::Result<()> {
    let args = Args::parse();
    debug_assert!(args.commands.len() >= 1);
    let out_dir = args.out_dir;
    fs::create_dir_all(&out_dir)?;
    // Worklist of commands left to crawl; reversed so `pop` preserves CLI order.
    let mut todo_iter: Vec<Cmd> = args
        .commands
        .iter()
        .rev() // reverse to keep the order (pop)
        .map(Cmd::new)
        .collect();
    let mut output = Vec::new();
    // Iterate over all commands and their subcommands.
    while let Some(cmd) = todo_iter.pop() {
        let (new_subcmds, stdout) = get_entry(&cmd)?;
        if args.verbose && !new_subcmds.is_empty() {
            println!("Found subcommands for \"{}\": {:?}", cmd.command_name(), new_subcmds);
        }
        // Add new subcommands to todo_iter (so that they are processed in the correct order).
        for subcmd in new_subcmds.into_iter().rev() {
            let new_subcmds: Vec<_> = cmd.subcommands.iter().cloned().chain(once(subcmd)).collect();
            todo_iter.push(Cmd { cmd: cmd.cmd, subcommands: new_subcmds });
        }
        output.push((cmd, stdout));
    }
    // Generate markdown files.
    for (cmd, stdout) in &output {
        cmd_markdown(&out_dir, cmd, stdout)?;
    }
    // Generate SUMMARY.mdx.
    let summary: String =
        output.iter().map(|(cmd, _)| cmd_summary(cmd, 0)).chain(once("\n".to_string())).collect();
    println!("Writing SUMMARY.mdx to \"{}\"", out_dir.to_string_lossy());
    write_file(&out_dir.join("SUMMARY.mdx"), &summary)?;
    // Generate README.md.
    if args.readme {
        let path = &out_dir.join("README.mdx");
        if args.verbose {
            println!("Writing README.mdx to \"{}\"", path.to_string_lossy());
        }
        write_file(path, README)?;
    }
    // Generate root SUMMARY.mdx.
    if args.root_summary {
        let root_summary: String =
            output.iter().map(|(cmd, _)| cmd_summary(cmd, args.root_indentation)).collect();
        let path = Path::new(args.root_dir.as_str());
        if args.verbose {
            println!("Updating root summary in \"{}\"", path.to_string_lossy());
        }
        // TODO: This is where we update the cli reference sidebar.ts
        update_root_summary(path, &root_summary)?;
    }
    Ok(())
}
/// Returns the subcommands and help output for a command.
///
/// Runs `<cmd> <subcommands...> --help` with color disabled and a fixed terminal
/// size so the captured output is stable across environments.
fn get_entry(cmd: &Cmd) -> io::Result<(Vec<String>, String)> {
    let output = Command::new(cmd.cmd)
        .args(&cmd.subcommands)
        .arg("--help")
        .env("NO_COLOR", "1")
        .env("COLUMNS", "100")
        .env("LINES", "10000")
        .stdout(Stdio::piped())
        .output()?;
    if !output.status.success() {
        let stderr = str::from_utf8(&output.stderr).unwrap_or("Failed to parse stderr as UTF-8");
        return Err(io::Error::new(
            io::ErrorKind::Other,
            format!("Command \"{}\" failed:\n{}", cmd, stderr),
        ));
    }
    let stdout = str::from_utf8(&output.stdout)
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?
        .to_string();
    // Parse subcommands from the help output
    let subcmds = parse_sub_commands(&stdout);
    Ok((subcmds, stdout))
}
/// Returns a list of subcommands from the help output of a command.
fn parse_sub_commands(s: &str) -> Vec<String> {
    // This regex matches lines starting with two spaces, followed by the subcommand name.
    // (Two literal spaces — clap indents the command list by two; a single space would
    // never match and the crawler would find no subcommands at all.)
    let re = regex!(r"^  (\S+)");
    s.split("Commands:")
        .nth(1) // Get the part after "Commands:"
        .map(|commands_section| {
            commands_section
                .lines()
                .take_while(|line| !line.starts_with("Options:") && !line.starts_with("Arguments:"))
                .filter_map(|line| {
                    // Capture group 1 is the subcommand name, already owned as `String`,
                    // so no further `.map(String::from)` pass is needed.
                    re.captures(line).and_then(|cap| cap.get(1).map(|m| m.as_str().to_string()))
                })
                // `help` is clap's implicit subcommand; skip it.
                .filter(|cmd| cmd != "help")
                .collect()
        })
        .unwrap_or_default() // Return an empty Vec if "Commands:" was not found
}
/// Writes the markdown for a command to out_dir.
///
/// The output path mirrors the command hierarchy (`reth db stats` -> `reth/db/stats.mdx`).
fn cmd_markdown(out_dir: &Path, cmd: &Cmd, stdout: &str) -> io::Result<()> {
    let out = format!("# {}\n\n{}", cmd, help_markdown(cmd, stdout));
    let out_path = out_dir.join(cmd.to_string().replace(" ", "/"));
    fs::create_dir_all(out_path.parent().unwrap())?;
    write_file(&out_path.with_extension("mdx"), &out)?;
    Ok(())
}
/// Renders a command's help output as markdown: the one-line description,
/// the invocation in a bash fence, and the scrubbed help text in a txt fence.
fn help_markdown(cmd: &Cmd, stdout: &str) -> String {
    let (description, rest) = parse_description(stdout);
    let processed = preprocess_help(rest.trim());
    format!(
        "{}\n\n```bash\n$ {} --help\n```\n```txt\n{}\n```",
        description, cmd, processed
    )
}
/// Splits the help output into a one-line description and the rest.
///
/// The description is the first line preceding the `Usage:` marker; when the
/// marker is absent, the whole input is returned unchanged with an empty
/// description.
fn parse_description(s: &str) -> (&str, &str) {
    let Some(idx) = s.find("Usage:") else {
        return ("", s);
    };
    let description = s[..idx].trim().lines().next().unwrap_or("");
    (description, &s[idx..])
}
/// Returns one SUMMARY.mdx bullet line for a command, indented two extra
/// spaces per nesting level (plus the caller-supplied base indent).
fn cmd_summary(cmd: &Cmd, indent: usize) -> String {
    let name = cmd.to_string();
    let page_path = name.replace(" ", "/");
    let depth = indent + cmd.subcommands.len() * 2;
    let padding = " ".repeat(depth);
    format!("{}- [`{}`](/cli/{})\n", padding, name, page_path)
}
/// Overwrites the root SUMMARY.mdx file with the generated content.
///
/// The destination is fixed relative to `root_dir` at `vocs/docs/pages/cli/SUMMARY.mdx`.
fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> {
    let summary_file = root_dir.join("vocs/docs/pages/cli/SUMMARY.mdx");
    println!("Overwriting {}", summary_file.display());
    // Simply write the root summary content to the file
    write_file(&summary_file, root_summary)
}
/// Preprocesses the help output of a command.
///
/// Applies a fixed set of regex replacements that scrub machine-specific values
/// (user paths, version/SHA/arch strings, CPU-count-dependent defaults) so the
/// generated docs are reproducible across machines.
fn preprocess_help(s: &str) -> Cow<'_, str> {
    static REPLACEMENTS: LazyLock<Vec<(Regex, &str)>> = LazyLock::new(|| {
        let patterns: &[(&str, &str)] = &[
            // Remove the user-specific paths.
            (r"default: /.*/reth", "default: <CACHE_DIR>"),
            // Remove the commit SHA and target architecture triple or fourth
            // rustup available targets:
            //   aarch64-apple-darwin
            //   x86_64-unknown-linux-gnu
            //   x86_64-pc-windows-gnu
            (
                r"default: reth/.*-[0-9A-Fa-f]{6,10}/([_\w]+)-(\w+)-(\w+)(-\w+)?",
                "default: reth/<VERSION>-<SHA>/<ARCH>",
            ),
            // Remove the OS
            (r"default: reth/.*/\w+", "default: reth/<VERSION>/<OS>"),
            // Remove rpc.max-tracing-requests default value
            (
                r"(rpc.max-tracing-requests <COUNT>\n.*\n.*\n.*\n.*\n.*)\[default: \d+\]",
                r"$1[default: <NUM CPU CORES-2>]",
            ),
            // Handle engine.max-proof-task-concurrency dynamic default
            (
                r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]",
                r"$1[default: <DYNAMIC: CPU cores * 8>]",
            ),
            // Handle engine.reserved-cpu-cores dynamic default
            (
                r"(engine\.reserved-cpu-cores.*)\[default: \d+\]",
                r"$1[default: <DYNAMIC: min(2, CPU cores)>]",
            ),
        ];
        patterns
            .iter()
            .map(|&(re, replace_with)| (Regex::new(re).expect(re), replace_with))
            .collect()
    });
    // Apply each replacement in turn, only reallocating when something actually matched.
    let mut s = Cow::Borrowed(s);
    for (re, replacement) in REPLACEMENTS.iter() {
        if let Cow::Owned(result) = re.replace_all(&s, *replacement) {
            s = Cow::Owned(result);
        }
    }
    s
}
#[derive(Hash, Debug, PartialEq, Eq)]
struct Cmd<'a> {
/// path to binary (e.g. ./target/debug/reth)
cmd: &'a Path,
/// subcommands (e.g. [db, stats])
subcommands: Vec<String>,
}
impl<'a> Cmd<'a> {
fn command_name(&self) -> &str {
self.cmd.file_name().and_then(|os_str| os_str.to_str()).expect("Expect valid command")
}
fn new(cmd: &'a PathBuf) -> Self {
Self { cmd, subcommands: Vec::new() }
}
}
impl<'a> fmt::Display for Cmd<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.command_name())?;
if !self.subcommands.is_empty() {
write!(f, " {}", self.subcommands.join(" "))?;
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node-components/src/main.rs | examples/custom-node-components/src/main.rs | //! This example shows how to configure custom components for a reth node.
#![warn(unused_crate_dependencies)]
use reth_ethereum::{
chainspec::ChainSpec,
cli::interface::Cli,
node::{
api::{FullNodeTypes, NodeTypes},
builder::{components::PoolBuilder, BuilderContext},
node::EthereumAddOns,
EthereumNode,
},
pool::{
blobstore::InMemoryBlobStore, EthTransactionPool, PoolConfig,
TransactionValidationTaskExecutor,
},
provider::CanonStateSubscriptions,
EthPrimitives,
};
use reth_tracing::tracing::{debug, info};
fn main() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
// use the default ethereum node types
.with_types::<EthereumNode>()
// Configure the components of the node
// use default ethereum components but use our custom pool
.with_components(EthereumNode::components().pool(CustomPoolBuilder::default()))
.with_add_ons(EthereumAddOns::default())
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
/// A custom pool builder
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct CustomPoolBuilder {
/// Use custom pool config
pool_config: PoolConfig,
}
/// Implement the [`PoolBuilder`] trait for the custom pool builder
///
/// This will be used to build the transaction pool and its maintenance tasks during launch.
impl<Node> PoolBuilder<Node> for CustomPoolBuilder
where
Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = EthPrimitives>>,
{
type Pool = EthTransactionPool<Node::Provider, InMemoryBlobStore>;
async fn build_pool(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::Pool> {
let data_dir = ctx.config().datadir();
let blob_store = InMemoryBlobStore::default();
let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone())
.with_head_timestamp(ctx.head().timestamp)
.kzg_settings(ctx.kzg_settings()?)
.with_additional_tasks(ctx.config().txpool.additional_validation_tasks)
.build_with_tasks(ctx.task_executor().clone(), blob_store.clone());
let transaction_pool =
reth_ethereum::pool::Pool::eth_pool(validator, blob_store, self.pool_config);
info!(target: "reth::cli", "Transaction pool initialized");
let transactions_path = data_dir.txpool_transactions();
// spawn txpool maintenance task
{
let pool = transaction_pool.clone();
let chain_events = ctx.provider().canonical_state_stream();
let client = ctx.provider().clone();
let transactions_backup_config =
reth_ethereum::pool::maintain::LocalTransactionBackupConfig::with_local_txs_backup(
transactions_path,
);
ctx.task_executor().spawn_critical_with_graceful_shutdown_signal(
"local transactions backup task",
|shutdown| {
reth_ethereum::pool::maintain::backup_local_transactions_task(
shutdown,
pool.clone(),
transactions_backup_config,
)
},
);
// spawn the maintenance task
ctx.task_executor().spawn_critical(
"txpool maintenance task",
reth_ethereum::pool::maintain::maintain_transaction_pool_future(
client,
pool,
chain_events,
ctx.task_executor().clone(),
reth_ethereum::pool::maintain::MaintainPoolConfig {
max_tx_lifetime: transaction_pool.config().max_queued_lifetime,
..Default::default()
},
),
);
debug!(target: "reth::cli", "Spawned txpool maintenance task");
}
Ok(transaction_pool)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/node-builder-api/src/main.rs | examples/node-builder-api/src/main.rs | //! This example showcases various Nodebuilder use cases
use reth_ethereum::{
cli::interface::Cli,
node::{builder::components::NoopNetworkBuilder, node::EthereumAddOns, EthereumNode},
};
/// Maps the ethereum node's network component to the noop implementation.
///
/// This installs the [`NoopNetworkBuilder`] that does not launch a real network.
pub fn noop_network() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
// use the default ethereum node types
.with_types::<EthereumNode>()
// Configure the components of the node
// use default ethereum components but use the Noop network that does nothing but
.with_components(EthereumNode::components().network(NoopNetworkBuilder::eth()))
.with_add_ons(EthereumAddOns::default())
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/full-contract-state/src/main.rs | examples/full-contract-state/src/main.rs | //! Example demonstrating how to extract the full state of a specific contract from the reth
//! database.
//!
//! This example shows how to:
//! 1. Connect to a reth database
//! 2. Get basic account information (balance, nonce, code hash)
//! 3. Get contract bytecode
//! 4. Iterate through all storage slots for the contract
use reth_ethereum::{
chainspec::ChainSpecBuilder,
evm::revm::primitives::{Address, B256, U256},
node::EthereumNode,
primitives::{Account, Bytecode},
provider::{
db::{
cursor::{DbCursorRO, DbDupCursorRO},
tables,
transaction::DbTx,
},
providers::ReadOnlyConfig,
ProviderResult,
},
storage::{DBProvider, StateProvider},
};
use std::{collections::HashMap, str::FromStr};
/// Represents the complete state of a contract including account info, bytecode, and storage
#[derive(Debug, Clone)]
pub struct ContractState {
/// The address of the contract
pub address: Address,
/// Basic account information (balance, nonce, code hash)
pub account: Account,
/// Contract bytecode (None if not a contract or doesn't exist)
pub bytecode: Option<Bytecode>,
/// All storage slots for the contract
pub storage: HashMap<B256, U256>,
}
/// Extract the full state of a specific contract
pub fn extract_contract_state<P: DBProvider>(
provider: &P,
state_provider: &dyn StateProvider,
contract_address: Address,
) -> ProviderResult<Option<ContractState>> {
let account = state_provider.basic_account(&contract_address)?;
let Some(account) = account else {
return Ok(None);
};
let bytecode = state_provider.account_code(&contract_address)?;
let mut storage_cursor = provider.tx_ref().cursor_dup_read::<tables::PlainStorageState>()?;
let mut storage = HashMap::new();
if let Some((_, first_entry)) = storage_cursor.seek_exact(contract_address)? {
storage.insert(first_entry.key, first_entry.value);
while let Some((_, entry)) = storage_cursor.next_dup()? {
storage.insert(entry.key, entry.value);
}
}
Ok(Some(ContractState { address: contract_address, account, bytecode, storage }))
}
fn main() -> eyre::Result<()> {
let address = std::env::var("CONTRACT_ADDRESS")?;
let contract_address = Address::from_str(&address)?;
let datadir = std::env::var("RETH_DATADIR")?;
let spec = ChainSpecBuilder::mainnet().build();
let factory = EthereumNode::provider_factory_builder()
.open_read_only(spec.into(), ReadOnlyConfig::from_datadir(datadir))?;
let provider = factory.provider()?;
let state_provider = factory.latest()?;
let contract_state =
extract_contract_state(&provider, state_provider.as_ref(), contract_address)?;
if let Some(state) = contract_state {
println!("Contract: {}", state.address);
println!("Balance: {}", state.account.balance);
println!("Nonce: {}", state.account.nonce);
println!("Code hash: {:?}", state.account.bytecode_hash);
println!("Storage slots: {}", state.storage.len());
for (key, value) in &state.storage {
println!("\t{key}: {value}");
}
}
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/polygon-p2p/src/chain_cfg.rs | examples/polygon-p2p/src/chain_cfg.rs | use alloy_genesis::Genesis;
use reth_discv4::NodeRecord;
use reth_ethereum::chainspec::{ChainSpec, Head};
use std::sync::Arc;
const SHANGHAI_BLOCK: u64 = 50523000;
pub(crate) fn polygon_chain_spec() -> Arc<ChainSpec> {
let genesis: Genesis =
serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis");
Arc::new(genesis.into())
}
/// Polygon mainnet boot nodes <https://github.com/maticnetwork/bor/blob/master/params/bootnodes.go#L79>
static BOOTNODES: [&str; 4] = [
"enode://b8f1cc9c5d4403703fbf377116469667d2b1823c0daf16b7250aa576bacf399e42c3930ccfcb02c5df6879565a2b8931335565f0e8d3f8e72385ecf4a4bf160a@3.36.224.80:30303",
"enode://8729e0c825f3d9cad382555f3e46dcff21af323e89025a0e6312df541f4a9e73abfa562d64906f5e59c51fe6f0501b3e61b07979606c56329c020ed739910759@54.194.245.5:30303",
"enode://76316d1cb93c8ed407d3332d595233401250d48f8fbb1d9c65bd18c0495eca1b43ec38ee0ea1c257c0abb7d1f25d649d359cdfe5a805842159cfe36c5f66b7e8@52.78.36.216:30303",
"enode://681ebac58d8dd2d8a6eef15329dfbad0ab960561524cf2dfde40ad646736fe5c244020f20b87e7c1520820bc625cfb487dd71d63a3a3bf0baea2dbb8ec7c79f1@34.240.245.39:30303",
];
pub(crate) fn head() -> Head {
Head { number: SHANGHAI_BLOCK, ..Default::default() }
}
pub(crate) fn boot_nodes() -> Vec<NodeRecord> {
BOOTNODES[..].iter().map(|s| s.parse().unwrap()).collect()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/polygon-p2p/src/main.rs | examples/polygon-p2p/src/main.rs | //! Example for how hook into the polygon p2p network
//!
//! Run with
//!
//! ```sh
//! cargo run -p polygon-p2p
//! ```
//!
//! This launches a regular reth node overriding the engine api payload builder with our custom.
//!
//! Credits to: <https://merkle.io/blog/modifying-reth-to-build-the-fastest-transaction-network-on-bsc-and-polygon>
#![warn(unused_crate_dependencies)]
use chain_cfg::{boot_nodes, head, polygon_chain_spec};
use reth_discv4::Discv4ConfigBuilder;
use reth_ethereum::network::{
api::events::SessionInfo, config::NetworkMode, NetworkConfig, NetworkEvent,
NetworkEventListenerProvider, NetworkManager,
};
use reth_tracing::{
tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer,
Tracer,
};
use secp256k1::{rand, SecretKey};
use std::{
net::{Ipv4Addr, SocketAddr},
time::Duration,
};
use tokio_stream::StreamExt;
pub mod chain_cfg;
#[tokio::main]
async fn main() {
// The ECDSA private key used to create our enode identifier.
let secret_key = SecretKey::new(&mut rand::thread_rng());
let _ = RethTracer::new()
.with_stdout(LayerInfo::new(
LogFormat::Terminal,
LevelFilter::INFO.to_string(),
"".to_string(),
Some("always".to_string()),
))
.init();
// The local address we want to bind to
let local_addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 30303);
// The network configuration
let net_cfg = NetworkConfig::builder(secret_key)
.set_head(head())
.network_mode(NetworkMode::Work)
.listener_addr(local_addr)
.build_with_noop_provider(polygon_chain_spec());
// Set Discv4 lookup interval to 1 second
let mut discv4_cfg = Discv4ConfigBuilder::default();
let interval = Duration::from_secs(1);
discv4_cfg.add_boot_nodes(boot_nodes()).lookup_interval(interval);
let net_cfg = net_cfg.set_discovery_v4(discv4_cfg.build());
let net_manager = NetworkManager::eth(net_cfg).await.unwrap();
// The network handle is our entrypoint into the network.
let net_handle = net_manager.handle();
let mut events = net_handle.event_listener();
// NetworkManager is a long running task, let's spawn it
tokio::spawn(net_manager);
info!("Looking for Polygon peers...");
while let Some(evt) = events.next().await {
// For the sake of the example we only print the session established event
// with the chain specific details
if let NetworkEvent::ActivePeerSession { info, .. } = evt {
let SessionInfo { status, client_version, .. } = info;
let chain = status.chain;
info!(?chain, ?client_version, "Session established with a new peer.");
}
// More events here
}
// We will be disconnected from peers since we are not able to answer to network requests
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/network-txpool/src/main.rs | examples/network-txpool/src/main.rs | //! Example of how to use the network as a standalone component together with a transaction pool and
//! a custom pool validator.
//!
//! Run with
//!
//! ```sh
//! cargo run --release -p network-txpool -- node
//! ```
#![warn(unused_crate_dependencies)]
use reth_ethereum::{
network::{config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager},
pool::{
blobstore::InMemoryBlobStore, test_utils::OkValidator, CoinbaseTipOrdering,
EthPooledTransaction, Pool, TransactionListenerKind, TransactionPool,
},
provider::test_utils::NoopProvider,
};
#[tokio::main]
async fn main() -> eyre::Result<()> {
// This block provider implementation is used for testing purposes.
// NOTE: This also means that we don't have access to the blockchain and are not able to serve
// any requests for headers or bodies which can result in dropped connections initiated by
// remote or able to validate transaction against the latest state.
let client = NoopProvider::default();
let pool: Pool<
OkValidator<EthPooledTransaction>,
CoinbaseTipOrdering<EthPooledTransaction>,
InMemoryBlobStore,
> = reth_ethereum::pool::Pool::new(
OkValidator::default(),
CoinbaseTipOrdering::default(),
InMemoryBlobStore::default(),
Default::default(),
);
// The key that's used for encrypting sessions and to identify our node.
let local_key = rng_secret_key();
// Configure the network
let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key)
.mainnet_boot_nodes()
.build(client);
let transactions_manager_config = config.transactions_manager_config.clone();
// create the network instance
let (_handle, network, txpool, _) = NetworkManager::builder(config)
.await?
.transactions(pool.clone(), transactions_manager_config)
.split_with_handle();
// this can be used to interact with the `txpool` service directly
let _txs_handle = txpool.handle();
// spawn the network task
tokio::task::spawn(network);
// spawn the pool task
tokio::task::spawn(txpool);
// listen for new transactions
let mut txs = pool.pending_transactions_listener_for(TransactionListenerKind::All);
while let Some(tx) = txs.recv().await {
println!("Received new transaction: {tx:?}");
}
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/engine.rs | examples/custom-node/src/engine.rs | use crate::{
chainspec::CustomChainSpec,
evm::CustomEvmConfig,
primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction},
CustomNode,
};
use alloy_eips::eip2718::WithEncoded;
use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload};
use reth_chain_state::ExecutedBlockWithTrieUpdates;
use reth_engine_primitives::EngineApiValidator;
use reth_ethereum::{
node::api::{
validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion,
EngineObjectValidationError, ExecutionPayload, FullNodeComponents, NewPayloadError,
NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes,
PayloadTypes, PayloadValidator,
},
primitives::{RecoveredBlock, SealedBlock},
storage::StateProviderFactory,
trie::{KeccakKeyHasher, KeyHasher},
};
use reth_node_builder::{rpc::PayloadValidatorBuilder, InvalidPayloadAttributesError};
use reth_op::node::{
engine::OpEngineValidator, payload::OpAttributes, OpBuiltPayload, OpEngineTypes,
OpPayloadAttributes, OpPayloadBuilderAttributes,
};
use revm_primitives::U256;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use thiserror::Error;
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct CustomPayloadTypes;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CustomExecutionData {
pub inner: OpExecutionData,
pub extension: u64,
}
impl ExecutionPayload for CustomExecutionData {
fn parent_hash(&self) -> revm_primitives::B256 {
self.inner.parent_hash()
}
fn block_hash(&self) -> revm_primitives::B256 {
self.inner.block_hash()
}
fn block_number(&self) -> u64 {
self.inner.block_number()
}
fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> {
None
}
fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> {
self.inner.parent_beacon_block_root()
}
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn gas_used(&self) -> u64 {
self.inner.gas_used()
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CustomPayloadAttributes {
#[serde(flatten)]
inner: OpPayloadAttributes,
extension: u64,
}
impl PayloadAttributes for CustomPayloadAttributes {
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> {
self.inner.withdrawals()
}
fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> {
self.inner.parent_beacon_block_root()
}
}
#[derive(Debug, Clone)]
pub struct CustomPayloadBuilderAttributes {
pub inner: OpPayloadBuilderAttributes<CustomTransaction>,
pub extension: u64,
}
impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes {
type RpcPayloadAttributes = CustomPayloadAttributes;
type Error = alloy_rlp::Error;
fn try_new(
parent: revm_primitives::B256,
rpc_payload_attributes: Self::RpcPayloadAttributes,
version: u8,
) -> Result<Self, Self::Error>
where
Self: Sized,
{
let CustomPayloadAttributes { inner, extension } = rpc_payload_attributes;
Ok(Self { inner: OpPayloadBuilderAttributes::try_new(parent, inner, version)?, extension })
}
fn payload_id(&self) -> alloy_rpc_types_engine::PayloadId {
self.inner.payload_id()
}
fn parent(&self) -> revm_primitives::B256 {
self.inner.parent()
}
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> {
self.inner.parent_beacon_block_root()
}
fn suggested_fee_recipient(&self) -> revm_primitives::Address {
self.inner.suggested_fee_recipient()
}
fn prev_randao(&self) -> revm_primitives::B256 {
self.inner.prev_randao()
}
fn withdrawals(&self) -> &alloy_eips::eip4895::Withdrawals {
self.inner.withdrawals()
}
}
impl OpAttributes for CustomPayloadBuilderAttributes {
type Transaction = CustomTransaction;
fn no_tx_pool(&self) -> bool {
self.inner.no_tx_pool
}
fn sequencer_transactions(&self) -> &[WithEncoded<Self::Transaction>] {
&self.inner.transactions
}
}
#[derive(Debug, Clone)]
pub struct CustomBuiltPayload(pub OpBuiltPayload<CustomNodePrimitives>);
impl BuiltPayload for CustomBuiltPayload {
type Primitives = CustomNodePrimitives;
fn block(&self) -> &SealedBlock<<Self::Primitives as NodePrimitives>::Block> {
self.0.block()
}
fn fees(&self) -> U256 {
self.0.fees()
}
fn executed_block(&self) -> Option<ExecutedBlockWithTrieUpdates<Self::Primitives>> {
self.0.executed_block()
}
fn requests(&self) -> Option<alloy_eips::eip7685::Requests> {
self.0.requests()
}
}
impl From<CustomBuiltPayload>
for alloy_consensus::Block<<CustomNodePrimitives as NodePrimitives>::SignedTx>
{
fn from(value: CustomBuiltPayload) -> Self {
value.0.into_sealed_block().into_block().map_header(|header| header.inner)
}
}
impl PayloadTypes for CustomPayloadTypes {
type ExecutionData = CustomExecutionData;
type BuiltPayload = OpBuiltPayload<CustomNodePrimitives>;
type PayloadAttributes = CustomPayloadAttributes;
type PayloadBuilderAttributes = CustomPayloadBuilderAttributes;
fn block_to_payload(
block: SealedBlock<
<<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block,
>,
) -> Self::ExecutionData {
let extension = block.header().extension;
let block_hash = block.hash();
let block = block.into_block().map_header(|header| header.inner);
let (payload, sidecar) = OpExecutionPayload::from_block_unchecked(block_hash, &block);
CustomExecutionData { inner: OpExecutionData { payload, sidecar }, extension }
}
}
/// Custom engine validator
#[derive(Debug, Clone)]
pub struct CustomEngineValidator<P> {
inner: OpEngineValidator<P, CustomTransaction, CustomChainSpec>,
}
impl<P> CustomEngineValidator<P>
where
P: Send + Sync + Unpin + 'static,
{
/// Instantiates a new validator.
pub fn new<KH: KeyHasher>(chain_spec: Arc<CustomChainSpec>, provider: P) -> Self {
Self { inner: OpEngineValidator::new::<KH>(chain_spec, provider) }
}
/// Returns the chain spec used by the validator.
#[inline]
fn chain_spec(&self) -> &CustomChainSpec {
self.inner.chain_spec()
}
}
impl<P> PayloadValidator<CustomPayloadTypes> for CustomEngineValidator<P>
where
P: StateProviderFactory + Send + Sync + Unpin + 'static,
{
type Block = crate::primitives::block::Block;
fn ensure_well_formed_payload(
&self,
payload: CustomExecutionData,
) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> {
let sealed_block = PayloadValidator::<OpEngineTypes>::ensure_well_formed_payload(
&self.inner,
payload.inner,
)?;
let (block, senders) = sealed_block.split_sealed();
let (header, body) = block.split_sealed_header_body();
let header = CustomHeader { inner: header.into_header(), extension: payload.extension };
let body = body.map_ommers(|_| CustomHeader::default());
let block = SealedBlock::<Self::Block>::from_parts_unhashed(header, body);
Ok(block.with_senders(senders))
}
fn validate_payload_attributes_against_header(
&self,
_attr: &CustomPayloadAttributes,
_header: &<Self::Block as reth_ethereum::primitives::Block>::Header,
) -> Result<(), InvalidPayloadAttributesError> {
// skip default timestamp validation
Ok(())
}
}
impl<P> EngineApiValidator<CustomPayloadTypes> for CustomEngineValidator<P>
where
P: StateProviderFactory + Send + Sync + Unpin + 'static,
{
fn validate_version_specific_fields(
&self,
version: EngineApiMessageVersion,
payload_or_attrs: PayloadOrAttributes<'_, CustomExecutionData, CustomPayloadAttributes>,
) -> Result<(), EngineObjectValidationError> {
validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs)
}
fn ensure_well_formed_attributes(
&self,
version: EngineApiMessageVersion,
attributes: &CustomPayloadAttributes,
) -> Result<(), EngineObjectValidationError> {
validate_version_specific_fields(
self.chain_spec(),
version,
PayloadOrAttributes::<CustomExecutionData, _>::PayloadAttributes(attributes),
)?;
// custom validation logic - ensure that the custom field is not zero
// if attributes.extension == 0 {
// return Err(EngineObjectValidationError::invalid_params(
// CustomError::CustomFieldIsNotZero,
// ))
// }
Ok(())
}
}
/// Custom error type used in payload attributes validation
#[derive(Debug, Error)]
pub enum CustomError {
#[error("Custom field is not zero")]
CustomFieldIsNotZero,
}
/// Custom engine validator builder
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct CustomEngineValidatorBuilder;
impl<N> PayloadValidatorBuilder<N> for CustomEngineValidatorBuilder
where
N: FullNodeComponents<Types = CustomNode, Evm = CustomEvmConfig>,
{
type Validator = CustomEngineValidator<N::Provider>;
async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::Validator> {
Ok(CustomEngineValidator::new::<KeccakKeyHasher>(
ctx.config.chain.clone(),
ctx.node.provider().clone(),
))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/lib.rs | examples/custom-node/src/lib.rs | //! This example shows how implement a custom node.
//!
//! A node consists of:
//! - primitives: block,header,transactions
//! - components: network,pool,evm
//! - engine: advances the node
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
use crate::{
engine::{CustomEngineValidatorBuilder, CustomPayloadTypes},
engine_api::CustomEngineApiBuilder,
evm::CustomExecutorBuilder,
pool::CustomPooledTransaction,
primitives::CustomTransaction,
rpc::CustomRpcTypes,
};
use chainspec::CustomChainSpec;
use primitives::CustomNodePrimitives;
use reth_ethereum::node::api::{FullNodeTypes, NodeTypes};
use reth_node_builder::{
components::{BasicPayloadServiceBuilder, ComponentsBuilder},
Node, NodeAdapter,
};
use reth_op::{
node::{
node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder},
txpool, OpAddOns, OpNode,
},
rpc::OpEthApiBuilder,
};
pub mod chainspec;
pub mod engine;
pub mod engine_api;
pub mod evm;
pub mod pool;
pub mod primitives;
pub mod rpc;
#[derive(Debug, Clone)]
pub struct CustomNode {
inner: OpNode,
}
impl NodeTypes for CustomNode {
type Primitives = CustomNodePrimitives;
type ChainSpec = CustomChainSpec;
type Storage = <OpNode as NodeTypes>::Storage;
type Payload = CustomPayloadTypes;
}
impl<N> Node<N> for CustomNode
where
N: FullNodeTypes<Types = Self>,
{
type ComponentsBuilder = ComponentsBuilder<
N,
OpPoolBuilder<txpool::OpPooledTransaction<CustomTransaction, CustomPooledTransaction>>,
BasicPayloadServiceBuilder<OpPayloadBuilder>,
OpNetworkBuilder,
CustomExecutorBuilder,
OpConsensusBuilder,
>;
type AddOns = OpAddOns<
NodeAdapter<N>,
OpEthApiBuilder<CustomRpcTypes>,
CustomEngineValidatorBuilder,
CustomEngineApiBuilder,
>;
fn components_builder(&self) -> Self::ComponentsBuilder {
ComponentsBuilder::default()
.node_types::<N>()
.pool(OpPoolBuilder::default())
.executor(CustomExecutorBuilder::default())
.payload(BasicPayloadServiceBuilder::new(OpPayloadBuilder::new(false)))
.network(OpNetworkBuilder::new(false, false))
.consensus(OpConsensusBuilder::default())
}
fn add_ons(&self) -> Self::AddOns {
self.inner.add_ons_builder().build()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/rpc.rs | examples/custom-node/src/rpc.rs | use crate::{
evm::CustomTxEnv,
primitives::{CustomHeader, CustomTransaction},
};
use alloy_consensus::error::ValueError;
use alloy_network::TxSigner;
use op_alloy_consensus::OpTxEnvelope;
use op_alloy_rpc_types::{OpTransactionReceipt, OpTransactionRequest};
use reth_op::rpc::RpcTypes;
use reth_rpc_api::eth::{
transaction::TryIntoTxEnv, EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx,
};
use revm::context::{BlockEnv, CfgEnv};
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct CustomRpcTypes;
impl RpcTypes for CustomRpcTypes {
type Header = alloy_rpc_types_eth::Header<CustomHeader>;
type Receipt = OpTransactionReceipt;
type TransactionRequest = OpTransactionRequest;
type TransactionResponse = op_alloy_rpc_types::Transaction<CustomTransaction>;
}
impl TryIntoSimTx<CustomTransaction> for OpTransactionRequest {
fn try_into_sim_tx(self) -> Result<CustomTransaction, ValueError<Self>> {
Ok(CustomTransaction::Op(self.try_into_sim_tx()?))
}
}
impl TryIntoTxEnv<CustomTxEnv> for OpTransactionRequest {
type Err = EthTxEnvError;
fn try_into_tx_env<Spec>(
self,
cfg_env: &CfgEnv<Spec>,
block_env: &BlockEnv,
) -> Result<CustomTxEnv, Self::Err> {
Ok(CustomTxEnv::Op(self.try_into_tx_env(cfg_env, block_env)?))
}
}
impl SignableTxRequest<CustomTransaction> for OpTransactionRequest {
async fn try_build_and_sign(
self,
signer: impl TxSigner<alloy_primitives::Signature> + Send,
) -> Result<CustomTransaction, SignTxRequestError> {
Ok(CustomTransaction::Op(
SignableTxRequest::<OpTxEnvelope>::try_build_and_sign(self, signer).await?,
))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/chainspec.rs | examples/custom-node/src/chainspec.rs | use crate::primitives::CustomHeader;
use alloy_genesis::Genesis;
use reth_ethereum::{
chainspec::{EthChainSpec, EthereumHardforks, Hardfork, Hardforks},
primitives::SealedHeader,
};
use reth_network_peers::NodeRecord;
use reth_op::chainspec::OpChainSpec;
use reth_optimism_forks::OpHardforks;
#[derive(Debug, Clone)]
pub struct CustomChainSpec {
inner: OpChainSpec,
genesis_header: SealedHeader<CustomHeader>,
}
impl CustomChainSpec {
pub const fn inner(&self) -> &OpChainSpec {
&self.inner
}
}
impl Hardforks for CustomChainSpec {
fn fork<H: Hardfork>(&self, fork: H) -> reth_ethereum::chainspec::ForkCondition {
self.inner.fork(fork)
}
fn forks_iter(
&self,
) -> impl Iterator<Item = (&dyn Hardfork, reth_ethereum::chainspec::ForkCondition)> {
self.inner.forks_iter()
}
fn fork_id(&self, head: &reth_ethereum::chainspec::Head) -> reth_ethereum::chainspec::ForkId {
self.inner.fork_id(head)
}
fn latest_fork_id(&self) -> reth_ethereum::chainspec::ForkId {
self.inner.latest_fork_id()
}
fn fork_filter(
&self,
head: reth_ethereum::chainspec::Head,
) -> reth_ethereum::chainspec::ForkFilter {
self.inner.fork_filter(head)
}
}
impl EthChainSpec for CustomChainSpec {
type Header = CustomHeader;
fn chain(&self) -> reth_ethereum::chainspec::Chain {
self.inner.chain()
}
fn base_fee_params_at_timestamp(
&self,
timestamp: u64,
) -> reth_ethereum::chainspec::BaseFeeParams {
self.inner.base_fee_params_at_timestamp(timestamp)
}
fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<alloy_eips::eip7840::BlobParams> {
self.inner.blob_params_at_timestamp(timestamp)
}
fn deposit_contract(&self) -> Option<&reth_ethereum::chainspec::DepositContract> {
self.inner.deposit_contract()
}
fn genesis_hash(&self) -> revm_primitives::B256 {
self.genesis_header.hash()
}
fn prune_delete_limit(&self) -> usize {
self.inner.prune_delete_limit()
}
fn display_hardforks(&self) -> Box<dyn std::fmt::Display> {
self.inner.display_hardforks()
}
fn genesis_header(&self) -> &Self::Header {
&self.genesis_header
}
fn genesis(&self) -> &Genesis {
self.inner.genesis()
}
fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
self.inner.bootnodes()
}
fn final_paris_total_difficulty(&self) -> Option<revm_primitives::U256> {
self.inner.get_final_paris_total_difficulty()
}
}
impl EthereumHardforks for CustomChainSpec {
fn ethereum_fork_activation(
&self,
fork: reth_ethereum::chainspec::EthereumHardfork,
) -> reth_ethereum::chainspec::ForkCondition {
self.inner.ethereum_fork_activation(fork)
}
}
impl OpHardforks for CustomChainSpec {
fn op_fork_activation(
&self,
fork: reth_optimism_forks::OpHardfork,
) -> reth_ethereum::chainspec::ForkCondition {
self.inner.op_fork_activation(fork)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/pool.rs | examples/custom-node/src/pool.rs | use crate::primitives::{CustomTransaction, TxPayment};
use alloy_consensus::{
crypto::RecoveryError, error::ValueError, transaction::SignerRecoverable, Signed,
TransactionEnvelope,
};
use alloy_primitives::{Address, Sealed, B256};
use op_alloy_consensus::{OpPooledTransaction, OpTransaction, TxDeposit};
use reth_ethereum::primitives::{
serde_bincode_compat::RlpBincode, InMemorySize, SignedTransaction,
};
#[derive(Clone, Debug, TransactionEnvelope)]
#[envelope(tx_type_name = CustomPooledTxType)]
pub enum CustomPooledTransaction {
/// A regular Optimism transaction as defined by [`OpPooledTransaction`].
#[envelope(flatten)]
Op(OpPooledTransaction),
/// A [`TxPayment`] tagged with type 0x7E.
#[envelope(ty = 42)]
Payment(Signed<TxPayment>),
}
impl From<CustomPooledTransaction> for CustomTransaction {
fn from(tx: CustomPooledTransaction) -> Self {
match tx {
CustomPooledTransaction::Op(tx) => Self::Op(tx.into()),
CustomPooledTransaction::Payment(tx) => Self::Payment(tx),
}
}
}
impl TryFrom<CustomTransaction> for CustomPooledTransaction {
type Error = ValueError<CustomTransaction>;
fn try_from(tx: CustomTransaction) -> Result<Self, Self::Error> {
match tx {
CustomTransaction::Op(op) => Ok(Self::Op(
OpPooledTransaction::try_from(op).map_err(|op| op.map(CustomTransaction::Op))?,
)),
CustomTransaction::Payment(payment) => Ok(Self::Payment(payment)),
}
}
}
impl RlpBincode for CustomPooledTransaction {}
impl OpTransaction for CustomPooledTransaction {
fn is_deposit(&self) -> bool {
false
}
fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> {
None
}
}
impl SignerRecoverable for CustomPooledTransaction {
fn recover_signer(&self) -> Result<Address, RecoveryError> {
match self {
CustomPooledTransaction::Op(tx) => SignerRecoverable::recover_signer(tx),
CustomPooledTransaction::Payment(tx) => SignerRecoverable::recover_signer(tx),
}
}
fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
match self {
CustomPooledTransaction::Op(tx) => SignerRecoverable::recover_signer_unchecked(tx),
CustomPooledTransaction::Payment(tx) => SignerRecoverable::recover_signer_unchecked(tx),
}
}
}
impl SignedTransaction for CustomPooledTransaction {
fn tx_hash(&self) -> &B256 {
match self {
CustomPooledTransaction::Op(tx) => SignedTransaction::tx_hash(tx),
CustomPooledTransaction::Payment(tx) => tx.hash(),
}
}
}
impl InMemorySize for CustomPooledTransaction {
fn size(&self) -> usize {
match self {
CustomPooledTransaction::Op(tx) => InMemorySize::size(tx),
CustomPooledTransaction::Payment(tx) => InMemorySize::size(tx),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/engine_api.rs | examples/custom-node/src/engine_api.rs | use crate::{
engine::{CustomExecutionData, CustomPayloadAttributes, CustomPayloadTypes},
primitives::CustomNodePrimitives,
CustomNode,
};
use alloy_rpc_types_engine::{
ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus,
};
use async_trait::async_trait;
use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule};
use reth_ethereum::node::api::{
AddOnsContext, ConsensusEngineHandle, EngineApiMessageVersion, FullNodeComponents,
};
use reth_node_builder::rpc::EngineApiBuilder;
use reth_op::node::OpBuiltPayload;
use reth_payload_builder::PayloadStore;
use reth_rpc_api::IntoEngineApiRpcModule;
use reth_rpc_engine_api::EngineApiError;
use std::sync::Arc;
#[derive(serde::Deserialize)]
pub struct CustomExecutionPayloadInput {}
#[derive(Clone, serde::Serialize)]
pub struct CustomExecutionPayloadEnvelope {
execution_payload: ExecutionPayloadV3,
extension: u64,
}
impl From<OpBuiltPayload<CustomNodePrimitives>> for CustomExecutionPayloadEnvelope {
fn from(value: OpBuiltPayload<CustomNodePrimitives>) -> Self {
let sealed_block = value.into_sealed_block();
let hash = sealed_block.hash();
let extension = sealed_block.header().extension;
let block = sealed_block.into_block();
Self {
execution_payload: ExecutionPayloadV3::from_block_unchecked(hash, &block.clone()),
extension,
}
}
}
#[rpc(server, namespace = "engine")]
pub trait CustomEngineApi {
#[method(name = "newPayload")]
async fn new_payload(&self, payload: CustomExecutionData) -> RpcResult<PayloadStatus>;
#[method(name = "forkchoiceUpdated")]
async fn fork_choice_updated(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<CustomPayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated>;
#[method(name = "getPayload")]
async fn get_payload(&self, payload_id: PayloadId)
-> RpcResult<CustomExecutionPayloadEnvelope>;
}
pub struct CustomEngineApi {
inner: Arc<CustomEngineApiInner>,
}
struct CustomEngineApiInner {
beacon_consensus: ConsensusEngineHandle<CustomPayloadTypes>,
payload_store: PayloadStore<CustomPayloadTypes>,
}
impl CustomEngineApiInner {
fn new(
beacon_consensus: ConsensusEngineHandle<CustomPayloadTypes>,
payload_store: PayloadStore<CustomPayloadTypes>,
) -> Self {
Self { beacon_consensus, payload_store }
}
}
#[async_trait]
impl CustomEngineApiServer for CustomEngineApi {
async fn new_payload(&self, payload: CustomExecutionData) -> RpcResult<PayloadStatus> {
Ok(self
.inner
.beacon_consensus
.new_payload(payload)
.await
.map_err(EngineApiError::NewPayload)?)
}
async fn fork_choice_updated(
&self,
fork_choice_state: ForkchoiceState,
payload_attributes: Option<CustomPayloadAttributes>,
) -> RpcResult<ForkchoiceUpdated> {
Ok(self
.inner
.beacon_consensus
.fork_choice_updated(fork_choice_state, payload_attributes, EngineApiMessageVersion::V3)
.await
.map_err(EngineApiError::ForkChoiceUpdate)?)
}
async fn get_payload(
&self,
payload_id: PayloadId,
) -> RpcResult<CustomExecutionPayloadEnvelope> {
Ok(self
.inner
.payload_store
.resolve(payload_id)
.await
.ok_or(EngineApiError::UnknownPayload)?
.map_err(|_| EngineApiError::UnknownPayload)?
.into())
}
}
impl IntoEngineApiRpcModule for CustomEngineApi
where
Self: CustomEngineApiServer,
{
fn into_rpc_module(self) -> RpcModule<()> {
self.into_rpc().remove_context()
}
}
#[derive(Debug, Default, Clone)]
pub struct CustomEngineApiBuilder {}
impl<N> EngineApiBuilder<N> for CustomEngineApiBuilder
where
N: FullNodeComponents<Types = CustomNode>,
{
type EngineApi = CustomEngineApi;
async fn build_engine_api(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::EngineApi> {
Ok(CustomEngineApi {
inner: Arc::new(CustomEngineApiInner::new(
ctx.beacon_engine_handle.clone(),
PayloadStore::new(ctx.node.payload_builder_handle().clone()),
)),
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/config.rs | examples/custom-node/src/evm/config.rs | use crate::{
chainspec::CustomChainSpec,
engine::{CustomExecutionData, CustomPayloadBuilderAttributes},
evm::{alloy::CustomEvmFactory, executor::CustomBlockExecutionCtx, CustomBlockAssembler},
primitives::{Block, CustomHeader, CustomNodePrimitives, CustomTransaction},
};
use alloy_consensus::BlockHeader;
use alloy_eips::{eip2718::WithEncoded, Decodable2718};
use alloy_evm::EvmEnv;
use alloy_op_evm::OpBlockExecutionCtx;
use alloy_rpc_types_engine::PayloadError;
use op_revm::OpSpecId;
use reth_engine_primitives::ExecutableTxIterator;
use reth_ethereum::{
chainspec::EthChainSpec,
node::api::{BuildNextEnv, ConfigureEvm, PayloadBuilderError},
primitives::{SealedBlock, SealedHeader},
};
use reth_node_builder::{ConfigureEngineEvm, NewPayloadError};
use reth_op::{
chainspec::OpHardforks,
evm::primitives::{EvmEnvFor, ExecutionCtxFor},
node::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder},
primitives::SignedTransaction,
};
use reth_optimism_flashblocks::ExecutionPayloadBaseV1;
use reth_rpc_api::eth::helpers::pending_block::BuildPendingEnv;
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct CustomEvmConfig {
pub(super) inner: OpEvmConfig,
pub(super) block_assembler: CustomBlockAssembler,
pub(super) custom_evm_factory: CustomEvmFactory,
}
impl CustomEvmConfig {
pub fn new(chain_spec: Arc<CustomChainSpec>) -> Self {
Self {
inner: OpEvmConfig::new(
Arc::new(chain_spec.inner().clone()),
OpRethReceiptBuilder::default(),
),
block_assembler: CustomBlockAssembler::new(chain_spec),
custom_evm_factory: CustomEvmFactory::new(),
}
}
}
impl ConfigureEvm for CustomEvmConfig {
type Primitives = CustomNodePrimitives;
type Error = <OpEvmConfig as ConfigureEvm>::Error;
type NextBlockEnvCtx = CustomNextBlockEnvAttributes;
type BlockExecutorFactory = Self;
type BlockAssembler = CustomBlockAssembler;
fn block_executor_factory(&self) -> &Self::BlockExecutorFactory {
self
}
fn block_assembler(&self) -> &Self::BlockAssembler {
&self.block_assembler
}
fn evm_env(&self, header: &CustomHeader) -> EvmEnv<OpSpecId> {
self.inner.evm_env(header)
}
fn next_evm_env(
&self,
parent: &CustomHeader,
attributes: &CustomNextBlockEnvAttributes,
) -> Result<EvmEnv<OpSpecId>, Self::Error> {
self.inner.next_evm_env(parent, &attributes.inner)
}
fn context_for_block(&self, block: &SealedBlock<Block>) -> CustomBlockExecutionCtx {
CustomBlockExecutionCtx {
inner: OpBlockExecutionCtx {
parent_hash: block.header().parent_hash(),
parent_beacon_block_root: block.header().parent_beacon_block_root(),
extra_data: block.header().extra_data().clone(),
},
extension: block.extension,
}
}
fn context_for_next_block(
&self,
parent: &SealedHeader<CustomHeader>,
attributes: Self::NextBlockEnvCtx,
) -> CustomBlockExecutionCtx {
CustomBlockExecutionCtx {
inner: OpBlockExecutionCtx {
parent_hash: parent.hash(),
parent_beacon_block_root: attributes.inner.parent_beacon_block_root,
extra_data: attributes.inner.extra_data,
},
extension: attributes.extension,
}
}
}
impl ConfigureEngineEvm<CustomExecutionData> for CustomEvmConfig {
fn evm_env_for_payload(&self, payload: &CustomExecutionData) -> EvmEnvFor<Self> {
self.inner.evm_env_for_payload(&payload.inner)
}
fn context_for_payload<'a>(
&self,
payload: &'a CustomExecutionData,
) -> ExecutionCtxFor<'a, Self> {
CustomBlockExecutionCtx {
inner: self.inner.context_for_payload(&payload.inner),
extension: payload.extension,
}
}
fn tx_iterator_for_payload(
&self,
payload: &CustomExecutionData,
) -> impl ExecutableTxIterator<Self> {
payload.inner.payload.transactions().clone().into_iter().map(|encoded| {
let tx = CustomTransaction::decode_2718_exact(encoded.as_ref())
.map_err(Into::into)
.map_err(PayloadError::Decode)?;
let signer = tx.try_recover().map_err(NewPayloadError::other)?;
Ok::<_, NewPayloadError>(WithEncoded::new(encoded, tx.with_signer(signer)))
})
}
}
/// Additional parameters required for executing next block custom transactions.
#[derive(Debug, Clone)]
pub struct CustomNextBlockEnvAttributes {
inner: OpNextBlockEnvAttributes,
extension: u64,
}
impl From<ExecutionPayloadBaseV1> for CustomNextBlockEnvAttributes {
fn from(value: ExecutionPayloadBaseV1) -> Self {
Self { inner: value.into(), extension: 0 }
}
}
impl BuildPendingEnv<CustomHeader> for CustomNextBlockEnvAttributes {
fn build_pending_env(parent: &SealedHeader<CustomHeader>) -> Self {
Self {
inner: OpNextBlockEnvAttributes::build_pending_env(parent),
extension: parent.extension,
}
}
}
impl<H, ChainSpec> BuildNextEnv<CustomPayloadBuilderAttributes, H, ChainSpec>
for CustomNextBlockEnvAttributes
where
H: BlockHeader,
ChainSpec: EthChainSpec + OpHardforks,
{
fn build_next_env(
attributes: &CustomPayloadBuilderAttributes,
parent: &SealedHeader<H>,
chain_spec: &ChainSpec,
) -> Result<Self, PayloadBuilderError> {
let inner =
OpNextBlockEnvAttributes::build_next_env(&attributes.inner, parent, chain_spec)?;
Ok(CustomNextBlockEnvAttributes { inner, extension: attributes.extension })
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/assembler.rs | examples/custom-node/src/evm/assembler.rs | use crate::{
chainspec::CustomChainSpec,
evm::executor::CustomBlockExecutionCtx,
primitives::{Block, CustomHeader, CustomTransaction},
};
use alloy_evm::block::{BlockExecutionError, BlockExecutorFactory};
use reth_ethereum::{
evm::primitives::execute::{BlockAssembler, BlockAssemblerInput},
primitives::Receipt,
};
use reth_op::{node::OpBlockAssembler, DepositReceipt};
use std::sync::Arc;
#[derive(Clone, Debug)]
pub struct CustomBlockAssembler {
block_assembler: OpBlockAssembler<CustomChainSpec>,
}
impl CustomBlockAssembler {
pub const fn new(chain_spec: Arc<CustomChainSpec>) -> Self {
Self { block_assembler: OpBlockAssembler::new(chain_spec) }
}
}
impl<F> BlockAssembler<F> for CustomBlockAssembler
where
F: for<'a> BlockExecutorFactory<
ExecutionCtx<'a> = CustomBlockExecutionCtx,
Transaction = CustomTransaction,
Receipt: Receipt + DepositReceipt,
>,
{
type Block = Block;
fn assemble_block(
&self,
input: BlockAssemblerInput<'_, '_, F, CustomHeader>,
) -> Result<Self::Block, BlockExecutionError> {
Ok(self.block_assembler.assemble_block(input)?.map_header(From::from))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/builder.rs | examples/custom-node/src/evm/builder.rs | use crate::{chainspec::CustomChainSpec, evm::CustomEvmConfig, primitives::CustomNodePrimitives};
use reth_ethereum::node::api::FullNodeTypes;
use reth_node_builder::{components::ExecutorBuilder, BuilderContext, NodeTypes};
use std::{future, future::Future};
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct CustomExecutorBuilder;
impl<Node: FullNodeTypes> ExecutorBuilder<Node> for CustomExecutorBuilder
where
Node::Types: NodeTypes<ChainSpec = CustomChainSpec, Primitives = CustomNodePrimitives>,
{
type EVM = CustomEvmConfig;
fn build_evm(
self,
ctx: &BuilderContext<Node>,
) -> impl Future<Output = eyre::Result<Self::EVM>> + Send {
future::ready(Ok(CustomEvmConfig::new(ctx.chain_spec())))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/alloy.rs | examples/custom-node/src/evm/alloy.rs | use crate::evm::{CustomTxEnv, PaymentTxEnv};
use alloy_evm::{precompiles::PrecompilesMap, Database, Evm, EvmEnv, EvmFactory};
use alloy_op_evm::{OpEvm, OpEvmFactory};
use alloy_primitives::{Address, Bytes};
use op_revm::{
precompiles::OpPrecompiles, L1BlockInfo, OpContext, OpHaltReason, OpSpecId, OpTransaction,
OpTransactionError,
};
use reth_ethereum::evm::revm::{
context::{result::ResultAndState, BlockEnv, CfgEnv},
handler::PrecompileProvider,
interpreter::InterpreterResult,
Context, Inspector, Journal,
};
use revm::{context_interface::result::EVMError, inspector::NoOpInspector};
use std::error::Error;
/// EVM context contains data that EVM needs for execution of [`CustomTxEnv`].
pub type CustomContext<DB> =
Context<BlockEnv, OpTransaction<PaymentTxEnv>, CfgEnv<OpSpecId>, DB, Journal<DB>, L1BlockInfo>;
pub struct CustomEvm<DB: Database, I, P = OpPrecompiles> {
inner: OpEvm<DB, I, P>,
}
impl<DB: Database, I, P> CustomEvm<DB, I, P> {
pub fn new(op: OpEvm<DB, I, P>) -> Self {
Self { inner: op }
}
}
impl<DB, I, P> Evm for CustomEvm<DB, I, P>
where
DB: Database,
I: Inspector<OpContext<DB>>,
P: PrecompileProvider<OpContext<DB>, Output = InterpreterResult>,
{
type DB = DB;
type Tx = CustomTxEnv;
type Error = EVMError<DB::Error, OpTransactionError>;
type HaltReason = OpHaltReason;
type Spec = OpSpecId;
type Precompiles = P;
type Inspector = I;
fn block(&self) -> &BlockEnv {
self.inner.block()
}
fn chain_id(&self) -> u64 {
self.inner.chain_id()
}
fn transact_raw(
&mut self,
tx: Self::Tx,
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
match tx {
CustomTxEnv::Op(tx) => self.inner.transact_raw(tx),
CustomTxEnv::Payment(..) => todo!(),
}
}
fn transact_system_call(
&mut self,
caller: Address,
contract: Address,
data: Bytes,
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
self.inner.transact_system_call(caller, contract, data)
}
fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>) {
self.inner.finish()
}
fn set_inspector_enabled(&mut self, enabled: bool) {
self.inner.set_inspector_enabled(enabled)
}
fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) {
self.inner.components()
}
fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) {
self.inner.components_mut()
}
}
#[derive(Default, Debug, Clone, Copy)]
pub struct CustomEvmFactory(pub OpEvmFactory);
impl CustomEvmFactory {
pub fn new() -> Self {
Self::default()
}
}
impl EvmFactory for CustomEvmFactory {
type Evm<DB: Database, I: Inspector<OpContext<DB>>> = CustomEvm<DB, I, Self::Precompiles>;
type Context<DB: Database> = OpContext<DB>;
type Tx = CustomTxEnv;
type Error<DBError: Error + Send + Sync + 'static> = EVMError<DBError, OpTransactionError>;
type HaltReason = OpHaltReason;
type Spec = OpSpecId;
type Precompiles = PrecompilesMap;
fn create_evm<DB: Database>(
&self,
db: DB,
input: EvmEnv<Self::Spec>,
) -> Self::Evm<DB, NoOpInspector> {
CustomEvm::new(self.0.create_evm(db, input))
}
fn create_evm_with_inspector<DB: Database, I: Inspector<Self::Context<DB>>>(
&self,
db: DB,
input: EvmEnv<Self::Spec>,
inspector: I,
) -> Self::Evm<DB, I> {
CustomEvm::new(self.0.create_evm_with_inspector(db, input, inspector))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/env.rs | examples/custom-node/src/evm/env.rs | use crate::primitives::{CustomTransaction, TxPayment};
use alloy_eips::{eip2930::AccessList, Typed2718};
use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv};
use alloy_primitives::{Address, Bytes, TxKind, B256, U256};
use op_alloy_consensus::OpTxEnvelope;
use op_revm::OpTransaction;
use reth_ethereum::evm::{primitives::TransactionEnv, revm::context::TxEnv};
/// An Optimism transaction extended by [`PaymentTxEnv`] that can be fed to [`Evm`].
///
/// [`Evm`]: alloy_evm::Evm
#[derive(Clone, Debug)]
pub enum CustomTxEnv {
Op(OpTransaction<TxEnv>),
Payment(PaymentTxEnv),
}
/// A transaction environment is a set of information related to an Ethereum transaction that can be
/// fed to [`Evm`] for execution.
///
/// [`Evm`]: alloy_evm::Evm
#[derive(Clone, Debug, Default)]
pub struct PaymentTxEnv(pub TxEnv);
impl revm::context::Transaction for CustomTxEnv {
type AccessListItem<'a>
= <TxEnv as revm::context::Transaction>::AccessListItem<'a>
where
Self: 'a;
type Authorization<'a>
= <TxEnv as revm::context::Transaction>::Authorization<'a>
where
Self: 'a;
fn tx_type(&self) -> u8 {
match self {
Self::Op(tx) => tx.tx_type(),
Self::Payment(tx) => tx.tx_type(),
}
}
fn caller(&self) -> Address {
match self {
Self::Op(tx) => tx.caller(),
Self::Payment(tx) => tx.caller(),
}
}
fn gas_limit(&self) -> u64 {
match self {
Self::Op(tx) => tx.gas_limit(),
Self::Payment(tx) => tx.gas_limit(),
}
}
fn value(&self) -> U256 {
match self {
Self::Op(tx) => tx.value(),
Self::Payment(tx) => tx.value(),
}
}
fn input(&self) -> &Bytes {
match self {
Self::Op(tx) => tx.input(),
Self::Payment(tx) => tx.input(),
}
}
fn nonce(&self) -> u64 {
match self {
Self::Op(tx) => revm::context::Transaction::nonce(tx),
Self::Payment(tx) => revm::context::Transaction::nonce(tx),
}
}
fn kind(&self) -> TxKind {
match self {
Self::Op(tx) => tx.kind(),
Self::Payment(tx) => tx.kind(),
}
}
fn chain_id(&self) -> Option<u64> {
match self {
Self::Op(tx) => tx.chain_id(),
Self::Payment(tx) => tx.chain_id(),
}
}
fn gas_price(&self) -> u128 {
match self {
Self::Op(tx) => tx.gas_price(),
Self::Payment(tx) => tx.gas_price(),
}
}
fn access_list(&self) -> Option<impl Iterator<Item = Self::AccessListItem<'_>>> {
Some(match self {
Self::Op(tx) => tx.base.access_list.iter(),
Self::Payment(tx) => tx.0.access_list.iter(),
})
}
fn blob_versioned_hashes(&self) -> &[B256] {
match self {
Self::Op(tx) => tx.blob_versioned_hashes(),
Self::Payment(tx) => tx.blob_versioned_hashes(),
}
}
fn max_fee_per_blob_gas(&self) -> u128 {
match self {
Self::Op(tx) => tx.max_fee_per_blob_gas(),
Self::Payment(tx) => tx.max_fee_per_blob_gas(),
}
}
fn authorization_list_len(&self) -> usize {
match self {
Self::Op(tx) => tx.authorization_list_len(),
Self::Payment(tx) => tx.authorization_list_len(),
}
}
fn authorization_list(&self) -> impl Iterator<Item = Self::Authorization<'_>> {
match self {
Self::Op(tx) => tx.base.authorization_list.iter(),
Self::Payment(tx) => tx.0.authorization_list.iter(),
}
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
match self {
Self::Op(tx) => tx.max_priority_fee_per_gas(),
Self::Payment(tx) => tx.max_priority_fee_per_gas(),
}
}
}
impl revm::context::Transaction for PaymentTxEnv {
type AccessListItem<'a>
= <TxEnv as revm::context::Transaction>::AccessListItem<'a>
where
Self: 'a;
type Authorization<'a>
= <TxEnv as revm::context::Transaction>::Authorization<'a>
where
Self: 'a;
fn tx_type(&self) -> u8 {
self.0.tx_type()
}
fn caller(&self) -> Address {
self.0.caller()
}
fn gas_limit(&self) -> u64 {
self.0.gas_limit()
}
fn value(&self) -> U256 {
self.0.value()
}
fn input(&self) -> &Bytes {
self.0.input()
}
fn nonce(&self) -> u64 {
revm::context::Transaction::nonce(&self.0)
}
fn kind(&self) -> TxKind {
self.0.kind()
}
fn chain_id(&self) -> Option<u64> {
self.0.chain_id()
}
fn gas_price(&self) -> u128 {
self.0.gas_price()
}
fn access_list(&self) -> Option<impl Iterator<Item = Self::AccessListItem<'_>>> {
self.0.access_list()
}
fn blob_versioned_hashes(&self) -> &[B256] {
self.0.blob_versioned_hashes()
}
fn max_fee_per_blob_gas(&self) -> u128 {
self.0.max_fee_per_blob_gas()
}
fn authorization_list_len(&self) -> usize {
self.0.authorization_list_len()
}
fn authorization_list(&self) -> impl Iterator<Item = Self::Authorization<'_>> {
self.0.authorization_list()
}
fn max_priority_fee_per_gas(&self) -> Option<u128> {
self.0.max_priority_fee_per_gas()
}
}
impl TransactionEnv for PaymentTxEnv {
fn set_gas_limit(&mut self, gas_limit: u64) {
self.0.set_gas_limit(gas_limit);
}
fn nonce(&self) -> u64 {
self.0.nonce()
}
fn set_nonce(&mut self, nonce: u64) {
self.0.set_nonce(nonce);
}
fn set_access_list(&mut self, access_list: AccessList) {
self.0.set_access_list(access_list);
}
}
impl TransactionEnv for CustomTxEnv {
fn set_gas_limit(&mut self, gas_limit: u64) {
match self {
Self::Op(tx) => tx.set_gas_limit(gas_limit),
Self::Payment(tx) => tx.set_gas_limit(gas_limit),
}
}
fn nonce(&self) -> u64 {
match self {
Self::Op(tx) => tx.nonce(),
Self::Payment(tx) => tx.nonce(),
}
}
fn set_nonce(&mut self, nonce: u64) {
match self {
Self::Op(tx) => tx.set_nonce(nonce),
Self::Payment(tx) => tx.set_nonce(nonce),
}
}
fn set_access_list(&mut self, access_list: AccessList) {
match self {
Self::Op(tx) => tx.set_access_list(access_list),
Self::Payment(tx) => tx.set_access_list(access_list),
}
}
}
impl FromRecoveredTx<TxPayment> for TxEnv {
fn from_recovered_tx(tx: &TxPayment, caller: Address) -> Self {
let TxPayment {
chain_id,
nonce,
gas_limit,
max_fee_per_gas,
max_priority_fee_per_gas,
to,
value,
} = tx;
Self {
tx_type: tx.ty(),
caller,
gas_limit: *gas_limit,
gas_price: *max_fee_per_gas,
gas_priority_fee: Some(*max_priority_fee_per_gas),
kind: TxKind::Call(*to),
value: *value,
nonce: *nonce,
chain_id: Some(*chain_id),
..Default::default()
}
}
}
impl FromTxWithEncoded<TxPayment> for TxEnv {
fn from_encoded_tx(tx: &TxPayment, sender: Address, _encoded: Bytes) -> Self {
Self::from_recovered_tx(tx, sender)
}
}
impl FromRecoveredTx<OpTxEnvelope> for CustomTxEnv {
fn from_recovered_tx(tx: &OpTxEnvelope, sender: Address) -> Self {
Self::Op(OpTransaction::from_recovered_tx(tx, sender))
}
}
impl FromTxWithEncoded<OpTxEnvelope> for CustomTxEnv {
fn from_encoded_tx(tx: &OpTxEnvelope, sender: Address, encoded: Bytes) -> Self {
Self::Op(OpTransaction::from_encoded_tx(tx, sender, encoded))
}
}
impl FromRecoveredTx<CustomTransaction> for CustomTxEnv {
fn from_recovered_tx(tx: &CustomTransaction, sender: Address) -> Self {
match tx {
CustomTransaction::Op(tx) => Self::from_recovered_tx(tx, sender),
CustomTransaction::Payment(tx) => {
Self::Payment(PaymentTxEnv(TxEnv::from_recovered_tx(tx.tx(), sender)))
}
}
}
}
impl FromTxWithEncoded<CustomTransaction> for CustomTxEnv {
fn from_encoded_tx(tx: &CustomTransaction, sender: Address, encoded: Bytes) -> Self {
match tx {
CustomTransaction::Op(tx) => Self::from_encoded_tx(tx, sender, encoded),
CustomTransaction::Payment(tx) => {
Self::Payment(PaymentTxEnv(TxEnv::from_encoded_tx(tx.tx(), sender, encoded)))
}
}
}
}
impl IntoTxEnv<Self> for CustomTxEnv {
fn into_tx_env(self) -> Self {
self
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/executor.rs | examples/custom-node/src/evm/executor.rs | use crate::{
evm::{
alloy::{CustomEvm, CustomEvmFactory},
CustomEvmConfig, CustomTxEnv,
},
primitives::CustomTransaction,
};
use alloy_consensus::transaction::Recovered;
use alloy_evm::{
block::{
BlockExecutionError, BlockExecutionResult, BlockExecutor, BlockExecutorFactory,
BlockExecutorFor, CommitChanges, ExecutableTx, OnStateHook,
},
precompiles::PrecompilesMap,
Database, Evm,
};
use alloy_op_evm::{OpBlockExecutionCtx, OpBlockExecutor};
use reth_ethereum::evm::primitives::InspectorFor;
use reth_op::{chainspec::OpChainSpec, node::OpRethReceiptBuilder, OpReceipt};
use revm::{context::result::ExecutionResult, database::State};
use std::sync::Arc;
pub struct CustomBlockExecutor<Evm> {
inner: OpBlockExecutor<Evm, OpRethReceiptBuilder, Arc<OpChainSpec>>,
}
impl<'db, DB, E> BlockExecutor for CustomBlockExecutor<E>
where
DB: Database + 'db,
E: Evm<DB = &'db mut State<DB>, Tx = CustomTxEnv>,
{
type Transaction = CustomTransaction;
type Receipt = OpReceipt;
type Evm = E;
fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
self.inner.apply_pre_execution_changes()
}
fn execute_transaction_with_commit_condition(
&mut self,
tx: impl ExecutableTx<Self>,
f: impl FnOnce(&ExecutionResult<<Self::Evm as Evm>::HaltReason>) -> CommitChanges,
) -> Result<Option<u64>, BlockExecutionError> {
match tx.tx() {
CustomTransaction::Op(op_tx) => self.inner.execute_transaction_with_commit_condition(
Recovered::new_unchecked(op_tx, *tx.signer()),
f,
),
CustomTransaction::Payment(..) => todo!(),
}
}
fn finish(self) -> Result<(Self::Evm, BlockExecutionResult<OpReceipt>), BlockExecutionError> {
self.inner.finish()
}
fn set_state_hook(&mut self, _hook: Option<Box<dyn OnStateHook>>) {
self.inner.set_state_hook(_hook)
}
fn evm_mut(&mut self) -> &mut Self::Evm {
self.inner.evm_mut()
}
fn evm(&self) -> &Self::Evm {
self.inner.evm()
}
}
impl BlockExecutorFactory for CustomEvmConfig {
type EvmFactory = CustomEvmFactory;
type ExecutionCtx<'a> = CustomBlockExecutionCtx;
type Transaction = CustomTransaction;
type Receipt = OpReceipt;
fn evm_factory(&self) -> &Self::EvmFactory {
&self.custom_evm_factory
}
fn create_executor<'a, DB, I>(
&'a self,
evm: CustomEvm<&'a mut State<DB>, I, PrecompilesMap>,
ctx: CustomBlockExecutionCtx,
) -> impl BlockExecutorFor<'a, Self, DB, I>
where
DB: Database + 'a,
I: InspectorFor<Self, &'a mut State<DB>> + 'a,
{
CustomBlockExecutor {
inner: OpBlockExecutor::new(
evm,
ctx.inner,
self.inner.chain_spec().clone(),
*self.inner.executor_factory.receipt_builder(),
),
}
}
}
/// Additional parameters for executing custom transactions.
#[derive(Debug, Clone)]
pub struct CustomBlockExecutionCtx {
pub inner: OpBlockExecutionCtx,
pub extension: u64,
}
impl From<CustomBlockExecutionCtx> for OpBlockExecutionCtx {
fn from(value: CustomBlockExecutionCtx) -> Self {
value.inner
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/evm/mod.rs | examples/custom-node/src/evm/mod.rs | mod alloy;
mod assembler;
mod builder;
mod config;
mod env;
mod executor;
pub use alloy::{CustomContext, CustomEvm};
pub use assembler::CustomBlockAssembler;
pub use builder::CustomExecutorBuilder;
pub use config::CustomEvmConfig;
pub use env::{CustomTxEnv, PaymentTxEnv};
pub use executor::CustomBlockExecutor;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/primitives/tx.rs | examples/custom-node/src/primitives/tx.rs | use super::TxPayment;
use alloy_consensus::{
crypto::RecoveryError, transaction::SignerRecoverable, Signed, TransactionEnvelope,
};
use alloy_eips::Encodable2718;
use alloy_primitives::{Sealed, Signature, B256};
use alloy_rlp::BufMut;
use op_alloy_consensus::{OpTxEnvelope, TxDeposit};
use reth_codecs::{
alloy::transaction::{CompactEnvelope, FromTxCompact, ToTxCompact},
Compact,
};
use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, InMemorySize};
use reth_op::{primitives::SignedTransaction, OpTransaction};
use revm_primitives::Address;
/// Either [`OpTxEnvelope`] or [`TxPayment`].
#[derive(Debug, Clone, TransactionEnvelope)]
#[envelope(tx_type_name = TxTypeCustom)]
pub enum CustomTransaction {
/// A regular Optimism transaction as defined by [`OpTxEnvelope`].
#[envelope(flatten)]
Op(OpTxEnvelope),
/// A [`TxPayment`] tagged with type 0x7E.
#[envelope(ty = 42)]
Payment(Signed<TxPayment>),
}
impl RlpBincode for CustomTransaction {}
impl reth_codecs::alloy::transaction::Envelope for CustomTransaction {
fn signature(&self) -> &Signature {
match self {
CustomTransaction::Op(tx) => tx.signature(),
CustomTransaction::Payment(tx) => tx.signature(),
}
}
fn tx_type(&self) -> Self::TxType {
match self {
CustomTransaction::Op(tx) => TxTypeCustom::Op(tx.tx_type()),
CustomTransaction::Payment(_) => TxTypeCustom::Payment,
}
}
}
impl FromTxCompact for CustomTransaction {
type TxType = TxTypeCustom;
fn from_tx_compact(buf: &[u8], tx_type: Self::TxType, signature: Signature) -> (Self, &[u8])
where
Self: Sized,
{
match tx_type {
TxTypeCustom::Op(tx_type) => {
let (tx, buf) = OpTxEnvelope::from_tx_compact(buf, tx_type, signature);
(Self::Op(tx), buf)
}
TxTypeCustom::Payment => {
let (tx, buf) = TxPayment::from_compact(buf, buf.len());
let tx = Signed::new_unhashed(tx, signature);
(Self::Payment(tx), buf)
}
}
}
}
impl ToTxCompact for CustomTransaction {
fn to_tx_compact(&self, buf: &mut (impl BufMut + AsMut<[u8]>)) {
match self {
CustomTransaction::Op(tx) => tx.to_tx_compact(buf),
CustomTransaction::Payment(tx) => {
tx.tx().to_compact(buf);
}
}
}
}
impl Compact for CustomTransaction {
    /// Delegates to [`CompactEnvelope`], which combines the tx-type, signature
    /// and body codecs implemented above into one compact layout.
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: BufMut + AsMut<[u8]>,
    {
        <Self as CompactEnvelope>::to_compact(self, buf)
    }
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        <Self as CompactEnvelope>::from_compact(buf, len)
    }
}
/// [`OpTransaction`] support: only the wrapped [`OpTxEnvelope`] can ever be a
/// deposit transaction; a [`TxPayment`] never is.
impl OpTransaction for CustomTransaction {
    fn is_deposit(&self) -> bool {
        // Only the Op variant can carry a deposit; everything else is `false`.
        if let Self::Op(envelope) = self {
            envelope.is_deposit()
        } else {
            false
        }
    }
    fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> {
        // Same shape as `is_deposit`: non-Op variants have no deposit payload.
        if let Self::Op(envelope) = self {
            envelope.as_deposit()
        } else {
            None
        }
    }
}
impl SignerRecoverable for CustomTransaction {
    /// Delegates sender recovery to the wrapped transaction.
    fn recover_signer(&self) -> Result<Address, RecoveryError> {
        match self {
            CustomTransaction::Op(tx) => SignerRecoverable::recover_signer(tx),
            CustomTransaction::Payment(tx) => SignerRecoverable::recover_signer(tx),
        }
    }
    /// Unchecked variant, likewise delegated to the wrapped transaction.
    fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
        match self {
            CustomTransaction::Op(tx) => SignerRecoverable::recover_signer_unchecked(tx),
            CustomTransaction::Payment(tx) => SignerRecoverable::recover_signer_unchecked(tx),
        }
    }
}
impl SignedTransaction for CustomTransaction {
    /// Returns the cached transaction hash of the wrapped transaction.
    fn tx_hash(&self) -> &B256 {
        match self {
            CustomTransaction::Op(tx) => SignedTransaction::tx_hash(tx),
            // The hash is maintained by the `Signed` wrapper.
            CustomTransaction::Payment(tx) => tx.hash(),
        }
    }
}
impl InMemorySize for CustomTransaction {
    /// Heuristic in-memory size of the wrapped transaction.
    fn size(&self) -> usize {
        match self {
            CustomTransaction::Op(tx) => InMemorySize::size(tx),
            CustomTransaction::Payment(tx) => InMemorySize::size(tx),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/primitives/block.rs | examples/custom-node/src/primitives/block.rs | use crate::primitives::{CustomHeader, CustomTransaction};
/// The Block type of this node
pub type Block = alloy_consensus::Block<CustomTransaction, CustomHeader>;
/// The body type of this node
pub type BlockBody = alloy_consensus::BlockBody<CustomTransaction, CustomHeader>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/primitives/header.rs | examples/custom-node/src/primitives/header.rs | use alloy_consensus::Header;
use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, Sealable, B256, B64, U256};
use alloy_rlp::{Encodable, RlpDecodable, RlpEncodable};
use reth_codecs::Compact;
use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, BlockHeader, InMemorySize};
use revm_primitives::keccak256;
use serde::{Deserialize, Serialize};
/// The header type of this node
///
/// This type extends the regular ethereum header with an extension.
#[derive(
    Clone,
    Debug,
    PartialEq,
    Eq,
    Hash,
    derive_more::AsRef,
    derive_more::Deref,
    Default,
    RlpEncodable,
    RlpDecodable,
    Serialize,
    Deserialize,
)]
#[serde(rename_all = "camelCase")]
pub struct CustomHeader {
    /// The regular eth header
    #[as_ref]
    #[deref]
    #[serde(flatten)]
    pub inner: Header,
    /// The extended header field.
    ///
    /// Part of the derived RLP encoding, so it participates in the header
    /// hash computed by [`Sealable::hash_slow`].
    pub extension: u64,
}
impl From<Header> for CustomHeader {
    /// Wraps a plain Ethereum header with a zeroed extension.
    fn from(value: Header) -> Self {
        CustomHeader { inner: value, extension: 0 }
    }
}
impl AsRef<Self> for CustomHeader {
    fn as_ref(&self) -> &Self {
        self
    }
}
impl Sealable for CustomHeader {
    /// Header hash: keccak256 of the full RLP encoding, which includes the
    /// `extension` field via the `RlpEncodable` derive.
    fn hash_slow(&self) -> B256 {
        let mut out = Vec::new();
        self.encode(&mut out);
        keccak256(&out)
    }
}
/// [`alloy_consensus::BlockHeader`] view: every accessor delegates to the
/// wrapped Ethereum [`Header`]; the `extension` field has no equivalent in the
/// standard header interface.
impl alloy_consensus::BlockHeader for CustomHeader {
    fn parent_hash(&self) -> B256 {
        self.inner.parent_hash()
    }
    fn ommers_hash(&self) -> B256 {
        self.inner.ommers_hash()
    }
    fn beneficiary(&self) -> Address {
        self.inner.beneficiary()
    }
    fn state_root(&self) -> B256 {
        self.inner.state_root()
    }
    fn transactions_root(&self) -> B256 {
        self.inner.transactions_root()
    }
    fn receipts_root(&self) -> B256 {
        self.inner.receipts_root()
    }
    fn withdrawals_root(&self) -> Option<B256> {
        self.inner.withdrawals_root()
    }
    fn logs_bloom(&self) -> Bloom {
        self.inner.logs_bloom()
    }
    fn difficulty(&self) -> U256 {
        self.inner.difficulty()
    }
    fn number(&self) -> BlockNumber {
        self.inner.number()
    }
    fn gas_limit(&self) -> u64 {
        self.inner.gas_limit()
    }
    fn gas_used(&self) -> u64 {
        self.inner.gas_used()
    }
    fn timestamp(&self) -> u64 {
        self.inner.timestamp()
    }
    fn mix_hash(&self) -> Option<B256> {
        self.inner.mix_hash()
    }
    fn nonce(&self) -> Option<B64> {
        self.inner.nonce()
    }
    fn base_fee_per_gas(&self) -> Option<u64> {
        self.inner.base_fee_per_gas()
    }
    fn blob_gas_used(&self) -> Option<u64> {
        self.inner.blob_gas_used()
    }
    fn excess_blob_gas(&self) -> Option<u64> {
        self.inner.excess_blob_gas()
    }
    fn parent_beacon_block_root(&self) -> Option<B256> {
        self.inner.parent_beacon_block_root()
    }
    fn requests_hash(&self) -> Option<B256> {
        self.inner.requests_hash()
    }
    fn extra_data(&self) -> &Bytes {
        self.inner.extra_data()
    }
}
impl InMemorySize for CustomHeader {
    /// Heuristic in-memory size: inner header plus the extension word.
    fn size(&self) -> usize {
        self.inner.size() + self.extension.size()
    }
}
impl reth_codecs::Compact for CustomHeader {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: alloy_rlp::bytes::BufMut + AsMut<[u8]>,
    {
        // The inner header's compact identifier is reused as this type's
        // identifier; the extension is simply appended afterwards.
        let identifier = self.inner.to_compact(buf);
        self.extension.to_compact(buf);
        identifier
    }
    fn from_compact(buf: &[u8], identifier: usize) -> (Self, &[u8]) {
        let (eth_header, buf) = Compact::from_compact(buf, identifier);
        // The extension is decoded from whatever remains after the inner
        // header, so it must stay the last field of the compact layout.
        let (extension, buf) = Compact::from_compact(buf, buf.len());
        (Self { inner: eth_header, extension }, buf)
    }
}
impl reth_db_api::table::Compress for CustomHeader {
    type Compressed = Vec<u8>;
    /// Database compression reuses the compact encoding unchanged.
    fn compress_to_buf<B: alloy_primitives::bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
        let _ = Compact::to_compact(self, buf);
    }
}
impl reth_db_api::table::Decompress for CustomHeader {
    /// Inverse of `compress_to_buf`: decodes the compact form from the raw
    /// table value.
    fn decompress(value: &[u8]) -> Result<Self, reth_db_api::DatabaseError> {
        let (obj, _) = Compact::from_compact(value, value.len());
        Ok(obj)
    }
}
// Marker impls wiring `CustomHeader` into reth's header and bincode plumbing.
impl BlockHeader for CustomHeader {}
impl RlpBincode for CustomHeader {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/primitives/tx_custom.rs | examples/custom-node/src/primitives/tx_custom.rs | use crate::primitives::PAYMENT_TX_TYPE_ID;
use alloy_consensus::{
transaction::{RlpEcdsaDecodableTx, RlpEcdsaEncodableTx},
SignableTransaction, Transaction,
};
use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization, Typed2718};
use alloy_primitives::{Address, Bytes, ChainId, Signature, TxKind, B256, U256};
use alloy_rlp::{BufMut, Decodable, Encodable};
use core::mem;
use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, InMemorySize};
/// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)).
#[derive(
    Clone,
    Debug,
    Default,
    PartialEq,
    Eq,
    Hash,
    serde::Serialize,
    serde::Deserialize,
    reth_codecs::Compact,
)]
#[serde(rename_all = "camelCase")]
#[doc(alias = "PaymentTransaction", alias = "TransactionPayment", alias = "PaymentTx")]
pub struct TxPayment {
    /// EIP-155: Simple replay attack protection
    #[serde(with = "alloy_serde::quantity")]
    pub chain_id: ChainId,
    /// A scalar value equal to the number of transactions sent by the sender; formally Tn.
    #[serde(with = "alloy_serde::quantity")]
    pub nonce: u64,
    /// A scalar value equal to the maximum
    /// amount of gas that should be used in executing
    /// this transaction. This is paid up-front, before any
    /// computation is done and may not be increased
    /// later; formally Tg.
    #[serde(with = "alloy_serde::quantity", rename = "gas", alias = "gasLimit")]
    pub gas_limit: u64,
    /// A scalar value equal to the maximum fee per unit of gas the sender is
    /// willing to pay for this transaction (base fee and priority fee
    /// combined).
    ///
    /// As ethereum circulation is around 120mil eth as of 2022 that is around
    /// 120000000000000000000000000 wei we are safe to use u128 as its max number is:
    /// 340282366920938463463374607431768211455
    ///
    /// This is also known as `GasFeeCap`
    #[serde(with = "alloy_serde::quantity")]
    pub max_fee_per_gas: u128,
    /// Max Priority fee that transaction is paying
    ///
    /// As ethereum circulation is around 120mil eth as of 2022 that is around
    /// 120000000000000000000000000 wei we are safe to use u128 as its max number is:
    /// 340282366920938463463374607431768211455
    ///
    /// This is also known as `GasTipCap`
    #[serde(with = "alloy_serde::quantity")]
    pub max_priority_fee_per_gas: u128,
    /// The 160-bit address of the message call’s recipient.
    ///
    /// Always a call target: this tx type cannot create contracts (see
    /// `kind()` / `is_create()` in the `Transaction` impl).
    pub to: Address,
    /// A scalar value equal to the number of Wei to
    /// be transferred to the message call’s recipient or,
    /// in the case of contract creation, as an endowment
    /// to the newly created account; formally Tv.
    pub value: U256,
}
impl TxPayment {
    /// Get the transaction type
    #[doc(alias = "transaction_type")]
    pub const fn tx_type() -> super::tx::TxTypeCustom {
        super::tx::TxTypeCustom::Payment
    }
    /// Calculates a heuristic for the in-memory size of the [TxPayment]
    /// transaction.
    ///
    /// All fields are inline (no heap allocations), so this is the sum of the
    /// individual field sizes, ignoring struct padding.
    #[inline]
    pub fn size(&self) -> usize {
        mem::size_of::<ChainId>() + // chain_id
        mem::size_of::<u64>() + // nonce
        mem::size_of::<u64>() + // gas_limit
        mem::size_of::<u128>() + // max_fee_per_gas
        mem::size_of::<u128>() + // max_priority_fee_per_gas
        mem::size_of::<Address>() + // to
        mem::size_of::<U256>() // value
    }
}
impl RlpEcdsaEncodableTx for TxPayment {
    /// Outputs the length of the transaction's fields, without a RLP header.
    fn rlp_encoded_fields_length(&self) -> usize {
        self.chain_id.length() +
        self.nonce.length() +
        self.max_priority_fee_per_gas.length() +
        self.max_fee_per_gas.length() +
        self.gas_limit.length() +
        self.to.length() +
        self.value.length()
    }
    /// Encodes only the transaction's fields into the desired buffer, without
    /// a RLP header.
    ///
    /// Field order must stay in sync with `rlp_decode_fields` in the
    /// [`RlpEcdsaDecodableTx`] impl.
    fn rlp_encode_fields(&self, out: &mut dyn alloy_rlp::BufMut) {
        self.chain_id.encode(out);
        self.nonce.encode(out);
        self.max_priority_fee_per_gas.encode(out);
        self.max_fee_per_gas.encode(out);
        self.gas_limit.encode(out);
        self.to.encode(out);
        self.value.encode(out);
    }
}
impl RlpEcdsaDecodableTx for TxPayment {
    const DEFAULT_TX_TYPE: u8 = { PAYMENT_TX_TYPE_ID };
    /// Decodes the inner [TxPayment] fields from RLP bytes.
    ///
    /// NOTE: This assumes a RLP header has already been decoded, and _just_
    /// decodes the following RLP fields in the following order:
    ///
    /// - `chain_id`
    /// - `nonce`
    /// - `max_priority_fee_per_gas`
    /// - `max_fee_per_gas`
    /// - `gas_limit`
    /// - `to`
    /// - `value`
    fn rlp_decode_fields(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Field order mirrors `rlp_encode_fields` above.
        Ok(Self {
            chain_id: Decodable::decode(buf)?,
            nonce: Decodable::decode(buf)?,
            max_priority_fee_per_gas: Decodable::decode(buf)?,
            max_fee_per_gas: Decodable::decode(buf)?,
            gas_limit: Decodable::decode(buf)?,
            to: Decodable::decode(buf)?,
            value: Decodable::decode(buf)?,
        })
    }
}
impl Transaction for TxPayment {
    #[inline]
    fn chain_id(&self) -> Option<ChainId> {
        Some(self.chain_id)
    }
    #[inline]
    fn nonce(&self) -> u64 {
        self.nonce
    }
    #[inline]
    fn gas_limit(&self) -> u64 {
        self.gas_limit
    }
    #[inline]
    fn gas_price(&self) -> Option<u128> {
        // Dynamic-fee transaction: there is no legacy gas price.
        None
    }
    #[inline]
    fn max_fee_per_gas(&self) -> u128 {
        self.max_fee_per_gas
    }
    #[inline]
    fn max_priority_fee_per_gas(&self) -> Option<u128> {
        Some(self.max_priority_fee_per_gas)
    }
    #[inline]
    fn max_fee_per_blob_gas(&self) -> Option<u128> {
        // Not a blob (EIP-4844) transaction.
        None
    }
    #[inline]
    fn priority_fee_or_price(&self) -> u128 {
        self.max_priority_fee_per_gas
    }
    /// Standard EIP-1559 effective gas price: with a known base fee the sender
    /// pays `min(max_fee_per_gas, base_fee + max_priority_fee_per_gas)`;
    /// without one, the fee cap itself is returned.
    fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
        base_fee.map_or(self.max_fee_per_gas, |base_fee| {
            // if the tip is greater than the max priority fee per gas, set it to the max
            // priority fee per gas + base fee
            let tip = self.max_fee_per_gas.saturating_sub(base_fee as u128);
            if tip > self.max_priority_fee_per_gas {
                self.max_priority_fee_per_gas + base_fee as u128
            } else {
                // otherwise return the max fee per gas
                self.max_fee_per_gas
            }
        })
    }
    #[inline]
    fn is_dynamic_fee(&self) -> bool {
        true
    }
    #[inline]
    fn kind(&self) -> TxKind {
        // `to` is a plain address, so this is always a call.
        TxKind::Call(self.to)
    }
    #[inline]
    fn is_create(&self) -> bool {
        false
    }
    #[inline]
    fn value(&self) -> U256 {
        self.value
    }
    #[inline]
    fn input(&self) -> &Bytes {
        // No input data
        static EMPTY_BYTES: Bytes = Bytes::new();
        &EMPTY_BYTES
    }
    #[inline]
    fn access_list(&self) -> Option<&AccessList> {
        None
    }
    #[inline]
    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
        None
    }
    #[inline]
    fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
        None
    }
}
impl Typed2718 for TxPayment {
    /// EIP-2718 type byte for the payment transaction (42 / 0x2A).
    fn ty(&self) -> u8 {
        PAYMENT_TX_TYPE_ID
    }
}
impl SignableTransaction<Signature> for TxPayment {
    fn set_chain_id(&mut self, chain_id: ChainId) {
        self.chain_id = chain_id;
    }
    /// Signing payload: the EIP-2718 type byte followed by the RLP encoding
    /// of the unsigned transaction.
    fn encode_for_signing(&self, out: &mut dyn alloy_rlp::BufMut) {
        out.put_u8(Self::tx_type().ty());
        self.encode(out)
    }
    fn payload_len_for_signature(&self) -> usize {
        // RLP length plus one byte for the type prefix.
        self.length() + 1
    }
}
impl Encodable for TxPayment {
    /// RLP encoding of the unsigned transaction.
    fn encode(&self, out: &mut dyn BufMut) {
        self.rlp_encode(out);
    }
    fn length(&self) -> usize {
        self.rlp_encoded_length()
    }
}
impl Decodable for TxPayment {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        Self::rlp_decode(buf)
    }
}
impl InMemorySize for TxPayment {
    /// Delegates to the inherent heuristic [`TxPayment::size`].
    fn size(&self) -> usize {
        TxPayment::size(self)
    }
}
impl RlpBincode for TxPayment {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/primitives/mod.rs | examples/custom-node/src/primitives/mod.rs | //! Contains the primitive types of this node.
pub mod header;
pub use header::*;
pub mod block;
pub use block::*;
pub mod tx;
pub use tx::*;
pub mod tx_type;
pub use tx_type::*;
pub mod tx_custom;
pub use tx_custom::*;
use reth_ethereum::primitives::NodePrimitives;
use reth_op::OpReceipt;
/// The set of primitive types used by this custom node.
///
/// Swaps in the node's own block, header, body and signed-transaction types
/// while reusing the Optimism receipt type.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct CustomNodePrimitives;
impl NodePrimitives for CustomNodePrimitives {
    type Block = Block;
    type BlockHeader = CustomHeader;
    type BlockBody = BlockBody;
    type SignedTx = CustomTransaction;
    type Receipt = OpReceipt;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-node/src/primitives/tx_type.rs | examples/custom-node/src/primitives/tx_type.rs | use crate::primitives::TxTypeCustom;
use alloy_primitives::bytes::{Buf, BufMut};
use reth_codecs::{txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG, Compact};
pub const PAYMENT_TX_TYPE_ID: u8 = 42;
impl Compact for TxTypeCustom {
fn to_compact<B>(&self, buf: &mut B) -> usize
where
B: BufMut + AsMut<[u8]>,
{
match self {
Self::Op(ty) => ty.to_compact(buf),
Self::Payment => {
buf.put_u8(PAYMENT_TX_TYPE_ID);
COMPACT_EXTENDED_IDENTIFIER_FLAG
}
}
}
fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) {
match identifier {
COMPACT_EXTENDED_IDENTIFIER_FLAG => (
{
let extended_identifier = buf.get_u8();
match extended_identifier {
PAYMENT_TX_TYPE_ID => Self::Payment,
_ => panic!("Unsupported TxType identifier: {extended_identifier}"),
}
},
buf,
),
v => Self::from_compact(buf, v),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/lib.rs | examples/bsc-p2p/src/lib.rs | pub mod block_import;
pub mod chainspec;
pub mod handshake;
pub mod upgrade_status;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/upgrade_status.rs | examples/bsc-p2p/src/upgrade_status.rs | //! Implement BSC upgrade message which is required during handshake with other BSC clients, e.g.,
//! geth.
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::{Buf, BufMut, Bytes, BytesMut};
/// The message id for the upgrade status message, used in the BSC handshake.
const UPGRADE_STATUS_MESSAGE_ID: u8 = 0x0b;
/// UpdateStatus packet introduced in BSC to notify peers whether to broadcast transaction or not.
/// It is used during the p2p handshake.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct UpgradeStatus {
    /// Extension for support customized features for BSC.
    pub extension: UpgradeStatusExtension,
}
impl Encodable for UpgradeStatus {
    /// Wire layout: the message-id byte followed by the RLP-encoded extension.
    fn encode(&self, out: &mut dyn BufMut) {
        UPGRADE_STATUS_MESSAGE_ID.encode(out);
        self.extension.encode(out);
    }
}
impl Decodable for UpgradeStatus {
    /// Decodes an upgrade status message: a single message-id byte followed by
    /// the RLP-encoded [`UpgradeStatusExtension`], mirroring [`Encodable`]
    /// above.
    ///
    /// NOTE: the previous implementation called `buf.advance(1)` after
    /// `u8::decode` had already consumed the message-id byte, which skipped
    /// the first byte of the extension's RLP header and broke the
    /// encode/decode round trip.
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        let message_id = u8::decode(buf)?;
        if message_id != UPGRADE_STATUS_MESSAGE_ID {
            return Err(alloy_rlp::Error::Custom("Invalid message ID"));
        }
        let extension = UpgradeStatusExtension::decode(buf)?;
        Ok(Self { extension })
    }
}
impl UpgradeStatus {
    /// Encode the upgrade status message into RLPx bytes.
    ///
    /// Convenience wrapper used by the BSC handshake to obtain an owned frame
    /// ready to be sent on the stream.
    pub fn into_rlpx(self) -> Bytes {
        let mut out = BytesMut::new();
        self.encode(&mut out);
        out.freeze()
    }
}
/// The extension to define whether to enable or disable the flag.
/// This flag currently is ignored, and will be supported later.
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct UpgradeStatusExtension {
// TODO: support disable_peer_tx_broadcast flag
/// To notify a peer to disable the broadcast of transactions or not.
pub disable_peer_tx_broadcast: bool,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/handshake.rs | examples/bsc-p2p/src/handshake.rs | use crate::upgrade_status::{UpgradeStatus, UpgradeStatusExtension};
use alloy_rlp::Decodable;
use futures::SinkExt;
use reth_eth_wire::{
errors::{EthHandshakeError, EthStreamError},
handshake::{EthRlpxHandshake, EthereumEthHandshake, UnauthEth},
UnifiedStatus,
};
use reth_eth_wire_types::{DisconnectReason, EthVersion};
use reth_ethereum_forks::ForkFilter;
use std::{future::Future, pin::Pin};
use tokio::time::{timeout, Duration};
use tokio_stream::StreamExt;
use tracing::debug;
#[derive(Debug, Default)]
/// The Binance Smart Chain (BSC) P2P handshake.
#[non_exhaustive]
pub struct BscHandshake;
impl BscHandshake {
    /// Negotiate the upgrade status message.
    ///
    /// For peers past `eth/66`, BSC requires an extra `UpgradeStatus`
    /// exchange after the regular eth status handshake: we send our message
    /// (allowing the peer to broadcast transactions), then expect the peer's
    /// message in return. A missing or undecodable reply disconnects the
    /// peer and fails the handshake. Older protocol versions pass through
    /// unchanged.
    pub async fn upgrade_status(
        unauth: &mut dyn UnauthEth,
        negotiated_status: UnifiedStatus,
    ) -> Result<UnifiedStatus, EthStreamError> {
        if negotiated_status.version > EthVersion::Eth66 {
            // Send upgrade status message allowing peer to broadcast transactions
            let upgrade_msg = UpgradeStatus {
                extension: UpgradeStatusExtension { disable_peer_tx_broadcast: false },
            };
            unauth.start_send_unpin(upgrade_msg.into_rlpx())?;
            // Receive peer's upgrade status response
            let their_msg = match unauth.next().await {
                Some(Ok(msg)) => msg,
                Some(Err(e)) => return Err(EthStreamError::from(e)),
                None => {
                    // Stream ended without a reply: request a clean disconnect.
                    unauth.disconnect(DisconnectReason::DisconnectRequested).await?;
                    return Err(EthStreamError::EthHandshakeError(EthHandshakeError::NoResponse));
                }
            };
            // Decode their response
            match UpgradeStatus::decode(&mut their_msg.as_ref()).map_err(|e| {
                debug!("Decode error in BSC handshake: msg={their_msg:x}");
                EthStreamError::InvalidMessage(e.into())
            }) {
                Ok(_) => {
                    // Successful handshake
                    return Ok(negotiated_status);
                }
                Err(_) => {
                    // Peer sent something other than a valid upgrade status.
                    unauth.disconnect(DisconnectReason::ProtocolBreach).await?;
                    return Err(EthStreamError::EthHandshakeError(
                        EthHandshakeError::NonStatusMessageInHandshake,
                    ));
                }
            }
        }
        Ok(negotiated_status)
    }
}
impl EthRlpxHandshake for BscHandshake {
    /// Runs the standard Ethereum eth handshake followed by the BSC-specific
    /// upgrade-status exchange, with both phases bounded by `timeout_limit`.
    fn handshake<'a>(
        &'a self,
        unauth: &'a mut dyn UnauthEth,
        status: UnifiedStatus,
        fork_filter: ForkFilter,
        timeout_limit: Duration,
    ) -> Pin<Box<dyn Future<Output = Result<UnifiedStatus, EthStreamError>> + 'a + Send>> {
        Box::pin(async move {
            let fut = async {
                let negotiated_status =
                    EthereumEthHandshake(unauth).eth_handshake(status, fork_filter).await?;
                Self::upgrade_status(unauth, negotiated_status).await
            };
            // A single timeout covers both handshake phases.
            timeout(timeout_limit, fut).await.map_err(|_| EthStreamError::StreamTimeout)?
        })
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/chainspec.rs | examples/bsc-p2p/src/chainspec.rs | //! Chain specification for BSC, credits to: <https://github.com/bnb-chain/reth/blob/main/crates/bsc/chainspec/src/bsc.rs>
use alloy_primitives::{BlockHash, U256};
use reth_chainspec::{
hardfork, make_genesis_header, BaseFeeParams, BaseFeeParamsKind, Chain, ChainHardforks,
ChainSpec, EthereumHardfork, ForkCondition, Hardfork, Head, NamedChain,
};
use reth_network_peers::NodeRecord;
use reth_primitives::SealedHeader;
use std::{str::FromStr, sync::Arc};
hardfork!(
/// The name of a bsc hardfork.
///
/// When building a list of hardforks for a chain, it's still expected to mix with [`EthereumHardfork`].
BscHardfork {
/// BSC `Ramanujan` hardfork
Ramanujan,
/// BSC `Niels` hardfork
Niels,
/// BSC `MirrorSync` hardfork
MirrorSync,
/// BSC `Bruno` hardfork
Bruno,
/// BSC `Euler` hardfork
Euler,
/// BSC `Nano` hardfork
Nano,
/// BSC `Moran` hardfork
Moran,
/// BSC `Gibbs` hardfork
Gibbs,
/// BSC `Planck` hardfork
Planck,
/// BSC `Luban` hardfork
Luban,
/// BSC `Plato` hardfork
Plato,
/// BSC `Hertz` hardfork
Hertz,
/// BSC `HertzFix` hardfork
HertzFix,
/// BSC `Kepler` hardfork
Kepler,
/// BSC `Feynman` hardfork
Feynman,
/// BSC `FeynmanFix` hardfork
FeynmanFix,
/// BSC `Haber` hardfork
Haber,
/// BSC `HaberFix` hardfork
HaberFix,
/// BSC `Bohr` hardfork
Bohr,
/// BSC `Pascal` hardfork
Pascal,
/// BSC `Prague` hardfork
Prague,
}
);
impl BscHardfork {
/// Bsc mainnet list of hardforks.
fn bsc_mainnet() -> ChainHardforks {
ChainHardforks::new(vec![
(EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
(EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
(Self::Ramanujan.boxed(), ForkCondition::Block(0)),
(Self::Niels.boxed(), ForkCondition::Block(0)),
(Self::MirrorSync.boxed(), ForkCondition::Block(5184000)),
(Self::Bruno.boxed(), ForkCondition::Block(13082000)),
(Self::Euler.boxed(), ForkCondition::Block(18907621)),
(Self::Nano.boxed(), ForkCondition::Block(21962149)),
(Self::Moran.boxed(), ForkCondition::Block(22107423)),
(Self::Gibbs.boxed(), ForkCondition::Block(23846001)),
(Self::Planck.boxed(), ForkCondition::Block(27281024)),
(Self::Luban.boxed(), ForkCondition::Block(29020050)),
(Self::Plato.boxed(), ForkCondition::Block(30720096)),
(EthereumHardfork::Berlin.boxed(), ForkCondition::Block(31302048)),
(EthereumHardfork::London.boxed(), ForkCondition::Block(31302048)),
(Self::Hertz.boxed(), ForkCondition::Block(31302048)),
(Self::HertzFix.boxed(), ForkCondition::Block(34140700)),
(EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1705996800)),
(Self::Kepler.boxed(), ForkCondition::Timestamp(1705996800)),
(Self::Feynman.boxed(), ForkCondition::Timestamp(1713419340)),
(Self::FeynmanFix.boxed(), ForkCondition::Timestamp(1713419340)),
(EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1718863500)),
(Self::Haber.boxed(), ForkCondition::Timestamp(1718863500)),
(Self::HaberFix.boxed(), ForkCondition::Timestamp(1727316120)),
(Self::Bohr.boxed(), ForkCondition::Timestamp(1727317200)),
(Self::Pascal.boxed(), ForkCondition::Timestamp(1742436600)),
(Self::Prague.boxed(), ForkCondition::Timestamp(1742436600)),
])
}
}
/// Builds the BSC mainnet [`ChainSpec`].
///
/// The genesis file is parsed once and the hardfork schedule is built once;
/// both are then shared between the sealed genesis header and the spec itself
/// (the previous version deserialized `genesis.json` and rebuilt the hardfork
/// list twice).
pub fn bsc_chain_spec() -> Arc<ChainSpec> {
    let genesis = serde_json::from_str(include_str!("genesis.json"))
        .expect("Can't deserialize BSC Mainnet genesis json");
    let hardforks = BscHardfork::bsc_mainnet();
    // Seal the genesis header with the known BSC mainnet genesis hash.
    let genesis_header = SealedHeader::new(
        make_genesis_header(&genesis, &hardforks),
        BlockHash::from_str("0x0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b")
            .unwrap(),
    );
    ChainSpec {
        chain: Chain::from_named(NamedChain::BinanceSmartChain),
        genesis,
        // Final difficulty pinned at zero from block 0.
        paris_block_and_final_difficulty: Some((0, U256::from(0))),
        hardforks,
        deposit_contract: None,
        // BSC uses a fixed, minimal base-fee schedule.
        base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::new(1, 1)),
        prune_delete_limit: 3500,
        genesis_header,
        ..Default::default()
    }
    .into()
}
/// BSC mainnet bootnodes <https://github.com/bnb-chain/bsc/blob/master/params/bootnodes.go#L23>
static BOOTNODES : [&str; 6] = [
"enode://433c8bfdf53a3e2268ccb1b829e47f629793291cbddf0c76ae626da802f90532251fc558e2e0d10d6725e759088439bf1cd4714716b03a259a35d4b2e4acfa7f@52.69.102.73:30311",
"enode://571bee8fb902a625942f10a770ccf727ae2ba1bab2a2b64e121594a99c9437317f6166a395670a00b7d93647eacafe598b6bbcef15b40b6d1a10243865a3e80f@35.73.84.120:30311",
"enode://fac42fb0ba082b7d1eebded216db42161163d42e4f52c9e47716946d64468a62da4ba0b1cac0df5e8bf1e5284861d757339751c33d51dfef318be5168803d0b5@18.203.152.54:30311",
"enode://3063d1c9e1b824cfbb7c7b6abafa34faec6bb4e7e06941d218d760acdd7963b274278c5c3e63914bd6d1b58504c59ec5522c56f883baceb8538674b92da48a96@34.250.32.100:30311",
"enode://ad78c64a4ade83692488aa42e4c94084516e555d3f340d9802c2bf106a3df8868bc46eae083d2de4018f40e8d9a9952c32a0943cd68855a9bc9fd07aac982a6d@34.204.214.24:30311",
"enode://5db798deb67df75d073f8e2953dad283148133acb520625ea804c9c4ad09a35f13592a762d8f89056248f3889f6dcc33490c145774ea4ff2966982294909b37a@107.20.191.97:30311",
];
/// Parses the hard-coded BSC mainnet bootnode enode URLs into [`NodeRecord`]s.
pub fn boot_nodes() -> Vec<NodeRecord> {
    // The URLs are compile-time constants, so a parse failure is a programming
    // error; `expect` documents that invariant. (`BOOTNODES[..]` was a
    // redundant full-range index.)
    BOOTNODES.iter().map(|record| record.parse().expect("invalid bootnode enode URL")).collect()
}
/// A recent BSC mainnet head used to advertise our fork id during discovery.
///
/// The timestamp matches the Pascal/Prague activation time configured in
/// [`BscHardfork::bsc_mainnet`], so peers see us on the latest fork.
pub fn head() -> Head {
    Head { number: 40_000_000, timestamp: 1742436600, ..Default::default() }
}
#[cfg(test)]
mod tests {
use crate::chainspec::{bsc_chain_spec, head};
use alloy_primitives::hex;
use reth_chainspec::{ForkHash, ForkId};
#[test]
fn can_create_forkid() {
let b = hex::decode("ce18f5d3").unwrap();
let expected = [b[0], b[1], b[2], b[3]];
let expected_f_id = ForkId { hash: ForkHash(expected), next: 0 };
let fork_id = bsc_chain_spec().fork_id(&head());
assert_eq!(fork_id, expected_f_id);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/main.rs | examples/bsc-p2p/src/main.rs | //! Example for how to hook into the bsc p2p network
//!
//! Run with
//!
//! ```sh
//! cargo run -p bsc-p2p
//! ```
//!
//! This launches a regular reth node overriding the engine api payload builder with our custom.
//!
//! Credits to: <https://merkle.io/blog/modifying-reth-to-build-the-fastest-transaction-network-on-bsc-and-polygon>
use chainspec::{boot_nodes, bsc_chain_spec, head};
use handshake::BscHandshake;
use reth_discv4::Discv4ConfigBuilder;
use reth_network::{
EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider,
NetworkManager, PeersInfo,
};
use reth_network_api::events::{PeerEvent, SessionInfo};
use reth_provider::noop::NoopProvider;
use reth_tracing::{
tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, Tracer,
};
use secp256k1::{rand, SecretKey};
use std::{
net::{Ipv4Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use tokio_stream::StreamExt;
use tracing::info;
mod block_import;
mod chainspec;
mod handshake;
mod upgrade_status;
#[tokio::main]
async fn main() {
let _ = RethTracer::new()
.with_stdout(LayerInfo::new(
LogFormat::Terminal,
LevelFilter::INFO.to_string(),
"".to_string(),
Some("always".to_string()),
))
.init();
let local_addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 30303);
let secret_key = SecretKey::new(&mut rand::thread_rng());
let bsc_boot_nodes = boot_nodes();
let net_cfg = NetworkConfig::builder(secret_key)
.boot_nodes(bsc_boot_nodes.clone())
.set_head(head())
.with_pow()
.listener_addr(local_addr)
.eth_rlpx_handshake(Arc::new(BscHandshake::default()))
.build(NoopProvider::eth(bsc_chain_spec()));
let net_cfg = net_cfg.set_discovery_v4(
Discv4ConfigBuilder::default()
.add_boot_nodes(bsc_boot_nodes)
.lookup_interval(Duration::from_millis(500))
.build(),
);
let net_manager = NetworkManager::<EthNetworkPrimitives>::new(net_cfg).await.unwrap();
let net_handle = net_manager.handle().clone();
let mut events = net_handle.event_listener();
tokio::spawn(net_manager);
while let Some(evt) = events.next().await {
match evt {
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { status, client_version, peer_id, .. } = info;
info!(peers=%net_handle.num_connected_peers() , %peer_id, chain = %status.chain, ?client_version, "Session established with a new peer.");
}
NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => {
info!(peers=%net_handle.num_connected_peers() , %peer_id, ?reason, "Session closed.");
}
_ => {}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/block_import/parlia.rs | examples/bsc-p2p/src/block_import/parlia.rs | use alloy_primitives::{BlockNumber, B256};
use reth_provider::{BlockNumReader, ProviderError};
use std::cmp::Ordering;
/// Errors that can occur in Parlia consensus
#[derive(Debug, thiserror::Error)]
pub enum ParliaConsensusErr {
    /// Error from the provider
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// Head block hash not found
    #[error("Head block hash not found")]
    HeadHashNotFound,
}
/// Parlia consensus implementation
///
/// Generic over any `provider` able to read block numbers and hashes.
pub struct ParliaConsensus<P> {
    /// The provider for reading block information
    provider: P,
}
impl<P> ParliaConsensus<P> {
    /// Create a new Parlia consensus instance
    pub fn new(provider: P) -> Self {
        Self { provider }
    }
}
impl<P> ParliaConsensus<P>
where
    P: BlockNumReader + Clone,
{
    /// Resolves the canonical head between the currently stored head and a
    /// newly seen block, following Parlia fork-choice:
    ///
    /// 1. the higher block number wins;
    /// 2. at equal height, the numerically lower hash wins.
    pub(crate) fn canonical_head(
        &self,
        hash: B256,
        number: BlockNumber,
    ) -> Result<B256, ParliaConsensusErr> {
        // Load the current canonical head (number + hash) from the provider.
        let (head_number, head_hash) = {
            let best = self.provider.best_block_number()?;
            let best_hash =
                self.provider.block_hash(best)?.ok_or(ParliaConsensusErr::HeadHashNotFound)?;
            (best, best_hash)
        };
        Ok(match number.cmp(&head_number) {
            // Behind the stored head: keep the current canonical hash.
            Ordering::Less => head_hash,
            // Tie on height: deterministically prefer the smaller hash.
            Ordering::Equal => hash.min(head_hash),
            // Strictly higher block: the new block becomes the head.
            Ordering::Greater => hash,
        })
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use alloy_primitives::hex;
    use reth_chainspec::ChainInfo;
    use reth_provider::BlockHashReader;

    use super::*;

    /// Minimal in-memory provider that knows exactly one block: the head.
    #[derive(Clone)]
    struct MockProvider {
        blocks: HashMap<BlockNumber, B256>,
        head_number: BlockNumber,
        head_hash: B256,
    }

    impl MockProvider {
        /// Build a provider whose canonical head is `(head_number, head_hash)`.
        fn new(head_number: BlockNumber, head_hash: B256) -> Self {
            let mut blocks = HashMap::new();
            blocks.insert(head_number, head_hash);
            Self { blocks, head_number, head_hash }
        }
    }

    impl BlockHashReader for MockProvider {
        fn block_hash(&self, number: BlockNumber) -> Result<Option<B256>, ProviderError> {
            Ok(self.blocks.get(&number).copied())
        }

        fn canonical_hashes_range(
            &self,
            _start: BlockNumber,
            _end: BlockNumber,
        ) -> Result<Vec<B256>, ProviderError> {
            // Not exercised by these tests.
            Ok(vec![])
        }
    }

    impl BlockNumReader for MockProvider {
        fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
            Ok(ChainInfo { best_hash: self.head_hash, best_number: self.head_number })
        }

        fn best_block_number(&self) -> Result<BlockNumber, ProviderError> {
            Ok(self.head_number)
        }

        fn last_block_number(&self) -> Result<BlockNumber, ProviderError> {
            Ok(self.head_number)
        }

        fn block_number(&self, hash: B256) -> Result<Option<BlockNumber>, ProviderError> {
            // Reverse lookup over the single stored entry.
            Ok(self.blocks.iter().find_map(|(num, h)| (*h == hash).then_some(*num)))
        }
    }

    #[test]
    fn test_canonical_head() {
        let hash1 = B256::from_slice(&hex!(
            "1111111111111111111111111111111111111111111111111111111111111111"
        ));
        let hash2 = B256::from_slice(&hex!(
            "2222222222222222222222222222222222222222222222222222222222222222"
        ));

        // Tuples: ((incoming hash, incoming number, head number, head hash), expected head)
        let test_cases = [
            ((hash1, 2, 1, hash2), hash1), // Higher block wins
            ((hash1, 1, 2, hash2), hash2), // Lower block stays
            ((hash1, 1, 1, hash2), hash1), // Same height, lower hash wins
            ((hash2, 1, 1, hash1), hash1), // Same height, lower hash stays
        ];

        for ((curr_hash, curr_num, head_num, head_hash), expected) in test_cases {
            let provider = MockProvider::new(head_num, head_hash);
            let consensus = ParliaConsensus::new(provider);
            assert_eq!(consensus.canonical_head(curr_hash, curr_num).unwrap(), expected);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/block_import/service.rs | examples/bsc-p2p/src/block_import/service.rs | use super::handle::ImportHandle;
use crate::block_import::parlia::{ParliaConsensus, ParliaConsensusErr};
use alloy_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};
use futures::{future::Either, stream::FuturesUnordered, StreamExt};
use reth_engine_primitives::{ConsensusEngineHandle, EngineTypes};
use reth_eth_wire::NewBlock;
use reth_network::{
import::{BlockImportError, BlockImportEvent, BlockImportOutcome, BlockValidation},
message::NewBlockMessage,
};
use reth_network_api::PeerId;
use reth_payload_primitives::{BuiltPayload, EngineApiMessageVersion, PayloadTypes};
use reth_primitives::NodePrimitives;
use reth_primitives_traits::{AlloyBlockHeader, Block};
use reth_provider::{BlockHashReader, BlockNumReader};
use std::{
future::Future,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender};
/// The block type for a given engine, derived from its built payload's primitives.
pub type BscBlock<T> =
    <<<T as PayloadTypes>::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block;

/// Network message containing a new block announcement.
pub(crate) type BlockMsg<T> = NewBlockMessage<NewBlock<BscBlock<T>>>;

/// Import outcome for a block (per-peer success or failure).
pub(crate) type Outcome<T> = BlockImportOutcome<NewBlock<BscBlock<T>>>;

/// Import event for a block, as emitted back to the network layer.
pub(crate) type ImportEvent<T> = BlockImportEvent<NewBlock<BscBlock<T>>>;

/// Future that processes a block import (engine `new_payload`) and returns its outcome.
type PayloadFut<T> = Pin<Box<dyn Future<Output = Outcome<T>> + Send + Sync>>;

/// Future that processes a forkchoice update and returns its outcome.
type FcuFut<T> = Pin<Box<dyn Future<Output = Outcome<T>> + Send + Sync>>;

/// Channel message type for incoming blocks: the announcement plus the announcing peer.
pub(crate) type IncomingBlock<T> = (BlockMsg<T>, PeerId);
/// A service that handles bidirectional block import communication with the network.
/// It receives new blocks from the network via `from_network` channel and sends back
/// import outcomes via `to_network` channel.
pub struct ImportService<Provider, T>
where
    Provider: BlockNumReader + Clone,
    T: PayloadTypes,
{
    /// The handle to communicate with the engine service
    engine: ConsensusEngineHandle<T>,
    /// The consensus implementation (Parlia head-selection rules)
    consensus: Arc<ParliaConsensus<Provider>>,
    /// Receive the new block from the network
    from_network: UnboundedReceiver<IncomingBlock<T>>,
    /// Send the event of the import to the network
    to_network: UnboundedSender<ImportEvent<T>>,
    /// Pending block imports; each incoming block spawns one payload future
    /// (Left) and one forkchoice future (Right).
    pending_imports: FuturesUnordered<Either<PayloadFut<T>, FcuFut<T>>>,
}
impl<Provider, T> ImportService<Provider, T>
where
    Provider: BlockNumReader + Clone + 'static,
    T: PayloadTypes,
{
    /// Create a new block import service together with its [`ImportHandle`].
    ///
    /// The handle gets the sending side of the incoming-block channel and the receiving
    /// side of the outcome channel, so callers interact only through the handle.
    pub fn new(
        consensus: Arc<ParliaConsensus<Provider>>,
        engine: ConsensusEngineHandle<T>,
    ) -> (Self, ImportHandle<T>) {
        let (to_import, from_network) = mpsc::unbounded_channel();
        let (to_network, import_outcome) = mpsc::unbounded_channel();
        (
            Self {
                engine,
                consensus,
                from_network,
                to_network,
                pending_imports: FuturesUnordered::new(),
            },
            ImportHandle::new(to_import, import_outcome),
        )
    }

    /// Process a new payload and return the outcome.
    ///
    /// Seals the announced block, converts it to an engine payload, and submits it via
    /// the engine's `new_payload`. Only a `Valid` status is treated as a successful
    /// import; `Invalid`, `Syncing`, and any other status are reported as errors.
    fn new_payload(&self, block: BlockMsg<T>, peer_id: PeerId) -> PayloadFut<T> {
        let engine = self.engine.clone();
        Box::pin(async move {
            let sealed_block = block.block.block.clone().seal();
            let payload = T::block_to_payload(sealed_block);
            match engine.new_payload(payload).await {
                Ok(payload_status) => match payload_status.status {
                    PayloadStatusEnum::Valid => Outcome::<T> {
                        peer: peer_id,
                        result: Ok(BlockValidation::ValidBlock { block }),
                    },
                    PayloadStatusEnum::Invalid { validation_error } => Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other(validation_error.into())),
                    },
                    // Syncing means the engine can't judge the block yet; surfaced as an
                    // error so the network layer doesn't treat it as validated.
                    PayloadStatusEnum::Syncing => Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other(
                            PayloadStatusEnum::Syncing.as_str().into(),
                        )),
                    },
                    _ => Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other("Unsupported payload status".into())),
                    },
                },
                Err(err) => {
                    Outcome::<T> { peer: peer_id, result: Err(BlockImportError::Other(err.into())) }
                }
            }
        })
    }

    /// Process a forkchoice update and return the outcome.
    ///
    /// Asks the Parlia consensus which hash should be the canonical head (incoming block
    /// vs. current head), then issues a forkchoice update with head, safe, and finalized
    /// all set to that hash.
    fn update_fork_choice(&self, block: BlockMsg<T>, peer_id: PeerId) -> FcuFut<T> {
        let engine = self.engine.clone();
        let consensus = self.consensus.clone();
        let sealed_block = block.block.block.clone().seal();
        let hash = sealed_block.hash();
        let number = sealed_block.number();
        Box::pin(async move {
            let head_block_hash = match consensus.canonical_head(hash, number) {
                Ok(hash) => hash,
                Err(ParliaConsensusErr::Provider(e)) => {
                    return Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other(e.into())),
                    }
                }
                Err(ParliaConsensusErr::HeadHashNotFound) => {
                    return Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other("Current head hash not found".into())),
                    }
                }
            };
            // Safe and finalized mirror the head; this example does not track them
            // separately.
            let state = ForkchoiceState {
                head_block_hash,
                safe_block_hash: head_block_hash,
                finalized_block_hash: head_block_hash,
            };
            match engine.fork_choice_updated(state, None, EngineApiMessageVersion::default()).await
            {
                Ok(response) => match response.payload_status.status {
                    PayloadStatusEnum::Valid => Outcome::<T> {
                        peer: peer_id,
                        result: Ok(BlockValidation::ValidBlock { block }),
                    },
                    PayloadStatusEnum::Invalid { validation_error } => Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other(validation_error.into())),
                    },
                    PayloadStatusEnum::Syncing => Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other(
                            PayloadStatusEnum::Syncing.as_str().into(),
                        )),
                    },
                    _ => Outcome::<T> {
                        peer: peer_id,
                        result: Err(BlockImportError::Other(
                            "Unsupported forkchoice payload status".into(),
                        )),
                    },
                },
                Err(err) => {
                    Outcome::<T> { peer: peer_id, result: Err(BlockImportError::Other(err.into())) }
                }
            }
        })
    }

    /// Add a new block import task to the pending imports.
    ///
    /// Every incoming block yields TWO pending futures (new-payload and forkchoice),
    /// so callers should expect two outcomes per block.
    fn on_new_block(&mut self, block: BlockMsg<T>, peer_id: PeerId) {
        let payload_fut = self.new_payload(block.clone(), peer_id);
        self.pending_imports.push(Either::Left(payload_fut));
        let fcu_fut = self.update_fork_choice(block, peer_id);
        self.pending_imports.push(Either::Right(fcu_fut));
    }
}
impl<Provider, T> Future for ImportService<Provider, T>
where
    Provider: BlockNumReader + BlockHashReader + Clone + 'static + Unpin,
    T: PayloadTypes,
{
    type Output = Result<(), Box<dyn std::error::Error>>;

    /// Drives the service: drains incoming blocks, then drains completed imports.
    /// Never resolves `Ok`; only resolves with `Err` if the outcome channel is closed.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        // Receive new blocks from network
        while let Poll::Ready(Some((block, peer_id))) = this.from_network.poll_recv(cx) {
            this.on_new_block(block, peer_id);
        }
        // Process completed imports and send events to network
        while let Poll::Ready(Some(outcome)) = this.pending_imports.poll_next_unpin(cx) {
            if let Err(e) = this.to_network.send(BlockImportEvent::Outcome(outcome)) {
                return Poll::Ready(Err(Box::new(e)));
            }
        }
        // NOTE(review): if `from_network` yields `Ready(None)` (sender dropped) and
        // `pending_imports` is empty, this returns `Pending` with no waker scheduled
        // to fire again — the task would hang rather than terminate. Confirm intended.
        Poll::Pending
    }
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::{B256, U128};
use alloy_rpc_types::engine::PayloadStatus;
use reth_chainspec::ChainInfo;
use reth_engine_primitives::{BeaconEngineMessage, OnForkChoiceUpdated};
use reth_eth_wire::NewBlock;
use reth_node_ethereum::EthEngineTypes;
use reth_primitives::Block;
use reth_provider::ProviderError;
use std::{
sync::Arc,
task::{Context, Poll},
};
#[tokio::test]
async fn can_handle_valid_block() {
let mut fixture = TestFixture::new(EngineResponses::both_valid()).await;
fixture
.assert_block_import(|outcome| {
matches!(
outcome,
BlockImportEvent::Outcome(BlockImportOutcome {
peer: _,
result: Ok(BlockValidation::ValidBlock { .. })
})
)
})
.await;
}
#[tokio::test]
async fn can_handle_invalid_new_payload() {
let mut fixture = TestFixture::new(EngineResponses::invalid_new_payload()).await;
fixture
.assert_block_import(|outcome| {
matches!(
outcome,
BlockImportEvent::Outcome(BlockImportOutcome {
peer: _,
result: Err(BlockImportError::Other(_))
})
)
})
.await;
}
#[tokio::test]
async fn can_handle_invalid_fcu() {
let mut fixture = TestFixture::new(EngineResponses::invalid_fcu()).await;
fixture
.assert_block_import(|outcome| {
matches!(
outcome,
BlockImportEvent::Outcome(BlockImportOutcome {
peer: _,
result: Err(BlockImportError::Other(_))
})
)
})
.await;
}
#[derive(Clone)]
struct MockProvider;
impl BlockNumReader for MockProvider {
fn chain_info(&self) -> Result<ChainInfo, ProviderError> {
unimplemented!()
}
fn best_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn last_block_number(&self) -> Result<u64, ProviderError> {
Ok(0)
}
fn block_number(&self, _hash: B256) -> Result<Option<u64>, ProviderError> {
Ok(None)
}
}
impl BlockHashReader for MockProvider {
fn block_hash(&self, _number: u64) -> Result<Option<B256>, ProviderError> {
Ok(Some(B256::ZERO))
}
fn canonical_hashes_range(
&self,
_start: u64,
_end: u64,
) -> Result<Vec<B256>, ProviderError> {
Ok(vec![])
}
}
/// Response configuration for engine messages
struct EngineResponses {
new_payload: PayloadStatusEnum,
fcu: PayloadStatusEnum,
}
impl EngineResponses {
fn both_valid() -> Self {
Self { new_payload: PayloadStatusEnum::Valid, fcu: PayloadStatusEnum::Valid }
}
fn invalid_new_payload() -> Self {
Self {
new_payload: PayloadStatusEnum::Invalid { validation_error: "test error".into() },
fcu: PayloadStatusEnum::Valid,
}
}
fn invalid_fcu() -> Self {
Self {
new_payload: PayloadStatusEnum::Valid,
fcu: PayloadStatusEnum::Invalid { validation_error: "fcu error".into() },
}
}
}
/// Test fixture for block import tests
struct TestFixture {
handle: ImportHandle<EthEngineTypes>,
}
impl TestFixture {
/// Create a new test fixture with the given engine responses
async fn new(responses: EngineResponses) -> Self {
let consensus = Arc::new(ParliaConsensus::new(MockProvider));
let (to_engine, from_engine) = mpsc::unbounded_channel();
let engine_handle = ConsensusEngineHandle::new(to_engine);
handle_engine_msg(from_engine, responses).await;
let (service, handle) = ImportService::new(consensus, engine_handle);
tokio::spawn(Box::pin(async move {
service.await.unwrap();
}));
Self { handle }
}
/// Run a block import test with the given event assertion
async fn assert_block_import<F>(&mut self, assert_fn: F)
where
F: Fn(&BlockImportEvent<NewBlock<BscBlock<EthEngineTypes>>>) -> bool,
{
let block_msg = create_test_block();
self.handle.send_block(block_msg, PeerId::random()).unwrap();
let waker = futures::task::noop_waker();
let mut cx = Context::from_waker(&waker);
let mut outcomes = Vec::new();
// Wait for both NewPayload and FCU outcomes
while outcomes.len() < 2 {
match self.handle.poll_outcome(&mut cx) {
Poll::Ready(Some(outcome)) => {
outcomes.push(outcome);
}
Poll::Ready(None) => break,
Poll::Pending => tokio::task::yield_now().await,
}
}
// Assert that at least one outcome matches our criteria
assert!(
outcomes.iter().any(assert_fn),
"No outcome matched the expected criteria. Outcomes: {outcomes:?}"
);
}
}
/// Creates a test block message
fn create_test_block() -> NewBlockMessage<NewBlock<Block>> {
let block: reth_primitives::Block = Block::default();
let new_block = NewBlock { block: block.clone(), td: U128::ZERO };
NewBlockMessage { hash: block.header.hash_slow(), block: Arc::new(new_block) }
}
/// Helper function to handle engine messages with specified payload statuses
async fn handle_engine_msg(
mut from_engine: mpsc::UnboundedReceiver<BeaconEngineMessage<EthEngineTypes>>,
responses: EngineResponses,
) {
tokio::spawn(Box::pin(async move {
while let Some(message) = from_engine.recv().await {
match message {
BeaconEngineMessage::NewPayload { payload: _, tx } => {
tx.send(Ok(PayloadStatus::new(responses.new_payload.clone(), None)))
.unwrap();
}
BeaconEngineMessage::ForkchoiceUpdated {
state: _,
payload_attrs: _,
version: _,
tx,
} => {
tx.send(Ok(OnForkChoiceUpdated::valid(PayloadStatus::new(
responses.fcu.clone(),
None,
))))
.unwrap();
}
_ => {}
}
}
}));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/block_import/mod.rs | examples/bsc-p2p/src/block_import/mod.rs | #![allow(unused)]
use handle::ImportHandle;
use reth_engine_primitives::EngineTypes;
use reth_eth_wire::NewBlock;
use reth_network::import::{BlockImport, BlockImportOutcome, NewBlockEvent};
use reth_network_peers::PeerId;
use reth_payload_primitives::{BuiltPayload, PayloadTypes};
use reth_primitives::NodePrimitives;
use service::{BlockMsg, BscBlock, ImportEvent, Outcome};
use std::{
fmt,
task::{ready, Context, Poll},
};
mod handle;
mod parlia;
mod service;
/// Network-facing block import adapter that forwards blocks to the
/// [`service::ImportService`] through an [`ImportHandle`].
pub struct BscBlockImport<T: PayloadTypes> {
    /// Channel handle to the background import service.
    handle: ImportHandle<T>,
}

impl<T: PayloadTypes> BscBlockImport<T> {
    /// Create a new block import adapter from a service handle.
    pub fn new(handle: ImportHandle<T>) -> Self {
        Self { handle }
    }
}
impl<T: PayloadTypes> BlockImport<NewBlock<BscBlock<T>>> for BscBlockImport<T> {
    /// Forwards a full-block announcement to the import service.
    /// Hash-only announcements and send failures are silently ignored.
    fn on_new_block(
        &mut self,
        peer_id: PeerId,
        incoming_block: NewBlockEvent<NewBlock<BscBlock<T>>>,
    ) {
        if let NewBlockEvent::Block(block) = incoming_block {
            let _ = self.handle.send_block(block, peer_id);
        }
    }

    /// Polls the next import event from the service.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<ImportEvent<T>> {
        match ready!(self.handle.poll_outcome(cx)) {
            Some(outcome) => Poll::Ready(outcome),
            // NOTE(review): `None` means the service side was dropped; returning
            // `Pending` here leaves no waker to fire again — confirm this is the
            // intended shutdown behavior.
            None => Poll::Pending,
        }
    }
}
impl<T: PayloadTypes> fmt::Debug for BscBlockImport<T> {
    // Manual impl because the handle's channel types don't derive Debug; field
    // values are static placeholders.
    // NOTE(review): the printed field names don't match the actual `handle` field.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("BscBlockImport")
            .field("engine_handle", &"ConsensusEngineHandle")
            .field("service_handle", &"BscBlockImportHandle")
            .finish()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/src/block_import/handle.rs | examples/bsc-p2p/src/block_import/handle.rs | use std::task::{Context, Poll};
use reth_engine_primitives::EngineTypes;
use reth_network::import::BlockImportError;
use reth_network_api::PeerId;
use reth_payload_primitives::PayloadTypes;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use super::service::{BlockMsg, ImportEvent, IncomingBlock, Outcome};
/// A handle for interacting with the block import service.
///
/// This handle provides a bidirectional communication channel with the
/// [`super::service::ImportService`]:
/// - Blocks can be sent to the service for import via [`send_block`](ImportHandle::send_block)
/// - Import outcomes can be received via [`poll_outcome`](ImportHandle::poll_outcome)
pub struct ImportHandle<T: PayloadTypes> {
    /// Send the new block to the service
    to_import: UnboundedSender<IncomingBlock<T>>,
    /// Receive the event (Announcement/Outcome) of the import
    import_outcome: UnboundedReceiver<ImportEvent<T>>,
}
impl<T: PayloadTypes> ImportHandle<T> {
    /// Create a new handle with the provided channels.
    pub fn new(
        to_import: UnboundedSender<IncomingBlock<T>>,
        import_outcome: UnboundedReceiver<ImportEvent<T>>,
    ) -> Self {
        Self { to_import, import_outcome }
    }

    /// Sends the block to import to the service.
    ///
    /// Returns a [`BlockImportError`] if the channel to the import service is closed.
    pub fn send_block(&self, block: BlockMsg<T>, peer_id: PeerId) -> Result<(), BlockImportError> {
        self.to_import
            .send((block, peer_id))
            .map_err(|_| BlockImportError::Other("block import service channel closed".into()))
    }

    /// Poll for the next import event; `Ready(None)` once the service is dropped.
    pub fn poll_outcome(&mut self, cx: &mut Context<'_>) -> Poll<Option<ImportEvent<T>>> {
        self.import_outcome.poll_recv(cx)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/tests/it/p2p.rs | examples/bsc-p2p/tests/it/p2p.rs | use example_bsc_p2p::{
chainspec::{boot_nodes, bsc_chain_spec, head},
handshake::BscHandshake,
};
use reth_chainspec::NamedChain;
use reth_discv4::Discv4ConfigBuilder;
use reth_network::{
EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager,
};
use reth_provider::noop::NoopProvider;
use secp256k1::{rand, SecretKey};
use std::{
net::{Ipv4Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use tokio::time::timeout;
use tokio_stream::StreamExt;
// Live-network smoke test: dials BSC mainnet boot nodes and waits for one
// active session. `#[ignore]`d because it needs internet access.
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn can_connect() {
    reth_tracing::init_test_tracing();
    let local_addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 30303);
    let secret_key = SecretKey::new(&mut rand::thread_rng());
    // BSC requires a custom RLPx handshake and PoW-style (no engine) networking.
    let net_cfg = NetworkConfig::<_, EthNetworkPrimitives>::builder(secret_key)
        .boot_nodes(boot_nodes())
        .set_head(head())
        .with_pow()
        .listener_addr(local_addr)
        .eth_rlpx_handshake(Arc::new(BscHandshake::default()))
        .build(NoopProvider::eth(bsc_chain_spec()));
    // Aggressive lookup interval so discovery finds peers quickly in the test window.
    let net_cfg = net_cfg.set_discovery_v4(
        Discv4ConfigBuilder::default()
            .add_boot_nodes(boot_nodes())
            .lookup_interval(Duration::from_millis(500))
            .build(),
    );
    let net_manager = NetworkManager::<EthNetworkPrimitives>::new(net_cfg).await.unwrap();
    let net_handle = net_manager.handle().clone();
    let mut events = net_handle.event_listener();
    tokio::spawn(net_manager);
    // Fail the test if no session is established within 10 seconds.
    let result = timeout(Duration::from_secs(10), async {
        while let Some(evt) = events.next().await {
            if let NetworkEvent::ActivePeerSession { info, .. } = evt {
                assert_eq!(
                    info.status.chain.to_string(),
                    NamedChain::BinanceSmartChain.to_string()
                );
                return Ok(());
            }
        }
        Err("Expected event not received")
    })
    .await;
    assert!(result.is_ok(), "Test timed out without receiving the expected event");
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/bsc-p2p/tests/it/main.rs | examples/bsc-p2p/tests/it/main.rs | #![allow(missing_docs)]
mod p2p;
// Integration-test binary entry point; intentionally empty — the tests live in `p2p`.
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-inspector/src/main.rs | examples/custom-inspector/src/main.rs | //! Example of how to use a custom inspector to trace new pending transactions
//!
//! Run with
//!
//! ```sh
//! cargo run --release -p custom-inspector -- node --http --ws --recipients 0x....,0x....
//! ```
//!
//! If no recipients are specified, all transactions will be inspected.
#![warn(unused_crate_dependencies)]
use alloy_eips::BlockNumberOrTag;
use alloy_evm::Evm;
use alloy_primitives::Address;
use alloy_rpc_types_eth::{state::EvmOverrides, TransactionRequest};
use clap::Parser;
use futures_util::StreamExt;
use reth_ethereum::{
cli::{chainspec::EthereumChainSpecParser, interface::Cli},
evm::{
primitives::ConfigureEvm,
revm::revm::{
bytecode::opcode::OpCode,
context_interface::ContextTr,
inspector::Inspector,
interpreter::{interpreter::EthInterpreter, interpreter_types::Jumps, Interpreter},
},
},
node::{builder::NodeHandle, EthereumNode},
pool::TransactionPool,
rpc::api::eth::helpers::Call,
};
fn main() {
    Cli::<EthereumChainSpecParser, RethCliTxpoolExt>::parse()
        .run(|builder, args| async move {
            // launch the node
            let NodeHandle { node, node_exit_future } =
                builder.node(EthereumNode::default()).launch().await?;

            // create a new subscription to pending transactions
            let mut pending_transactions = node.pool.new_pending_pool_transactions_listener();

            // get an instance of the `trace_` API handler
            let eth_api = node.rpc_registry.eth_api().clone();

            println!("Spawning trace task!");

            // Spawn an async block to listen for transactions.
            node.task_executor.spawn(Box::pin(async move {
                // Waiting for new transactions
                while let Some(event) = pending_transactions.next().await {
                    let tx = event.transaction;
                    println!("Transaction received: {tx:?}");

                    // Only trace transactions whose recipient matches the CLI filter
                    // (an empty filter matches everything).
                    if let Some(recipient) = tx.to() {
                        if args.is_match(&recipient) {
                            // convert the pool transaction
                            let call_request =
                                TransactionRequest::from_recovered_transaction(tx.to_consensus());

                            let evm_config = node.evm_config.clone();

                            // Re-execute the call at the latest block with our
                            // step-logging inspector attached.
                            let result = eth_api
                                .spawn_with_call_at(
                                    call_request,
                                    BlockNumberOrTag::Latest.into(),
                                    EvmOverrides::default(),
                                    move |db, evm_env, tx_env| {
                                        let mut dummy_inspector = DummyInspector::default();
                                        let mut evm = evm_config.evm_with_env_and_inspector(
                                            db,
                                            evm_env,
                                            &mut dummy_inspector,
                                        );
                                        // execute the transaction on a blocking task and await
                                        // the inspector result
                                        let _ = evm.transact(tx_env)?;
                                        Ok(dummy_inspector)
                                    },
                                )
                                .await;

                            if let Ok(ret_val) = result {
                                let hash = tx.hash();
                                println!(
                                    "Inspector result for transaction {}: \n {}",
                                    hash,
                                    ret_val.ret_val.join("\n")
                                );
                            }
                        }
                    }
                }
            }));

            node_exit_future.await
        })
        .unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
#[derive(Debug, Clone, Default, clap::Args)]
struct RethCliTxpoolExt {
    /// The addresses of the recipients that we want to trace.
    /// Comma-separated on the command line; empty means trace all transactions.
    #[arg(long, value_delimiter = ',')]
    pub recipients: Vec<Address>,
}
impl RethCliTxpoolExt {
    /// Check if the recipient is in the list of recipients to trace.
    ///
    /// An empty recipient list acts as a wildcard: every recipient matches.
    pub fn is_match(&self, recipient: &Address) -> bool {
        if self.recipients.is_empty() {
            return true;
        }
        self.recipients.iter().any(|candidate| candidate == recipient)
    }
}
/// A dummy inspector that logs the opcodes and their corresponding program counter for a
/// transaction.
#[derive(Default, Debug, Clone)]
struct DummyInspector {
    /// One `"<pc>: <opcode>"` entry per executed EVM step.
    ret_val: Vec<String>,
}
impl<CTX> Inspector<CTX, EthInterpreter> for DummyInspector
where
    CTX: ContextTr,
{
    /// This method is called at each step of the EVM execution.
    /// It checks if the current opcode is valid and if so, it stores the opcode and its
    /// corresponding program counter in the `ret_val` vector.
    fn step(&mut self, interp: &mut Interpreter<EthInterpreter>, _context: &mut CTX) {
        // `OpCode::new` returns None for unknown byte values, which are skipped.
        if let Some(opcode) = OpCode::new(interp.bytecode.opcode()) {
            self.ret_val.push(format!("{}: {}", interp.bytecode.pc(), opcode));
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/rpc-db/src/myrpc_ext.rs | examples/rpc-db/src/myrpc_ext.rs | // Reth block related imports
use reth_ethereum::{provider::BlockReaderIdExt, rpc::eth::EthResult, Block};
// Rpc related imports
use jsonrpsee::proc_macros::rpc;
/// trait interface for a custom rpc namespace: `myrpcExt`
///
/// This defines an additional namespace where all methods are configured as trait functions.
#[rpc(server, namespace = "myrpcExt")]
pub trait MyRpcExtApi {
    /// Returns block 0; exposed over JSON-RPC as `myrpcExt_customMethod`.
    #[method(name = "customMethod")]
    fn custom_method(&self) -> EthResult<Option<Block>>;
}
/// The type that implements `myrpcExt` rpc namespace trait
pub struct MyRpcExt<Provider> {
    /// Provider used to read blocks when serving RPC requests.
    pub provider: Provider,
}
impl<Provider> MyRpcExtApiServer for MyRpcExt<Provider>
where
    Provider: BlockReaderIdExt<Block = Block> + 'static,
{
    /// Showcasing how to implement a custom rpc method
    /// using the provider.
    ///
    /// Returns `Ok(None)` if block 0 is not present in the database.
    fn custom_method(&self) -> EthResult<Option<Block>> {
        let block = self.provider.block_by_number(0)?;
        Ok(block)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/rpc-db/src/main.rs | examples/rpc-db/src/main.rs | //! Example illustrating how to run the ETH JSON RPC API as standalone over a DB file.
//!
//! Run with
//!
//! ```sh
//! cargo run -p rpc-db
//! ```
//!
//! This installs an additional RPC method `myrpcExt_customMethod` that can be queried via [cast](https://github.com/foundry-rs/foundry)
//!
//! ```sh
//! cast rpc myrpcExt_customMethod
//! ```
#![warn(unused_crate_dependencies)]
use std::{path::Path, sync::Arc};
use reth_ethereum::{
chainspec::ChainSpecBuilder,
consensus::EthBeaconConsensus,
network::api::noop::NoopNetwork,
node::{api::NodeTypesWithDBAdapter, EthEvmConfig, EthereumNode},
pool::noop::NoopTransactionPool,
provider::{
db::{mdbx::DatabaseArguments, open_db_read_only, ClientVersion, DatabaseEnv},
providers::{BlockchainProvider, StaticFileProvider},
ProviderFactory,
},
rpc::{
builder::{RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig},
EthApiBuilder,
},
tasks::TokioTaskExecutor,
};
// Configuring the network parts, ideally also wouldn't need to think about this.
use myrpc_ext::{MyRpcExt, MyRpcExtApiServer};
// Custom rpc extension
pub mod myrpc_ext;
#[tokio::main]
async fn main() -> eyre::Result<()> {
    // 1. Setup the DB — opened read-only from the path in RETH_DB_PATH.
    let db_path = std::env::var("RETH_DB_PATH")?;
    let db_path = Path::new(&db_path);
    let db = Arc::new(open_db_read_only(
        db_path.join("db").as_path(),
        DatabaseArguments::new(ClientVersion::default()),
    )?);
    let spec = Arc::new(ChainSpecBuilder::mainnet().build());
    let factory = ProviderFactory::<NodeTypesWithDBAdapter<EthereumNode, Arc<DatabaseEnv>>>::new(
        db.clone(),
        spec.clone(),
        StaticFileProvider::read_only(db_path.join("static_files"), true)?,
    );

    // 2. Setup the blockchain provider using only the database provider and a noop for the tree to
    //    satisfy trait bounds. Tree is not used in this example since we are only operating on the
    //    disk and don't handle new blocks/live sync etc, which is done by the blockchain tree.
    let provider = BlockchainProvider::new(factory)?;

    let rpc_builder = RpcModuleBuilder::default()
        .with_provider(provider.clone())
        // Rest is just noops that do nothing
        .with_noop_pool()
        .with_noop_network()
        .with_executor(Box::new(TokioTaskExecutor::default()))
        .with_evm_config(EthEvmConfig::new(spec.clone()))
        .with_consensus(EthBeaconConsensus::new(spec.clone()));

    let eth_api = EthApiBuilder::new(
        provider.clone(),
        NoopTransactionPool::default(),
        NoopNetwork::default(),
        EthEvmConfig::mainnet(),
    )
    .build();

    // Pick which namespaces to expose.
    let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]);
    let mut server = rpc_builder.build(config, eth_api);

    // Add a custom rpc namespace
    let custom_rpc = MyRpcExt { provider };
    server.merge_configured(custom_rpc.into_rpc())?;

    // Start the server & keep it alive
    let server_args =
        RpcServerConfig::http(Default::default()).with_http_address("0.0.0.0:8545".parse()?);
    let _handle = server_args.start(&server).await?;
    // Park forever; Ctrl-C terminates the process.
    futures::future::pending::<()>().await;

    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/manual-p2p/src/main.rs | examples/manual-p2p/src/main.rs | //! Low level example of connecting to and communicating with a peer.
//!
//! Run with
//!
//! ```sh
//! cargo run -p manual-p2p
//! ```
#![warn(unused_crate_dependencies)]
use std::time::Duration;
use alloy_consensus::constants::MAINNET_GENESIS_HASH;
use futures::StreamExt;
use reth_discv4::{DiscoveryUpdate, Discv4, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS};
use reth_ecies::stream::ECIESStream;
use reth_ethereum::{
chainspec::{Chain, EthereumHardfork, Head, MAINNET},
network::{
config::rng_secret_key,
eth_wire::{
EthMessage, EthStream, HelloMessage, P2PStream, UnauthedEthStream, UnauthedP2PStream,
UnifiedStatus,
},
EthNetworkPrimitives,
},
};
use reth_network_peers::{mainnet_nodes, pk2id, NodeRecord};
use secp256k1::{SecretKey, SECP256K1};
use std::sync::LazyLock;
use tokio::net::TcpStream;
type AuthedP2PStream = P2PStream<ECIESStream<TcpStream>>;
type AuthedEthStream = EthStream<P2PStream<ECIESStream<TcpStream>>, EthNetworkPrimitives>;
/// Mainnet boot nodes, resolved lazily on first access.
pub static MAINNET_BOOT_NODES: LazyLock<Vec<NodeRecord>> = LazyLock::new(mainnet_nodes);
#[tokio::main]
async fn main() -> eyre::Result<()> {
    // Setup configs related to this 'node' by creating a new random key and ENR.
    let our_key = rng_secret_key();
    let our_enr = NodeRecord::from_secret_key(DEFAULT_DISCOVERY_ADDRESS, &our_key);

    // Setup discovery v4 protocol to find peers to talk to
    let mut discv4_cfg = Discv4ConfigBuilder::default();
    discv4_cfg.add_boot_nodes(MAINNET_BOOT_NODES.clone()).lookup_interval(Duration::from_secs(1));

    // Start discovery protocol
    let discv4 = Discv4::spawn(our_enr.udp_addr(), our_enr, our_key, discv4_cfg.build()).await?;
    let mut discv4_stream = discv4.update_stream().await?;

    // Handle each newly discovered peer on its own task.
    while let Some(update) = discv4_stream.next().await {
        tokio::spawn(async move {
            if let DiscoveryUpdate::Added(peer) = update {
                // Boot nodes hard at work, lets not disturb them
                if MAINNET_BOOT_NODES.contains(&peer) {
                    return
                }

                let (p2p_stream, their_hello) = match handshake_p2p(peer, our_key).await {
                    Ok(s) => s,
                    Err(e) => {
                        println!("Failed P2P handshake with peer {}, {}", peer.address, e);
                        return
                    }
                };

                let (eth_stream, their_status) = match handshake_eth(p2p_stream).await {
                    Ok(s) => s,
                    Err(e) => {
                        println!("Failed ETH handshake with peer {}, {}", peer.address, e);
                        return
                    }
                };

                println!(
                    "Successfully connected to a peer at {}:{} ({}) using eth-wire version eth/{}",
                    peer.address, peer.tcp_port, their_hello.client_version, their_status.version
                );

                // Passively listen to the peer's broadcasts until it disconnects us.
                snoop(peer, eth_stream).await;
            }
        });
    }

    Ok(())
}
// Perform a P2P handshake with a peer:
// TCP connect -> ECIES encryption handshake -> RLPx Hello exchange.
async fn handshake_p2p(
    peer: NodeRecord,
    key: SecretKey,
) -> eyre::Result<(AuthedP2PStream, HelloMessage)> {
    let outgoing = TcpStream::connect((peer.address, peer.tcp_port)).await?;
    let ecies_stream = ECIESStream::connect(outgoing, key, peer.id).await?;
    let our_peer_id = pk2id(&key.public_key(SECP256K1));
    let our_hello = HelloMessage::builder(our_peer_id).build();
    Ok(UnauthedP2PStream::new(ecies_stream).handshake(our_hello).await?)
}
// Perform a ETH Wire handshake with a peer.
// The status/fork filter are pinned at the Shanghai hardfork of mainnet.
async fn handshake_eth(
    p2p_stream: AuthedP2PStream,
) -> eyre::Result<(AuthedEthStream, UnifiedStatus)> {
    let fork_filter = MAINNET.fork_filter(Head {
        timestamp: MAINNET.fork(EthereumHardfork::Shanghai).as_timestamp().unwrap(),
        ..Default::default()
    });
    let unified_status = UnifiedStatus::builder()
        .chain(Chain::mainnet())
        .genesis(MAINNET_GENESIS_HASH)
        .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap())
        .build();
    // Advertise whichever eth protocol version was negotiated during the P2P handshake.
    let status = UnifiedStatus {
        version: p2p_stream.shared_capabilities().eth()?.version().try_into()?,
        ..unified_status
    };
    let eth_unauthed = UnauthedEthStream::new(p2p_stream);
    Ok(eth_unauthed.handshake(status, fork_filter).await?)
}
// Snoop by greedily capturing all broadcasts that the peer emits
// note: this node cannot handle request so will be disconnected by peer when challenged
async fn snoop(peer: NodeRecord, mut eth_stream: AuthedEthStream) {
    // Loop ends when the stream closes or yields a decode error.
    while let Some(Ok(update)) = eth_stream.next().await {
        match update {
            EthMessage::NewPooledTransactionHashes66(txs) => {
                println!("Got {} new tx hashes from peer {}", txs.0.len(), peer.address);
            }
            EthMessage::NewBlock(block) => {
                println!("Got new block data {:?} from peer {}", block, peer.address);
            }
            EthMessage::NewPooledTransactionHashes68(txs) => {
                println!("Got {} new tx hashes from peer {}", txs.hashes.len(), peer.address);
            }
            EthMessage::NewBlockHashes(block_hashes) => {
                println!(
                    "Got {} new block hashes from peer {}",
                    block_hashes.0.len(),
                    peer.address
                );
            }
            // Request messages below cannot be served by this passive node;
            // each is logged and ignored.
            EthMessage::GetNodeData(_) => {
                println!("Unable to serve GetNodeData request to peer {}", peer.address);
            }
            EthMessage::GetReceipts(_) => {
                println!("Unable to serve GetReceipts request to peer {}", peer.address);
            }
            EthMessage::GetBlockHeaders(_) => {
                println!("Unable to serve GetBlockHeaders request to peer {}", peer.address);
            }
            EthMessage::GetBlockBodies(_) => {
                println!("Unable to serve GetBlockBodies request to peer {}", peer.address);
            }
            EthMessage::GetPooledTransactions(_) => {
                println!("Unable to serve GetPooledTransactions request to peer {}", peer.address);
            }
            _ => {}
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/exex-subscription/src/main.rs | examples/exex-subscription/src/main.rs | #![allow(dead_code)]
//! An ExEx example that installs a new RPC subscription endpoint that emit storage changes for a
//! requested address.
#[allow(dead_code)]
use alloy_primitives::{Address, U256};
use clap::Parser;
use futures::TryStreamExt;
use jsonrpsee::{
core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink,
SubscriptionMessage,
};
use reth_ethereum::{
exex::{ExExContext, ExExEvent, ExExNotification},
node::{api::FullNodeComponents, EthereumNode},
};
use std::collections::HashMap;
use tokio::sync::{mpsc, oneshot};
use tracing::{error, info};
/// Subscription update format for storage changes.
/// This is the format that will be sent to the client when a storage change occurs.
#[derive(Debug, Clone, Copy, Default, serde::Serialize)]
struct StorageDiff {
address: Address,
key: U256,
old_value: U256,
new_value: U256,
}
/// Subscription request format for storage changes.
struct SubscriptionRequest {
/// The address to subscribe to.
address: Address,
/// The response channel to send the subscription updates to.
response: oneshot::Sender<mpsc::UnboundedReceiver<StorageDiff>>,
}
/// Subscription request format for storage changes.
type SubscriptionSender = mpsc::UnboundedSender<SubscriptionRequest>;
/// API to subscribe to storage changes for a specific Ethereum address.
#[rpc(server, namespace = "watcher")]
pub trait StorageWatcherApi {
/// Subscribes to storage changes for a given Ethereum address and streams `StorageDiff`
/// updates.
#[subscription(name = "subscribeStorageChanges", item = StorageDiff)]
fn subscribe_storage_changes(&self, address: Address) -> SubscriptionResult;
}
/// API implementation for the storage watcher.
#[derive(Clone)]
struct StorageWatcherRpc {
/// The subscription sender to send subscription requests to.
subscriptions: SubscriptionSender,
}
impl StorageWatcherRpc {
/// Creates a new [`StorageWatcherRpc`] instance with the given subscription sender.
fn new(subscriptions: SubscriptionSender) -> Self {
Self { subscriptions }
}
}
impl StorageWatcherApiServer for StorageWatcherRpc {
fn subscribe_storage_changes(
&self,
pending: PendingSubscriptionSink,
address: Address,
) -> SubscriptionResult {
let subscription = self.subscriptions.clone();
tokio::spawn(async move {
let sink = match pending.accept().await {
Ok(sink) => sink,
Err(e) => {
error!("failed to accept subscription: {e}");
return;
}
};
let (resp_tx, resp_rx) = oneshot::channel();
subscription.send(SubscriptionRequest { address, response: resp_tx }).unwrap();
let Ok(mut rx) = resp_rx.await else { return };
while let Some(diff) = rx.recv().await {
let msg = SubscriptionMessage::from(
serde_json::value::to_raw_value(&diff).expect("serialize"),
);
if sink.send(msg).await.is_err() {
break;
}
}
});
Ok(())
}
}
async fn my_exex<Node: FullNodeComponents>(
mut ctx: ExExContext<Node>,
mut subscription_requests: mpsc::UnboundedReceiver<SubscriptionRequest>,
) -> eyre::Result<()> {
let mut subscriptions: HashMap<Address, Vec<mpsc::UnboundedSender<StorageDiff>>> =
HashMap::new();
loop {
tokio::select! {
maybe_notification = ctx.notifications.try_next() => {
let notification = match maybe_notification? {
Some(notification) => notification,
None => break,
};
match ¬ification {
ExExNotification::ChainCommitted { new } => {
info!(committed_chain = ?new.range(), "Received commit");
let execution_outcome = new.execution_outcome();
for (address, senders) in subscriptions.iter_mut() {
for change in &execution_outcome.bundle.state {
if change.0 == address {
for (key, slot) in &change.1.storage {
let diff = StorageDiff {
address: *change.0,
key: *key,
old_value: slot.original_value().into(),
new_value: slot.present_value().into(),
};
// Send diff to all the active subscribers
senders.retain(|sender| sender.send(diff).is_ok());
}
}
}
}
}
ExExNotification::ChainReorged { old, new } => {
info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
}
ExExNotification::ChainReverted { old } => {
info!(reverted_chain = ?old.range(), "Received revert");
}
}
if let Some(committed_chain) = notification.committed_chain() {
ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
}
}
maybe_subscription = subscription_requests.recv() => {
match maybe_subscription {
Some(SubscriptionRequest { address, response }) => {
let (tx, rx) = mpsc::unbounded_channel();
subscriptions.entry(address).or_default().push(tx);
let _ = response.send(rx);
}
None => {
// channel closed
}
}
}
}
}
Ok(())
}
#[derive(Parser, Debug)]
struct Args {
#[arg(long)]
enable_ext: bool,
}
fn main() -> eyre::Result<()> {
reth_ethereum::cli::Cli::parse_args().run(|builder, _args| async move {
let (subscriptions_tx, subscriptions_rx) = mpsc::unbounded_channel::<SubscriptionRequest>();
let rpc = StorageWatcherRpc::new(subscriptions_tx.clone());
let handle = builder
.node(EthereumNode::default())
.extend_rpc_modules(move |ctx| {
ctx.modules.merge_configured(StorageWatcherApiServer::into_rpc(rpc))?;
Ok(())
})
.install_exex("my-exex", async move |ctx| Ok(my_exex(ctx, subscriptions_rx)))
.launch()
.await?;
handle.wait_for_node_exit().await
})
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/network-proxy/src/main.rs | examples/network-proxy/src/main.rs | //! Example of how to use the network with a proxy for eth requests handling.
//!
//! This connects two peers:
//! - first peer installs a channel for incoming eth request
//! - second peer connects to first peer and sends a header request
//!
//! Run with
//!
//! ```sh
//! cargo run --release -p example-network-proxy
//! ```
#![warn(unused_crate_dependencies)]
use futures::StreamExt;
use reth_ethereum::{
chainspec::DEV,
network::{
config::rng_secret_key,
eth_requests::IncomingEthRequest,
p2p::HeadersClient,
transactions::NetworkTransactionEvent,
types::{BlockHashOrNumber, NewPooledTransactionHashes68},
BlockDownloaderProvider, FetchClient, NetworkConfig, NetworkEventListenerProvider,
NetworkHandle, NetworkInfo, NetworkManager, Peers,
},
};
#[tokio::main]
async fn main() -> eyre::Result<()> {
reth_tracing::init_test_tracing();
// The key that's used for encrypting sessions and to identify our node.
let local_key = rng_secret_key();
// Configure the network
let config = NetworkConfig::builder(local_key).build_with_noop_provider(DEV.clone());
let (requests_tx, mut requests_rx) = tokio::sync::mpsc::channel(1000);
let (transactions_tx, mut transactions_rx) = tokio::sync::mpsc::unbounded_channel();
// create the network instance
let network = NetworkManager::eth(config)
.await?
// install the channel through which the network sends incoming eth requests
.with_eth_request_handler(requests_tx)
// install the channel through which the network sends incoming transaction messages
.with_transactions(transactions_tx);
// get a handle to the network to interact with it
let handle = network.handle().clone();
tokio::task::spawn(async move {
// print network events
let mut events = handle.event_listener();
while let Some(event) = events.next().await {
println!("Received event: {event:?}");
}
});
let handle = network.handle().clone();
// spawn the network
tokio::task::spawn(network);
// spawn task to fetch a header using another peer/network
tokio::task::spawn(async move {
run_peer(handle).await.unwrap();
});
loop {
// receive incoming eth requests and transaction messages from the second peer
tokio::select! {
eth_request = requests_rx.recv() => {
let Some(eth_request) = eth_request else {break};
match eth_request {
IncomingEthRequest::GetBlockHeaders { peer_id, request, response } => {
println!("Received block headers request: {peer_id:?}, {request:?}");
response.send(Ok(vec![DEV.genesis_header().clone()].into())).unwrap();
}
IncomingEthRequest::GetBlockBodies { .. } => {}
IncomingEthRequest::GetNodeData { .. } => {}
IncomingEthRequest::GetReceipts { .. } => {}
IncomingEthRequest::GetReceipts69 { .. } => {}
}
}
transaction_message = transactions_rx.recv() => {
let Some(transaction_message) = transaction_message else {break};
match transaction_message {
NetworkTransactionEvent::IncomingTransactions { .. } => {}
NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => {
println!("Received incoming tx hashes broadcast: {peer_id:?}, {msg:?}");
}
NetworkTransactionEvent::GetPooledTransactions { .. } => {}
NetworkTransactionEvent::GetTransactionsHandle(_) => {}
}
}
}
}
Ok(())
}
/// Launches another network/peer, connects to the first peer and sends requests/messages to the
/// first peer.
async fn run_peer(handle: NetworkHandle) -> eyre::Result<()> {
// create another peer
let config = NetworkConfig::builder(rng_secret_key())
// use random ports
.with_unused_ports()
.build_with_noop_provider(DEV.clone());
let network = NetworkManager::eth(config).await?;
let peer = network.handle().clone();
// spawn additional peer
tokio::task::spawn(network);
// add the other peer as trusted
// this will establish a connection to the first peer
peer.add_trusted_peer(*handle.peer_id(), handle.local_addr());
// obtain the client that can emit requests
let client: FetchClient = peer.fetch_client().await?;
let header = client.get_header(BlockHashOrNumber::Number(0)).await.unwrap();
println!("Got header: {header:?}");
// send a (bogus) hashes message
let hashes = NewPooledTransactionHashes68 {
types: vec![1],
sizes: vec![2],
hashes: vec![Default::default()],
};
peer.send_transactions_hashes(*handle.peer_id(), hashes.into());
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/exex-hello-world/src/main.rs | examples/exex-hello-world/src/main.rs | //! Example for a simple Execution Extension
//!
//! Run with
//!
//! ```sh
//! cargo run -p example-exex-hello-world -- node --dev --dev.block-time 5s
//! ```
use clap::Parser;
use futures::TryStreamExt;
use reth_ethereum::{
chainspec::EthereumHardforks,
exex::{ExExContext, ExExEvent, ExExNotification},
node::{
api::{FullNodeComponents, NodeTypes},
builder::rpc::RpcHandle,
EthereumNode,
},
rpc::api::eth::helpers::FullEthApi,
};
use reth_tracing::tracing::info;
use tokio::sync::oneshot;
/// Additional CLI arguments
#[derive(Parser)]
struct ExExArgs {
/// whether to launch an op-reth node
#[arg(long)]
optimism: bool,
}
/// A basic subscription loop of new blocks.
async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> {
while let Some(notification) = ctx.notifications.try_next().await? {
match ¬ification {
ExExNotification::ChainCommitted { new } => {
info!(committed_chain = ?new.range(), "Received commit");
}
ExExNotification::ChainReorged { old, new } => {
info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
}
ExExNotification::ChainReverted { old } => {
info!(reverted_chain = ?old.range(), "Received revert");
}
};
if let Some(committed_chain) = notification.committed_chain() {
ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
}
}
Ok(())
}
/// This is an example of how to access the [`RpcHandle`] inside an ExEx. It receives the
/// [`RpcHandle`] once the node is launched fully.
///
/// This function supports both Opstack Eth API and ethereum Eth API.
///
/// The received handle gives access to the `EthApi` has full access to all eth api functionality
/// [`FullEthApi`]. And also gives access to additional eth related rpc method handlers, such as eth
/// filter.
async fn ethapi_exex<Node, EthApi>(
mut ctx: ExExContext<Node>,
rpc_handle: oneshot::Receiver<RpcHandle<Node, EthApi>>,
) -> eyre::Result<()>
where
Node: FullNodeComponents<Types: NodeTypes<ChainSpec: EthereumHardforks>>,
EthApi: FullEthApi,
{
// Wait for the ethapi to be sent from the main function
let rpc_handle = rpc_handle.await?;
info!("Received rpc handle inside exex");
// obtain the ethapi from the rpc handle
let ethapi = rpc_handle.eth_api();
// EthFilter type that provides all eth_getlogs related logic
let _eth_filter = rpc_handle.eth_handlers().filter.clone();
// EthPubSub type that provides all eth_subscribe logic
let _eth_pubsub = rpc_handle.eth_handlers().pubsub.clone();
// The TraceApi type that provides all the trace_ handlers
let _trace_api = rpc_handle.trace_api();
// The DebugApi type that provides all the trace_ handlers
let _debug_api = rpc_handle.debug_api();
while let Some(notification) = ctx.notifications.try_next().await? {
if let Some(committed_chain) = notification.committed_chain() {
ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
// can use the eth api to interact with the node
let _rpc_block = ethapi.rpc_block(committed_chain.tip().hash().into(), true).await?;
}
}
Ok(())
}
fn main() -> eyre::Result<()> {
let args = ExExArgs::parse();
if args.optimism {
reth_op::cli::Cli::parse_args().run(|builder, _| {
let (rpc_handle_tx, rpc_handle_rx) = oneshot::channel();
Box::pin(async move {
let handle = builder
.node(reth_op::node::OpNode::default())
.install_exex("my-exex", async move |ctx| Ok(my_exex(ctx)))
.install_exex("ethapi-exex", async move |ctx| {
Ok(ethapi_exex(ctx, rpc_handle_rx))
})
.launch()
.await?;
// Retrieve the rpc handle from the node and send it to the exex
rpc_handle_tx
.send(handle.node.add_ons_handle.clone())
.expect("Failed to send ethapi to ExEx");
handle.wait_for_node_exit().await
})
})
} else {
reth_ethereum::cli::Cli::parse_args().run(|builder, _| {
Box::pin(async move {
let (rpc_handle_tx, rpc_handle_rx) = oneshot::channel();
let handle = builder
.node(EthereumNode::default())
.install_exex("my-exex", async move |ctx| Ok(my_exex(ctx)))
.install_exex("ethapi-exex", async move |ctx| {
Ok(ethapi_exex(ctx, rpc_handle_rx))
})
.launch()
.await?;
// Retrieve the rpc handle from the node and send it to the exex
rpc_handle_tx
.send(handle.node.add_ons_handle.clone())
.expect("Failed to send ethapi to ExEx");
handle.wait_for_node_exit().await
})
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/db-access/src/main.rs | examples/db-access/src/main.rs | #![warn(unused_crate_dependencies)]
use alloy_primitives::{Address, B256};
use reth_ethereum::{
chainspec::ChainSpecBuilder,
node::EthereumNode,
primitives::{AlloyBlockHeader, SealedBlock, SealedHeader},
provider::{
providers::ReadOnlyConfig, AccountReader, BlockReader, BlockSource, HeaderProvider,
ReceiptProvider, StateProvider, TransactionVariant, TransactionsProvider,
},
rpc::eth::primitives::Filter,
TransactionSigned,
};
// Providers are zero cost abstractions on top of an opened MDBX Transaction
// exposing a familiar API to query the chain's information without requiring knowledge
// of the inner tables.
//
// These abstractions do not include any caching and the user is responsible for doing that.
// Other parts of the code which include caching are parts of the `EthApi` abstraction.
fn main() -> eyre::Result<()> {
// The path to data directory, e.g. "~/.local/reth/share/mainnet"
let datadir = std::env::var("RETH_DATADIR")?;
// Instantiate a provider factory for Ethereum mainnet using the provided datadir path.
let spec = ChainSpecBuilder::mainnet().build();
let factory = EthereumNode::provider_factory_builder()
.open_read_only(spec.into(), ReadOnlyConfig::from_datadir(datadir))?;
// This call opens a RO transaction on the database. To write to the DB you'd need to call
// the `provider_rw` function and look for the `Writer` variants of the traits.
let provider = factory.provider()?;
// Run basic queries against the DB
let block_num = 100;
header_provider_example(&provider, block_num)?;
block_provider_example(&provider, block_num)?;
txs_provider_example(&provider)?;
receipts_provider_example(&provider)?;
// Closes the RO transaction opened in the `factory.provider()` call. This is optional and
// would happen anyway at the end of the function scope.
drop(provider);
// Run the example against latest state
state_provider_example(factory.latest()?)?;
// Run it with historical state
state_provider_example(factory.history_by_block_number(block_num)?)?;
Ok(())
}
/// The `HeaderProvider` allows querying the headers-related tables.
fn header_provider_example<T: HeaderProvider>(provider: T, number: u64) -> eyre::Result<()> {
// Can query the header by number
let header = provider.header_by_number(number)?.ok_or(eyre::eyre!("header not found"))?;
// We can convert a header to a sealed header which contains the hash w/o needing to recompute
// it every time.
let sealed_header = SealedHeader::seal_slow(header);
// Can also query the header by hash!
let header_by_hash =
provider.header(&sealed_header.hash())?.ok_or(eyre::eyre!("header by hash not found"))?;
assert_eq!(sealed_header.header(), &header_by_hash);
// The header's total difficulty is stored in a separate table, so we have a separate call for
// it. This is not needed for post PoS transition chains.
let td = provider.header_td_by_number(number)?.ok_or(eyre::eyre!("header td not found"))?;
assert!(!td.is_zero());
// Can query headers by range as well, already sealed!
let headers = provider.sealed_headers_range(100..200)?;
assert_eq!(headers.len(), 100);
Ok(())
}
/// The `TransactionsProvider` allows querying transaction-related information
fn txs_provider_example<T: TransactionsProvider<Transaction = TransactionSigned>>(
provider: T,
) -> eyre::Result<()> {
// Try the 5th tx
let txid = 5;
// Query a transaction by its primary ordered key in the db
let tx = provider.transaction_by_id(txid)?.ok_or(eyre::eyre!("transaction not found"))?;
// Can query the tx by hash
let tx_by_hash =
provider.transaction_by_hash(*tx.tx_hash())?.ok_or(eyre::eyre!("txhash not found"))?;
assert_eq!(tx, tx_by_hash);
// Can query the tx by hash with info about the block it was included in
let (tx, meta) = provider
.transaction_by_hash_with_meta(*tx.tx_hash())?
.ok_or(eyre::eyre!("txhash not found"))?;
assert_eq!(*tx.tx_hash(), meta.tx_hash);
// Can reverse lookup the key too
let id = provider.transaction_id(*tx.tx_hash())?.ok_or(eyre::eyre!("txhash not found"))?;
assert_eq!(id, txid);
// Can find the block of a transaction given its key
let _block = provider.transaction_block(txid)?;
// Can query the txs in the range [100, 200)
let _txs_by_tx_range = provider.transactions_by_tx_range(100..200)?;
// Can query the txs in the _block_ range [100, 200)]
let _txs_by_block_range = provider.transactions_by_block_range(100..200)?;
Ok(())
}
/// The `BlockReader` allows querying the headers-related tables.
fn block_provider_example<T: BlockReader<Block = reth_ethereum::Block>>(
provider: T,
number: u64,
) -> eyre::Result<()> {
// Can query a block by number
let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?;
assert_eq!(block.number, number);
// Can query a block with its senders, this is useful when you'd want to execute a block and do
// not want to manually recover the senders for each transaction (as each transaction is
// stored on disk with its v,r,s but not its `from` field.).
let _recovered_block = provider
.sealed_block_with_senders(number.into(), TransactionVariant::WithHash)?
.ok_or(eyre::eyre!("block num not found"))?;
// Can seal the block to cache the hash, like the Header above.
let sealed_block = SealedBlock::seal_slow(block.clone());
// Can also query the block by hash directly
let block_by_hash = provider
.block_by_hash(sealed_block.hash())?
.ok_or(eyre::eyre!("block by hash not found"))?;
assert_eq!(block, block_by_hash);
// Or by relying in the internal conversion
let block_by_hash2 = provider
.block(sealed_block.hash().into())?
.ok_or(eyre::eyre!("block by hash not found"))?;
assert_eq!(block, block_by_hash2);
// Or you can also specify the datasource. For this provider this always return `None`, but
// the blockchain tree is also able to access pending state not available in the db yet.
let block_by_hash3 = provider
.find_block_by_hash(sealed_block.hash(), BlockSource::Any)?
.ok_or(eyre::eyre!("block hash not found"))?;
assert_eq!(block, block_by_hash3);
Ok(())
}
/// The `ReceiptProvider` allows querying the receipts tables.
fn receipts_provider_example<
T: ReceiptProvider<Receipt = reth_ethereum::Receipt>
+ TransactionsProvider<Transaction = TransactionSigned>
+ HeaderProvider,
>(
provider: T,
) -> eyre::Result<()> {
let txid = 5;
let header_num = 100;
// Query a receipt by txid
let receipt = provider.receipt(txid)?.ok_or(eyre::eyre!("tx receipt not found"))?;
// Can query receipt by txhash too
let tx = provider.transaction_by_id(txid)?.unwrap();
let receipt_by_hash = provider
.receipt_by_hash(*tx.tx_hash())?
.ok_or(eyre::eyre!("tx receipt by hash not found"))?;
assert_eq!(receipt, receipt_by_hash);
// Can query all the receipts in a block
let _receipts = provider
.receipts_by_block(100.into())?
.ok_or(eyre::eyre!("no receipts found for block"))?;
// Can check if an address/topic filter is present in a header, if it is we query the block and
// receipts and do something with the data
// 1. get the bloom from the header
let header = provider.header_by_number(header_num)?.unwrap();
let bloom = header.logs_bloom();
// 2. Construct the address/topics filters
// For a hypothetical address, we'll want to filter down for a specific indexed topic (e.g.
// `from`).
let addr = Address::random();
let topic = B256::random();
// TODO: Make it clearer how to choose between event_signature(topic0) (event name) and the
// other 3 indexed topics. This API is a bit clunky and not obvious to use at the moment.
let filter = Filter::new().address(addr).event_signature(topic);
// 3. If the address & topics filters match do something. We use the outer check against the
// bloom filter stored in the header to avoid having to query the receipts table when there
// is no instance of any event that matches the filter in the header.
if filter.matches_bloom(bloom) {
let receipts = provider.receipt(header_num)?.ok_or(eyre::eyre!("receipt not found"))?;
for log in &receipts.logs {
if filter.matches(log) {
// Do something with the log e.g. decode it.
println!("Matching log found! {log:?}")
}
}
}
Ok(())
}
fn state_provider_example<T: StateProvider + AccountReader>(provider: T) -> eyre::Result<()> {
let address = Address::random();
let storage_key = B256::random();
// Can get account / storage state with simple point queries
let _account = provider.basic_account(&address)?;
let _code = provider.account_code(&address)?;
let _storage = provider.storage(address, storage_key)?;
// TODO: unimplemented.
// let _proof = provider.proof(address, &[])?;
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-payload-builder/src/generator.rs | examples/custom-payload-builder/src/generator.rs | use crate::job::EmptyBlockPayloadJob;
use alloy_eips::BlockNumberOrTag;
use reth_basic_payload_builder::{
BasicPayloadJobGeneratorConfig, HeaderForPayload, PayloadBuilder, PayloadConfig,
};
use reth_ethereum::{
node::api::{Block, PayloadBuilderAttributes},
primitives::SealedHeader,
provider::{BlockReaderIdExt, BlockSource, StateProviderFactory},
tasks::TaskSpawner,
};
use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator};
use std::sync::Arc;
/// The generator type that creates new jobs that builds empty blocks.
#[derive(Debug)]
pub struct EmptyBlockPayloadJobGenerator<Client, Tasks, Builder> {
/// The client that can interact with the chain.
client: Client,
/// How to spawn building tasks
executor: Tasks,
/// The configuration for the job generator.
_config: BasicPayloadJobGeneratorConfig,
/// The type responsible for building payloads.
///
/// See [PayloadBuilder]
builder: Builder,
}
// === impl EmptyBlockPayloadJobGenerator ===
impl<Client, Tasks, Builder> EmptyBlockPayloadJobGenerator<Client, Tasks, Builder> {
/// Creates a new [EmptyBlockPayloadJobGenerator] with the given config and custom
/// [PayloadBuilder]
pub fn with_builder(
client: Client,
executor: Tasks,
config: BasicPayloadJobGeneratorConfig,
builder: Builder,
) -> Self {
Self { client, executor, _config: config, builder }
}
}
impl<Client, Tasks, Builder> PayloadJobGenerator
for EmptyBlockPayloadJobGenerator<Client, Tasks, Builder>
where
Client: StateProviderFactory
+ BlockReaderIdExt<Header = HeaderForPayload<Builder::BuiltPayload>>
+ Clone
+ Unpin
+ 'static,
Tasks: TaskSpawner + Clone + Unpin + 'static,
Builder: PayloadBuilder + Unpin + 'static,
Builder::Attributes: Unpin + Clone,
Builder::BuiltPayload: Unpin + Clone,
{
type Job = EmptyBlockPayloadJob<Tasks, Builder>;
/// This is invoked when the node receives payload attributes from the beacon node via
/// `engine_forkchoiceUpdatedV1`
fn new_payload_job(
&self,
attributes: Builder::Attributes,
) -> Result<Self::Job, PayloadBuilderError> {
let parent_block = if attributes.parent().is_zero() {
// use latest block if parent is zero: genesis block
self.client
.block_by_number_or_tag(BlockNumberOrTag::Latest)?
.ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))?
.seal_slow()
} else {
let block = self
.client
.find_block_by_hash(attributes.parent(), BlockSource::Any)?
.ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))?;
// we already know the hash, so we can seal it
block.seal_unchecked(attributes.parent())
};
let hash = parent_block.hash();
let header = SealedHeader::new(parent_block.header().clone(), hash);
let config = PayloadConfig::new(Arc::new(header), attributes);
Ok(EmptyBlockPayloadJob {
_executor: self.executor.clone(),
builder: self.builder.clone(),
config,
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-payload-builder/src/main.rs | examples/custom-payload-builder/src/main.rs | //! Example for how hook into the node via the CLI extension mechanism without registering
//! additional arguments
//!
//! Run with
//!
//! ```sh
//! cargo run -p custom-payload-builder -- node
//! ```
//!
//! This launches a regular reth node overriding the engine api payload builder with our custom.
#![warn(unused_crate_dependencies)]
use crate::generator::EmptyBlockPayloadJobGenerator;
use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig;
use reth_ethereum::{
chainspec::ChainSpec,
cli::interface::Cli,
node::{
api::{node::FullNodeTypes, NodeTypes},
builder::{components::PayloadServiceBuilder, BuilderContext},
core::cli::config::PayloadBuilderConfig,
node::EthereumAddOns,
EthEngineTypes, EthEvmConfig, EthereumNode,
},
pool::{PoolTransaction, TransactionPool},
provider::CanonStateSubscriptions,
EthPrimitives, TransactionSigned,
};
use reth_ethereum_payload_builder::EthereumBuilderConfig;
use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService};
pub mod generator;
pub mod job;
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct CustomPayloadBuilder;
impl<Node, Pool> PayloadServiceBuilder<Node, Pool, EthEvmConfig> for CustomPayloadBuilder
where
Node: FullNodeTypes<
Types: NodeTypes<
Payload = EthEngineTypes,
ChainSpec = ChainSpec,
Primitives = EthPrimitives,
>,
>,
Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>
+ Unpin
+ 'static,
{
async fn spawn_payload_builder_service(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
evm_config: EthEvmConfig,
) -> eyre::Result<PayloadBuilderHandle<<Node::Types as NodeTypes>::Payload>> {
tracing::info!("Spawning a custom payload builder");
let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new(
ctx.provider().clone(),
pool,
evm_config,
EthereumBuilderConfig::new(),
);
let conf = ctx.payload_builder_config();
let payload_job_config = BasicPayloadJobGeneratorConfig::default()
.interval(conf.interval())
.deadline(conf.deadline())
.max_payload_tasks(conf.max_payload_tasks());
let payload_generator = EmptyBlockPayloadJobGenerator::with_builder(
ctx.provider().clone(),
ctx.task_executor().clone(),
payload_job_config,
payload_builder,
);
let (payload_service, payload_builder) =
PayloadBuilderService::new(payload_generator, ctx.provider().canonical_state_stream());
ctx.task_executor()
.spawn_critical("custom payload builder service", Box::pin(payload_service));
Ok(payload_builder)
}
}
fn main() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
.with_types::<EthereumNode>()
// Configure the components of the node
// use default ethereum components but use our custom payload builder
.with_components(
EthereumNode::components().payload(CustomPayloadBuilder::default()),
)
.with_add_ons(EthereumAddOns::default())
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-payload-builder/src/job.rs | examples/custom-payload-builder/src/job.rs | use futures_util::Future;
use reth_basic_payload_builder::{HeaderForPayload, PayloadBuilder, PayloadConfig};
use reth_ethereum::{
node::api::{PayloadBuilderAttributes, PayloadKind},
tasks::TaskSpawner,
};
use reth_payload_builder::{KeepPayloadJobAlive, PayloadBuilderError, PayloadJob};
use std::{
pin::Pin,
task::{Context, Poll},
};
/// A [PayloadJob] that builds empty blocks.
pub struct EmptyBlockPayloadJob<Tasks, Builder>
where
Builder: PayloadBuilder,
{
/// The configuration for how the payload will be created.
pub(crate) config: PayloadConfig<Builder::Attributes, HeaderForPayload<Builder::BuiltPayload>>,
/// How to spawn building tasks
pub(crate) _executor: Tasks,
/// The type responsible for building payloads.
///
/// See [PayloadBuilder]
pub(crate) builder: Builder,
}
impl<Tasks, Builder> PayloadJob for EmptyBlockPayloadJob<Tasks, Builder>
where
Tasks: TaskSpawner + Clone + 'static,
Builder: PayloadBuilder + Unpin + 'static,
Builder::Attributes: Unpin + Clone,
Builder::BuiltPayload: Unpin + Clone,
{
type PayloadAttributes = Builder::Attributes;
type ResolvePayloadFuture =
futures_util::future::Ready<Result<Self::BuiltPayload, PayloadBuilderError>>;
type BuiltPayload = Builder::BuiltPayload;
fn best_payload(&self) -> Result<Self::BuiltPayload, PayloadBuilderError> {
let payload = self.builder.build_empty_payload(self.config.clone())?;
Ok(payload)
}
fn payload_attributes(&self) -> Result<Self::PayloadAttributes, PayloadBuilderError> {
Ok(self.config.attributes.clone())
}
fn payload_timestamp(&self) -> Result<u64, PayloadBuilderError> {
Ok(self.config.attributes.timestamp())
}
fn resolve_kind(
&mut self,
_kind: PayloadKind,
) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) {
let payload = self.best_payload();
(futures_util::future::ready(payload), KeepPayloadJobAlive::No)
}
}
/// A [PayloadJob] is a future that's being polled by the `PayloadBuilderService`
impl<Tasks, Builder> Future for EmptyBlockPayloadJob<Tasks, Builder>
where
Tasks: TaskSpawner + Clone + 'static,
Builder: PayloadBuilder + Unpin + 'static,
Builder::Attributes: Unpin + Clone,
Builder::BuiltPayload: Unpin + Clone,
{
type Output = Result<(), PayloadBuilderError>;
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Pending
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-engine-types/src/main.rs | examples/custom-engine-types/src/main.rs | //! This example shows how to implement a custom [EngineTypes].
//!
//! The [EngineTypes] trait can be implemented to configure the engine to work with custom types,
//! as long as those types implement certain traits.
//!
//! Custom payload attributes can be supported by implementing two main traits:
//!
//! [PayloadAttributes] can be implemented for payload attributes types that are used as
//! arguments to the `engine_forkchoiceUpdated` method. This type should be used to define and
//! _spawn_ payload jobs.
//!
//! [PayloadBuilderAttributes] can be implemented for payload attributes types that _describe_
//! running payload jobs.
//!
//! Once traits are implemented and custom types are defined, the [EngineTypes] trait can be
//! implemented:
#![warn(unused_crate_dependencies)]
use alloy_eips::eip4895::Withdrawals;
use alloy_genesis::Genesis;
use alloy_primitives::{Address, B256};
use alloy_rpc_types::{
engine::{
ExecutionData, ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3,
ExecutionPayloadEnvelopeV4, ExecutionPayloadEnvelopeV5, ExecutionPayloadV1,
PayloadAttributes as EthPayloadAttributes, PayloadId,
},
Withdrawal,
};
use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig};
use reth_ethereum::{
chainspec::{Chain, ChainSpec, ChainSpecProvider},
node::{
api::{
payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes},
validate_version_specific_fields, AddOnsContext, EngineApiValidator, EngineTypes,
FullNodeComponents, FullNodeTypes, InvalidPayloadAttributesError, NewPayloadError,
NodeTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadTypes, PayloadValidator,
},
builder::{
components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder},
rpc::{PayloadValidatorBuilder, RpcAddOns},
BuilderContext, Node, NodeAdapter, NodeBuilder,
},
core::{args::RpcServerArgs, node_config::NodeConfig},
node::{
EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder,
EthereumPoolBuilder,
},
EthEvmConfig, EthereumEthApiBuilder,
},
pool::{PoolTransaction, TransactionPool},
primitives::{Block, RecoveredBlock, SealedBlock},
provider::{EthStorage, StateProviderFactory},
rpc::types::engine::ExecutionPayload,
tasks::TaskManager,
EthPrimitives, TransactionSigned,
};
use reth_ethereum_payload_builder::{EthereumBuilderConfig, EthereumExecutionPayloadValidator};
use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError};
use reth_tracing::{RethTracer, Tracer};
use serde::{Deserialize, Serialize};
use std::{convert::Infallible, sync::Arc};
use thiserror::Error;
/// A custom payload attributes type.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct CustomPayloadAttributes {
/// An inner payload type
#[serde(flatten)]
pub inner: EthPayloadAttributes,
/// A custom field
pub custom: u64,
}
/// Custom error type used in payload attributes validation
#[derive(Debug, Error)]
pub enum CustomError {
#[error("Custom field is not zero")]
CustomFieldIsNotZero,
}
impl PayloadAttributes for CustomPayloadAttributes {
fn timestamp(&self) -> u64 {
self.inner.timestamp()
}
fn withdrawals(&self) -> Option<&Vec<Withdrawal>> {
self.inner.withdrawals()
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.inner.parent_beacon_block_root()
}
}
/// New type around the payload builder attributes type
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CustomPayloadBuilderAttributes(EthPayloadBuilderAttributes);
impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes {
type RpcPayloadAttributes = CustomPayloadAttributes;
type Error = Infallible;
fn try_new(
parent: B256,
attributes: CustomPayloadAttributes,
_version: u8,
) -> Result<Self, Infallible> {
Ok(Self(EthPayloadBuilderAttributes::new(parent, attributes.inner)))
}
fn payload_id(&self) -> PayloadId {
self.0.id
}
fn parent(&self) -> B256 {
self.0.parent
}
fn timestamp(&self) -> u64 {
self.0.timestamp
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.0.parent_beacon_block_root
}
fn suggested_fee_recipient(&self) -> Address {
self.0.suggested_fee_recipient
}
fn prev_randao(&self) -> B256 {
self.0.prev_randao
}
fn withdrawals(&self) -> &Withdrawals {
&self.0.withdrawals
}
}
/// Custom engine types - uses a custom payload attributes RPC type, but uses the default
/// payload builder attributes type.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[non_exhaustive]
pub struct CustomEngineTypes;
impl PayloadTypes for CustomEngineTypes {
type ExecutionData = ExecutionData;
type BuiltPayload = EthBuiltPayload;
type PayloadAttributes = CustomPayloadAttributes;
type PayloadBuilderAttributes = CustomPayloadBuilderAttributes;
fn block_to_payload(
block: SealedBlock<
<<Self::BuiltPayload as reth_ethereum::node::api::BuiltPayload>::Primitives as reth_ethereum::node::api::NodePrimitives>::Block,
>,
) -> ExecutionData {
let (payload, sidecar) =
ExecutionPayload::from_block_unchecked(block.hash(), &block.into_block());
ExecutionData { payload, sidecar }
}
}
impl EngineTypes for CustomEngineTypes {
type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1;
type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2;
type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3;
type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4;
type ExecutionPayloadEnvelopeV5 = ExecutionPayloadEnvelopeV5;
}
/// Custom engine validator
#[derive(Debug, Clone)]
pub struct CustomEngineValidator {
inner: EthereumExecutionPayloadValidator<ChainSpec>,
}
impl CustomEngineValidator {
/// Instantiates a new validator.
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { inner: EthereumExecutionPayloadValidator::new(chain_spec) }
}
/// Returns the chain spec used by the validator.
#[inline]
fn chain_spec(&self) -> &ChainSpec {
self.inner.chain_spec()
}
}
impl PayloadValidator<CustomEngineTypes> for CustomEngineValidator {
type Block = reth_ethereum::Block;
fn ensure_well_formed_payload(
&self,
payload: ExecutionData,
) -> Result<RecoveredBlock<Self::Block>, NewPayloadError> {
let sealed_block = self.inner.ensure_well_formed_payload(payload)?;
sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into()))
}
fn validate_payload_attributes_against_header(
&self,
_attr: &CustomPayloadAttributes,
_header: &<Self::Block as Block>::Header,
) -> Result<(), InvalidPayloadAttributesError> {
// skip default timestamp validation
Ok(())
}
}
impl EngineApiValidator<CustomEngineTypes> for CustomEngineValidator {
fn validate_version_specific_fields(
&self,
version: EngineApiMessageVersion,
payload_or_attrs: PayloadOrAttributes<'_, ExecutionData, CustomPayloadAttributes>,
) -> Result<(), EngineObjectValidationError> {
validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs)
}
fn ensure_well_formed_attributes(
&self,
version: EngineApiMessageVersion,
attributes: &CustomPayloadAttributes,
) -> Result<(), EngineObjectValidationError> {
validate_version_specific_fields(
self.chain_spec(),
version,
PayloadOrAttributes::<ExecutionData, CustomPayloadAttributes>::PayloadAttributes(
attributes,
),
)?;
// custom validation logic - ensure that the custom field is not zero
if attributes.custom == 0 {
return Err(EngineObjectValidationError::invalid_params(
CustomError::CustomFieldIsNotZero,
))
}
Ok(())
}
}
/// Custom engine validator builder
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct CustomEngineValidatorBuilder;
impl<N> PayloadValidatorBuilder<N> for CustomEngineValidatorBuilder
where
N: FullNodeComponents<Types = MyCustomNode, Evm = EthEvmConfig>,
{
type Validator = CustomEngineValidator;
async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::Validator> {
Ok(CustomEngineValidator::new(ctx.config.chain.clone()))
}
}
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
struct MyCustomNode;
/// Configure the node types
impl NodeTypes for MyCustomNode {
type Primitives = EthPrimitives;
type ChainSpec = ChainSpec;
type Storage = EthStorage;
type Payload = CustomEngineTypes;
}
/// Custom addons configuring RPC types
pub type MyNodeAddOns<N> = RpcAddOns<N, EthereumEthApiBuilder, CustomEngineValidatorBuilder>;
/// Implement the Node trait for the custom node
///
/// This provides a preset configuration for the node
impl<N> Node<N> for MyCustomNode
where
N: FullNodeTypes<Types = Self>,
{
type ComponentsBuilder = ComponentsBuilder<
N,
EthereumPoolBuilder,
BasicPayloadServiceBuilder<CustomPayloadBuilderBuilder>,
EthereumNetworkBuilder,
EthereumExecutorBuilder,
EthereumConsensusBuilder,
>;
type AddOns = MyNodeAddOns<NodeAdapter<N>>;
fn components_builder(&self) -> Self::ComponentsBuilder {
ComponentsBuilder::default()
.node_types::<N>()
.pool(EthereumPoolBuilder::default())
.executor(EthereumExecutorBuilder::default())
.payload(BasicPayloadServiceBuilder::default())
.network(EthereumNetworkBuilder::default())
.consensus(EthereumConsensusBuilder::default())
}
fn add_ons(&self) -> Self::AddOns {
MyNodeAddOns::default()
}
}
/// A custom payload service builder that supports the custom engine types
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct CustomPayloadBuilderBuilder;
impl<Node, Pool> PayloadBuilderBuilder<Node, Pool, EthEvmConfig> for CustomPayloadBuilderBuilder
where
Node: FullNodeTypes<
Types: NodeTypes<
Payload = CustomEngineTypes,
ChainSpec = ChainSpec,
Primitives = EthPrimitives,
>,
>,
Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>
+ Unpin
+ 'static,
{
type PayloadBuilder = CustomPayloadBuilder<Pool, Node::Provider>;
async fn build_payload_builder(
self,
ctx: &BuilderContext<Node>,
pool: Pool,
evm_config: EthEvmConfig,
) -> eyre::Result<Self::PayloadBuilder> {
let payload_builder = CustomPayloadBuilder {
inner: reth_ethereum_payload_builder::EthereumPayloadBuilder::new(
ctx.provider().clone(),
pool,
evm_config,
EthereumBuilderConfig::new(),
),
};
Ok(payload_builder)
}
}
/// The type responsible for building custom payloads
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct CustomPayloadBuilder<Pool, Client> {
inner: reth_ethereum_payload_builder::EthereumPayloadBuilder<Pool, Client>,
}
impl<Pool, Client> PayloadBuilder for CustomPayloadBuilder<Pool, Client>
where
Client: StateProviderFactory + ChainSpecProvider<ChainSpec = ChainSpec> + Clone,
Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>,
{
type Attributes = CustomPayloadBuilderAttributes;
type BuiltPayload = EthBuiltPayload;
fn try_build(
&self,
args: BuildArguments<Self::Attributes, Self::BuiltPayload>,
) -> Result<BuildOutcome<Self::BuiltPayload>, PayloadBuilderError> {
let BuildArguments { cached_reads, config, cancel, best_payload } = args;
let PayloadConfig { parent_header, attributes } = config;
// This reuses the default EthereumPayloadBuilder to build the payload
// but any custom logic can be implemented here
self.inner.try_build(BuildArguments {
cached_reads,
config: PayloadConfig { parent_header, attributes: attributes.0 },
cancel,
best_payload,
})
}
fn build_empty_payload(
&self,
config: PayloadConfig<Self::Attributes>,
) -> Result<Self::BuiltPayload, PayloadBuilderError> {
let PayloadConfig { parent_header, attributes } = config;
self.inner.build_empty_payload(PayloadConfig { parent_header, attributes: attributes.0 })
}
}
#[tokio::main]
async fn main() -> eyre::Result<()> {
let _guard = RethTracer::new().init()?;
let tasks = TaskManager::current();
// create optimism genesis with canyon at block 2
let spec = ChainSpec::builder()
.chain(Chain::mainnet())
.genesis(Genesis::default())
.london_activated()
.paris_activated()
.shanghai_activated()
.build();
// create node config
let node_config =
NodeConfig::test().with_rpc(RpcServerArgs::default().with_http()).with_chain(spec);
let handle = NodeBuilder::new(node_config)
.testing_node(tasks.executor())
.launch_node(MyCustomNode::default())
.await
.unwrap();
println!("Node started");
handle.node_exit_future.await
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/main.rs | examples/custom-rlpx-subprotocol/src/main.rs | //! Example for how to customize the network layer by adding a custom rlpx subprotocol.
//!
//! Run with
//!
//! ```sh
//! cargo run -p example-custom-rlpx-subprotocol -- node
//! ```
//!
//! This launches a regular reth node with a custom rlpx subprotocol.
#![warn(unused_crate_dependencies)]
mod subprotocol;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use reth_ethereum::{
network::{
api::{test_utils::PeersHandleProvider, NetworkInfo},
config::rng_secret_key,
protocol::IntoRlpxSubProtocol,
NetworkConfig, NetworkManager, NetworkProtocols,
},
node::{builder::NodeHandle, EthereumNode},
};
use subprotocol::{
connection::CustomCommand,
protocol::{
event::ProtocolEvent,
handler::{CustomRlpxProtoHandler, ProtocolState},
},
};
use tokio::sync::{mpsc, oneshot};
use tracing::info;
fn main() -> eyre::Result<()> {
reth_ethereum::cli::Cli::parse_args().run(|builder, _args| async move {
// launch the node
let NodeHandle { node, node_exit_future } =
builder.node(EthereumNode::default()).launch().await?;
let peer_id = node.network.peer_id();
let peer_addr = node.network.local_addr();
// add the custom network subprotocol to the launched node
let (tx, mut from_peer0) = mpsc::unbounded_channel();
let custom_rlpx_handler = CustomRlpxProtoHandler { state: ProtocolState { events: tx } };
node.network.add_rlpx_sub_protocol(custom_rlpx_handler.into_rlpx_sub_protocol());
// creates a separate network instance and adds the custom network subprotocol
let secret_key = rng_secret_key();
let (tx, mut from_peer1) = mpsc::unbounded_channel();
let custom_rlpx_handler_2 = CustomRlpxProtoHandler { state: ProtocolState { events: tx } };
let net_cfg = NetworkConfig::builder(secret_key)
.listener_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)))
.disable_discovery()
.add_rlpx_sub_protocol(custom_rlpx_handler_2.into_rlpx_sub_protocol())
.build_with_noop_provider(node.chain_spec());
// spawn the second network instance
let subnetwork = NetworkManager::eth(net_cfg).await?;
let subnetwork_peer_id = *subnetwork.peer_id();
let subnetwork_peer_addr = subnetwork.local_addr();
let subnetwork_handle = subnetwork.peers_handle();
node.task_executor.spawn(subnetwork);
// connect the launched node to the subnetwork
node.network.peers_handle().add_peer(subnetwork_peer_id, subnetwork_peer_addr);
// connect the subnetwork to the launched node
subnetwork_handle.add_peer(*peer_id, peer_addr);
// establish connection between peer0 and peer1
let peer0_to_peer1 = from_peer0.recv().await.expect("peer0 connecting to peer1");
let peer0_conn = match peer0_to_peer1 {
ProtocolEvent::Established { direction: _, peer_id, to_connection } => {
assert_eq!(peer_id, subnetwork_peer_id);
to_connection
}
};
// establish connection between peer1 and peer0
let peer1_to_peer0 = from_peer1.recv().await.expect("peer1 connecting to peer0");
let peer1_conn = match peer1_to_peer0 {
ProtocolEvent::Established { direction: _, peer_id: peer1_id, to_connection } => {
assert_eq!(peer1_id, *peer_id);
to_connection
}
};
info!(target:"rlpx-subprotocol", "Connection established!");
// send a ping message from peer0 to peer1
let (tx, rx) = oneshot::channel();
peer0_conn.send(CustomCommand::Message { msg: "hello!".to_string(), response: tx })?;
let response = rx.await?;
assert_eq!(response, "hello!");
info!(target:"rlpx-subprotocol", ?response, "New message received");
// send a ping message from peer1 to peer0
let (tx, rx) = oneshot::channel();
peer1_conn.send(CustomCommand::Message { msg: "world!".to_string(), response: tx })?;
let response = rx.await?;
assert_eq!(response, "world!");
info!(target:"rlpx-subprotocol", ?response, "New message received");
info!(target:"rlpx-subprotocol", "Peers connected via custom rlpx subprotocol!");
node_exit_future.await
})
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs | examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs | pub(crate) mod connection;
pub(crate) mod protocol;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs | examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs | use super::protocol::proto::{CustomRlpxProtoMessage, CustomRlpxProtoMessageKind};
use alloy_primitives::bytes::BytesMut;
use futures::{Stream, StreamExt};
use reth_ethereum::network::eth_wire::multiplex::ProtocolConnection;
use std::{
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::oneshot;
use tokio_stream::wrappers::UnboundedReceiverStream;
pub(crate) mod handler;
/// We define some custom commands that the subprotocol supports.
pub(crate) enum CustomCommand {
/// Sends a message to the peer
Message {
msg: String,
/// The response will be sent to this channel.
response: oneshot::Sender<String>,
},
}
/// The connection handler for the custom RLPx protocol.
pub(crate) struct CustomRlpxConnection {
conn: ProtocolConnection,
initial_ping: Option<CustomRlpxProtoMessage>,
commands: UnboundedReceiverStream<CustomCommand>,
pending_pong: Option<oneshot::Sender<String>>,
}
impl Stream for CustomRlpxConnection {
type Item = BytesMut;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
if let Some(initial_ping) = this.initial_ping.take() {
return Poll::Ready(Some(initial_ping.encoded()))
}
loop {
if let Poll::Ready(Some(cmd)) = this.commands.poll_next_unpin(cx) {
return match cmd {
CustomCommand::Message { msg, response } => {
this.pending_pong = Some(response);
Poll::Ready(Some(CustomRlpxProtoMessage::ping_message(msg).encoded()))
}
}
}
let Some(msg) = ready!(this.conn.poll_next_unpin(cx)) else { return Poll::Ready(None) };
let Some(msg) = CustomRlpxProtoMessage::decode_message(&mut &msg[..]) else {
return Poll::Ready(None)
};
match msg.message {
CustomRlpxProtoMessageKind::Ping => {
return Poll::Ready(Some(CustomRlpxProtoMessage::pong().encoded()))
}
CustomRlpxProtoMessageKind::Pong => {}
CustomRlpxProtoMessageKind::PingMessage(msg) => {
return Poll::Ready(Some(CustomRlpxProtoMessage::pong_message(msg).encoded()))
}
CustomRlpxProtoMessageKind::PongMessage(msg) => {
if let Some(sender) = this.pending_pong.take() {
sender.send(msg).ok();
}
continue
}
}
return Poll::Pending
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs | examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs | use super::CustomRlpxConnection;
use crate::subprotocol::protocol::{
event::ProtocolEvent, handler::ProtocolState, proto::CustomRlpxProtoMessage,
};
use reth_ethereum::network::{
api::{Direction, PeerId},
eth_wire::{capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol},
protocol::{ConnectionHandler, OnNotSupported},
};
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
/// The connection handler for the custom RLPx protocol.
pub(crate) struct CustomRlpxConnectionHandler {
pub(crate) state: ProtocolState,
}
impl ConnectionHandler for CustomRlpxConnectionHandler {
type Connection = CustomRlpxConnection;
fn protocol(&self) -> Protocol {
CustomRlpxProtoMessage::protocol()
}
fn on_unsupported_by_peer(
self,
_supported: &SharedCapabilities,
_direction: Direction,
_peer_id: PeerId,
) -> OnNotSupported {
OnNotSupported::KeepAlive
}
fn into_connection(
self,
direction: Direction,
peer_id: PeerId,
conn: ProtocolConnection,
) -> Self::Connection {
let (tx, rx) = mpsc::unbounded_channel();
self.state
.events
.send(ProtocolEvent::Established { direction, peer_id, to_connection: tx })
.ok();
CustomRlpxConnection {
conn,
initial_ping: direction.is_outgoing().then(CustomRlpxProtoMessage::ping),
commands: UnboundedReceiverStream::new(rx),
pending_pong: None,
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs | examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs | use crate::subprotocol::connection::CustomCommand;
use reth_ethereum::network::{api::PeerId, Direction};
use tokio::sync::mpsc;
/// The events that can be emitted by our custom protocol.
#[derive(Debug)]
pub(crate) enum ProtocolEvent {
Established {
#[expect(dead_code)]
direction: Direction,
peer_id: PeerId,
to_connection: mpsc::UnboundedSender<CustomCommand>,
},
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs | examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs | pub(crate) mod event;
pub(crate) mod handler;
pub(crate) mod proto;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs | examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs | use super::event::ProtocolEvent;
use crate::subprotocol::connection::handler::CustomRlpxConnectionHandler;
use reth_ethereum::network::{api::PeerId, protocol::ProtocolHandler};
use std::net::SocketAddr;
use tokio::sync::mpsc;
/// Protocol state is an helper struct to store the protocol events.
#[derive(Clone, Debug)]
pub(crate) struct ProtocolState {
pub(crate) events: mpsc::UnboundedSender<ProtocolEvent>,
}
/// The protocol handler takes care of incoming and outgoing connections.
#[derive(Debug)]
pub(crate) struct CustomRlpxProtoHandler {
pub state: ProtocolState,
}
impl ProtocolHandler for CustomRlpxProtoHandler {
type ConnectionHandler = CustomRlpxConnectionHandler;
fn on_incoming(&self, _socket_addr: SocketAddr) -> Option<Self::ConnectionHandler> {
Some(CustomRlpxConnectionHandler { state: self.state.clone() })
}
fn on_outgoing(
&self,
_socket_addr: SocketAddr,
_peer_id: PeerId,
) -> Option<Self::ConnectionHandler> {
Some(CustomRlpxConnectionHandler { state: self.state.clone() })
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs | examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs | //! Simple RLPx Ping Pong protocol that also support sending messages,
//! following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md)
use alloy_primitives::bytes::{Buf, BufMut, BytesMut};
use reth_ethereum::network::eth_wire::{protocol::Protocol, Capability};
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum CustomRlpxProtoMessageId {
Ping = 0x00,
Pong = 0x01,
PingMessage = 0x02,
PongMessage = 0x03,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum CustomRlpxProtoMessageKind {
Ping,
Pong,
PingMessage(String),
PongMessage(String),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct CustomRlpxProtoMessage {
pub message_type: CustomRlpxProtoMessageId,
pub message: CustomRlpxProtoMessageKind,
}
impl CustomRlpxProtoMessage {
/// Returns the capability for the `custom_rlpx` protocol.
pub fn capability() -> Capability {
Capability::new_static("custom_rlpx", 1)
}
/// Returns the protocol for the `custom_rlpx` protocol.
pub fn protocol() -> Protocol {
Protocol::new(Self::capability(), 4)
}
/// Creates a ping message
pub fn ping_message(msg: impl Into<String>) -> Self {
Self {
message_type: CustomRlpxProtoMessageId::PingMessage,
message: CustomRlpxProtoMessageKind::PingMessage(msg.into()),
}
}
/// Creates a pong message
pub fn pong_message(msg: impl Into<String>) -> Self {
Self {
message_type: CustomRlpxProtoMessageId::PongMessage,
message: CustomRlpxProtoMessageKind::PongMessage(msg.into()),
}
}
/// Creates a ping message
pub fn ping() -> Self {
Self {
message_type: CustomRlpxProtoMessageId::Ping,
message: CustomRlpxProtoMessageKind::Ping,
}
}
/// Creates a pong message
pub fn pong() -> Self {
Self {
message_type: CustomRlpxProtoMessageId::Pong,
message: CustomRlpxProtoMessageKind::Pong,
}
}
/// Creates a new `CustomRlpxProtoMessage` with the given message ID and payload.
pub fn encoded(&self) -> BytesMut {
let mut buf = BytesMut::new();
buf.put_u8(self.message_type as u8);
match &self.message {
CustomRlpxProtoMessageKind::Ping | CustomRlpxProtoMessageKind::Pong => {}
CustomRlpxProtoMessageKind::PingMessage(msg) |
CustomRlpxProtoMessageKind::PongMessage(msg) => {
buf.put(msg.as_bytes());
}
}
buf
}
/// Decodes a `CustomRlpxProtoMessage` from the given message buffer.
pub fn decode_message(buf: &mut &[u8]) -> Option<Self> {
if buf.is_empty() {
return None
}
let id = buf[0];
buf.advance(1);
let message_type = match id {
0x00 => CustomRlpxProtoMessageId::Ping,
0x01 => CustomRlpxProtoMessageId::Pong,
0x02 => CustomRlpxProtoMessageId::PingMessage,
0x03 => CustomRlpxProtoMessageId::PongMessage,
_ => return None,
};
let message = match message_type {
CustomRlpxProtoMessageId::Ping => CustomRlpxProtoMessageKind::Ping,
CustomRlpxProtoMessageId::Pong => CustomRlpxProtoMessageKind::Pong,
CustomRlpxProtoMessageId::PingMessage => CustomRlpxProtoMessageKind::PingMessage(
String::from_utf8_lossy(&buf[..]).into_owned(),
),
CustomRlpxProtoMessageId::PongMessage => CustomRlpxProtoMessageKind::PongMessage(
String::from_utf8_lossy(&buf[..]).into_owned(),
),
};
Some(Self { message_type, message })
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs | examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs | use crate::BeaconSidecarConfig;
use alloy_consensus::{BlockHeader, Signed, Transaction as _, TxEip4844WithSidecar, Typed2718};
use alloy_eips::eip7594::BlobTransactionSidecarVariant;
use alloy_primitives::B256;
use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator};
use eyre::Result;
use futures_util::{stream::FuturesUnordered, Future, Stream, StreamExt};
use reqwest::{Error, StatusCode};
use reth_ethereum::{
pool::{BlobStoreError, TransactionPoolExt},
primitives::RecoveredBlock,
provider::CanonStateNotification,
PooledTransactionVariant,
};
use serde::{Deserialize, Serialize};
use std::{
collections::VecDeque,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use thiserror::Error;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlockMetadata {
pub block_hash: B256,
pub block_number: u64,
pub gas_used: u64,
}
#[derive(Debug, Clone)]
pub struct MinedBlob {
pub transaction: Signed<TxEip4844WithSidecar<BlobTransactionSidecarVariant>>,
pub block_metadata: BlockMetadata,
}
#[derive(Debug, Clone)]
pub struct ReorgedBlob {
pub transaction_hash: B256,
pub block_metadata: BlockMetadata,
}
#[derive(Debug, Clone)]
#[expect(clippy::large_enum_variant)]
pub enum BlobTransactionEvent {
Mined(MinedBlob),
Reorged(ReorgedBlob),
}
/// SideCarError Handles Errors from both EL and CL
#[derive(Debug, Error)]
pub enum SideCarError {
#[error("Reqwest encountered an error: {0}")]
ReqwestError(Error),
#[error("Failed to fetch transactions from the blobstore: {0}")]
TransactionPoolError(BlobStoreError),
#[error("400: {0}")]
InvalidBlockID(String),
#[error("404: {0}")]
BlockNotFound(String),
#[error("500: {0}")]
InternalError(String),
#[error("Network error: {0}")]
NetworkError(String),
#[error("Data parsing error: {0}")]
DeserializationError(String),
#[error("{0} Error: {1}")]
UnknownError(u16, String),
}
/// Futures associated with retrieving blob data from the beacon client
type SidecarsFuture =
Pin<Box<dyn Future<Output = Result<Vec<BlobTransactionEvent>, SideCarError>> + Send>>;
/// A Stream that processes CanonStateNotifications and retrieves BlobTransactions from the beacon
/// client.
///
/// First checks if the blob sidecar for a given EIP4844 is stored locally, if not attempts to
/// retrieve it from the CL Layer
#[must_use = "streams do nothing unless polled"]
pub struct MinedSidecarStream<St, P> {
pub events: St,
pub pool: P,
pub beacon_config: BeaconSidecarConfig,
pub client: reqwest::Client,
pub pending_requests: FuturesUnordered<SidecarsFuture>,
pub queued_actions: VecDeque<BlobTransactionEvent>,
}
impl<St, P> MinedSidecarStream<St, P>
where
    St: Stream<Item = CanonStateNotification> + Send + Unpin + 'static,
    P: TransactionPoolExt + Unpin + 'static,
{
    /// Collects blob sidecars for every EIP-4844 transaction in `block`.
    ///
    /// Tries the local transaction pool first; if that lookup fails, a request
    /// against the consensus-layer client is queued on `pending_requests` and
    /// its results will be buffered once resolved.
    fn process_block(&mut self, block: &RecoveredBlock<reth_ethereum::Block>) {
        // Pair each blob tx with its blob count; the count is later used to
        // split the beacon blob bundle back into per-tx sidecars.
        let txs: Vec<_> = block
            .body()
            .transactions()
            .filter(|tx| tx.is_eip4844())
            .map(|tx| (tx.clone(), tx.blob_count().unwrap_or(0) as usize))
            .collect();
        let mut all_blobs_available = true;
        let mut actions_to_queue: Vec<BlobTransactionEvent> = Vec::new();
        if txs.is_empty() {
            return
        }
        // NOTE(review): assumes `get_all_blobs_exact` errors when any requested
        // sidecar is missing locally — confirm against the pool trait docs.
        match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| *tx.tx_hash()).collect()) {
            Ok(blobs) => {
                actions_to_queue.reserve_exact(txs.len());
                for ((tx, _), sidecar) in txs.iter().zip(blobs.into_iter()) {
                    if let PooledTransactionVariant::Eip4844(transaction) = tx
                        .clone()
                        .try_into_pooled_eip4844(Arc::unwrap_or_clone(sidecar))
                        .expect("should not fail to convert blob tx if it is already eip4844")
                    {
                        let block_metadata = BlockMetadata {
                            block_hash: block.hash(),
                            block_number: block.number,
                            gas_used: block.gas_used,
                        };
                        actions_to_queue.push(BlobTransactionEvent::Mined(MinedBlob {
                            transaction,
                            block_metadata,
                        }));
                    }
                }
            }
            Err(_err) => {
                all_blobs_available = false;
            }
        };
        // if any blob is missing we must instead query the consensus layer.
        if all_blobs_available {
            self.queued_actions.extend(actions_to_queue);
        } else {
            let client_clone = self.client.clone();
            let block_root = block.hash();
            let block_clone = block.clone();
            let sidecar_url = self.beacon_config.sidecar_url(block_root);
            let query =
                Box::pin(fetch_blobs_for_block(client_clone, sidecar_url, block_clone, txs));
            self.pending_requests.push(query);
        }
    }
}
impl<St, P> Stream for MinedSidecarStream<St, P>
where
    St: Stream<Item = CanonStateNotification> + Send + Unpin + 'static,
    P: TransactionPoolExt + Unpin + 'static,
{
    type Item = Result<BlobTransactionEvent, SideCarError>;

    /// Attempt to pull the next BlobTransaction from the stream.
    ///
    /// Yields buffered events first, then drains completed beacon-client
    /// requests, and finally processes new chain notifications. Returns
    /// `Poll::Pending` once every source is pending and `Poll::Ready(None)`
    /// when the notification stream ends.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // Request locally first, otherwise request from CL
        loop {
            // Serve anything already buffered.
            if let Some(mined_sidecar) = this.queued_actions.pop_front() {
                return Poll::Ready(Some(Ok(mined_sidecar)))
            }
            // Check if any pending requests are ready and append to buffer.
            while let Poll::Ready(Some(pending_result)) = this.pending_requests.poll_next_unpin(cx)
            {
                match pending_result {
                    Ok(mined_sidecars) => {
                        for sidecar in mined_sidecars {
                            this.queued_actions.push_back(sidecar);
                        }
                    }
                    Err(err) => return Poll::Ready(Some(Err(err))),
                }
            }
            // If draining produced events, loop back and yield them first.
            if !this.queued_actions.is_empty() {
                continue
            }
            // Process the next notification, or propagate end-of-stream /
            // pending states.
            //
            // BUGFIX: the previous implementation never returned
            // `Poll::Pending` (or `Ready(None)`): when the queue was empty
            // and both inner sources were pending, the outer `loop`
            // busy-spun the task forever.
            match this.events.poll_next_unpin(cx) {
                Poll::Ready(Some(notification)) => match notification {
                    CanonStateNotification::Commit { new } => {
                        for (_, block) in new.blocks().iter() {
                            this.process_block(block);
                        }
                    }
                    CanonStateNotification::Reorg { old, new } => {
                        // Handle reorged blocks: report each old blob tx as
                        // reorged, attributed to the new tip.
                        for (_, block) in old.blocks().iter() {
                            let txs: Vec<BlobTransactionEvent> = block
                                .body()
                                .transactions()
                                .filter(|tx| tx.is_eip4844())
                                .map(|tx| {
                                    let transaction_hash = *tx.tx_hash();
                                    let block_metadata = BlockMetadata {
                                        block_hash: new.tip().hash(),
                                        block_number: new.tip().number(),
                                        gas_used: new.tip().gas_used(),
                                    };
                                    BlobTransactionEvent::Reorged(ReorgedBlob {
                                        transaction_hash,
                                        block_metadata,
                                    })
                                })
                                .collect();
                            this.queued_actions.extend(txs);
                        }
                        for (_, block) in new.blocks().iter() {
                            this.process_block(block);
                        }
                    }
                },
                Poll::Ready(None) => return Poll::Ready(None),
                Poll::Pending => return Poll::Pending,
            }
        }
    }
}
/// Query the Beacon Layer for missing BlobTransactions
async fn fetch_blobs_for_block(
client: reqwest::Client,
url: String,
block: RecoveredBlock<reth_ethereum::Block>,
txs: Vec<(reth_ethereum::TransactionSigned, usize)>,
) -> Result<Vec<BlobTransactionEvent>, SideCarError> {
let response = match client.get(url).header("Accept", "application/json").send().await {
Ok(response) => response,
Err(err) => return Err(SideCarError::ReqwestError(err)),
};
if !response.status().is_success() {
return match response.status() {
StatusCode::BAD_REQUEST => {
Err(SideCarError::InvalidBlockID("Invalid request to server.".to_string()))
}
StatusCode::NOT_FOUND => {
Err(SideCarError::BlockNotFound("Requested block not found.".to_string()))
}
StatusCode::INTERNAL_SERVER_ERROR => {
Err(SideCarError::InternalError("Server encountered an error.".to_string()))
}
_ => Err(SideCarError::UnknownError(
response.status().as_u16(),
"Unhandled HTTP status.".to_string(),
)),
}
}
let bytes = match response.bytes().await {
Ok(b) => b,
Err(e) => return Err(SideCarError::NetworkError(e.to_string())),
};
let blobs_bundle: BeaconBlobBundle = match serde_json::from_slice(&bytes) {
Ok(b) => b,
Err(e) => return Err(SideCarError::DeserializationError(e.to_string())),
};
let mut sidecar_iterator = SidecarIterator::new(blobs_bundle);
let sidecars: Vec<BlobTransactionEvent> = txs
.iter()
.filter_map(|(tx, blob_len)| {
sidecar_iterator.next_sidecar(*blob_len).and_then(|sidecar| {
if let PooledTransactionVariant::Eip4844(transaction) = tx
.clone()
.try_into_pooled_eip4844(BlobTransactionSidecarVariant::Eip4844(sidecar))
.expect("should not fail to convert blob tx if it is already eip4844")
{
let block_metadata = BlockMetadata {
block_hash: block.hash(),
block_number: block.number,
gas_used: block.gas_used,
};
Some(BlobTransactionEvent::Mined(MinedBlob { transaction, block_metadata }))
} else {
None
}
})
})
.collect();
Ok(sidecars)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/beacon-api-sidecar-fetcher/src/main.rs | examples/beacon-api-sidecar-fetcher/src/main.rs | //! Run with
//!
//! ```sh
//! cargo run -p example-beacon-api-sidecar-fetcher -- node --full
//! ```
//!
//! This launches a regular reth instance and subscribes to payload attributes event stream.
//!
//! **NOTE**: This expects that the CL client is running an http server on `localhost:5052` and is
//! configured to emit payload attributes events.
//!
//! See beacon Node API: <https://ethereum.github.io/beacon-APIs/>
#![warn(unused_crate_dependencies)]
use std::{
collections::VecDeque,
net::{IpAddr, Ipv4Addr},
};
use alloy_primitives::B256;
use clap::Parser;
use futures_util::{stream::FuturesUnordered, StreamExt};
use mined_sidecar::MinedSidecarStream;
use reth_ethereum::{
cli::{chainspec::EthereumChainSpecParser, interface::Cli},
node::{builder::NodeHandle, EthereumNode},
provider::CanonStateSubscriptions,
};
pub mod mined_sidecar;
fn main() {
    Cli::<EthereumChainSpecParser, BeaconSidecarConfig>::parse()
        .run(|builder, beacon_config| async move {
            // launch the node
            let NodeHandle { node, node_exit_future } =
                builder.node(EthereumNode::default()).launch().await?;
            // Subscribe to canonical-chain notifications; these drive the
            // sidecar stream.
            let notifications: reth_ethereum::provider::CanonStateNotificationStream =
                node.provider.canonical_state_stream();
            let pool = node.pool.clone();
            // Drive the sidecar stream on a background task for the lifetime
            // of the node.
            node.task_executor.spawn(async move {
                let mut sidecar_stream = MinedSidecarStream {
                    events: notifications,
                    pool,
                    beacon_config,
                    client: reqwest::Client::new(),
                    pending_requests: FuturesUnordered::new(),
                    queued_actions: VecDeque::new(),
                };
                while let Some(result) = sidecar_stream.next().await {
                    match result {
                        Ok(blob_transaction) => {
                            // Handle successful transaction
                            println!("Processed BlobTransaction: {blob_transaction:?}");
                        }
                        Err(e) => {
                            // Handle errors specifically
                            eprintln!("Failed to process transaction: {e:?}");
                        }
                    }
                }
            });
            node_exit_future.await
        })
        .unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
///
/// Points the sidecar fetcher at the consensus-layer (beacon) HTTP API.
#[derive(Debug, Clone, clap::Parser)]
pub struct BeaconSidecarConfig {
    /// Beacon Node http server address
    #[arg(long = "cl.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
    pub cl_addr: IpAddr,
    /// Beacon Node http server port to listen on
    #[arg(long = "cl.port", default_value_t = 5052)]
    pub cl_port: u16,
}
impl Default for BeaconSidecarConfig {
    /// Default setup for a lighthouse client: `127.0.0.1:5052`, matching the
    /// `cl.addr` / `cl.port` CLI defaults.
    fn default() -> Self {
        Self {
            // Use the named constant instead of hand-spelling 127.0.0.1.
            cl_addr: IpAddr::V4(Ipv4Addr::LOCALHOST),
            cl_port: 5052,
        }
    }
}
impl BeaconSidecarConfig {
    /// Builds the base `http://host:port` URL of the configured beacon node.
    pub fn http_base_url(&self) -> String {
        let Self { cl_addr, cl_port } = self;
        format!("http://{cl_addr}:{cl_port}")
    }

    /// Builds the blob-sidecars endpoint URL for the given block root.
    /// See <https://ethereum.github.io/beacon-APIs/#/Beacon/getBlobSidecars>
    pub fn sidecar_url(&self, block_root: B256) -> String {
        let base = self.http_base_url();
        format!("{base}/eth/v1/beacon/blob_sidecars/{block_root}")
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/node-custom-rpc/src/main.rs | examples/node-custom-rpc/src/main.rs | //! Example of how to use additional rpc namespaces in the reth CLI
//!
//! Run with
//!
//! ```sh
//! cargo run -p node-custom-rpc -- node --http --ws --enable-ext
//! ```
//!
//! This installs an additional RPC method `txpoolExt_transactionCount` that can be queried via [cast](https://github.com/foundry-rs/foundry)
//!
//! ```sh
//! cast rpc txpoolExt_transactionCount
//! ```
#![warn(unused_crate_dependencies)]
use clap::Parser;
use jsonrpsee::{
core::{RpcResult, SubscriptionResult},
proc_macros::rpc,
PendingSubscriptionSink, SubscriptionMessage,
};
use reth_ethereum::{
cli::{chainspec::EthereumChainSpecParser, interface::Cli},
node::EthereumNode,
pool::TransactionPool,
};
use std::time::Duration;
use tokio::time::sleep;
fn main() {
    Cli::<EthereumChainSpecParser, RethCliTxpoolExt>::parse()
        .run(|builder, args| async move {
            let handle = builder
                // configure default ethereum node
                .node(EthereumNode::default())
                // extend the rpc modules with our custom `TxpoolExt` endpoints
                .extend_rpc_modules(move |ctx| {
                    // The extension is opt-in via the `--enable-ext` flag.
                    if !args.enable_ext {
                        return Ok(())
                    }
                    // here we get the configured pool.
                    let pool = ctx.pool().clone();
                    let ext = TxpoolExt { pool };
                    // now we merge our extension namespace into all configured transports
                    ctx.modules.merge_configured(ext.into_rpc())?;
                    println!("txpool extension enabled");
                    Ok(())
                })
                // launch the node with custom rpc
                .launch()
                .await?;
            handle.wait_for_node_exit().await
        })
        .unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
#[derive(Debug, Clone, Copy, Default, clap::Args)]
struct RethCliTxpoolExt {
    /// CLI flag to enable the txpool extension namespace (`--enable-ext`).
    #[arg(long)]
    pub enable_ext: bool,
}
/// trait interface for a custom rpc namespace: `txpool`
///
/// This defines an additional namespace where all methods are configured as trait functions.
/// The `rpc` macro generates `TxpoolExtApiServer` (and, under test, a client).
#[cfg_attr(not(test), rpc(server, namespace = "txpoolExt"))]
#[cfg_attr(test, rpc(server, client, namespace = "txpoolExt"))]
pub trait TxpoolExtApi {
    /// Returns the number of transactions in the pool.
    #[method(name = "transactionCount")]
    fn transaction_count(&self) -> RpcResult<usize>;
    /// Clears the transaction pool.
    #[method(name = "clearTxpool")]
    fn clear_txpool(&self) -> RpcResult<()>;
    /// Creates a subscription that returns the number of transactions in the pool every 10s.
    ///
    /// `delay` overrides the interval; the production implementation
    /// interprets it in seconds.
    #[subscription(name = "subscribeTransactionCount", item = usize)]
    fn subscribe_transaction_count(
        &self,
        #[argument(rename = "delay")] delay: Option<u64>,
    ) -> SubscriptionResult;
}
/// The type that implements the `txpool` rpc namespace trait
pub struct TxpoolExt<Pool> {
    // The node's configured transaction pool that the RPC methods query.
    pool: Pool,
}
#[cfg(not(test))]
impl<Pool> TxpoolExtApiServer for TxpoolExt<Pool>
where
    Pool: TransactionPool + Clone + 'static,
{
    /// Returns the total number of transactions currently in the pool.
    fn transaction_count(&self) -> RpcResult<usize> {
        Ok(self.pool.pool_size().total)
    }
    /// Removes every transaction currently known to the pool.
    fn clear_txpool(&self) -> RpcResult<()> {
        let all_tx_hashes = self.pool.all_transaction_hashes();
        self.pool.remove_transactions(all_tx_hashes);
        Ok(())
    }
    /// Streams the pool size to the subscriber every `delay` seconds
    /// (default 10). The spawned task loops until the process exits; send
    /// errors from dropped clients are ignored.
    fn subscribe_transaction_count(
        &self,
        pending_subscription_sink: PendingSubscriptionSink,
        delay: Option<u64>,
    ) -> SubscriptionResult {
        let pool = self.pool.clone();
        let delay = delay.unwrap_or(10);
        tokio::spawn(async move {
            // Accept the pending subscription before producing items.
            let sink = match pending_subscription_sink.accept().await {
                Ok(sink) => sink,
                Err(e) => {
                    println!("failed to accept subscription: {e}");
                    return;
                }
            };
            loop {
                sleep(Duration::from_secs(delay)).await;
                let msg = SubscriptionMessage::from(
                    serde_json::value::to_raw_value(&pool.pool_size().total).expect("serialize"),
                );
                // Ignore errors from dropped clients.
                let _ = sink.send(msg).await;
            }
        });
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use jsonrpsee::{
        http_client::HttpClientBuilder, server::ServerBuilder, ws_client::WsClientBuilder,
    };
    use reth_ethereum::pool::noop::NoopTransactionPool;
    // Test-only server implementation. It mirrors the production impl above,
    // except `delay` is interpreted in *milliseconds* so the subscription
    // test completes quickly.
    #[cfg(test)]
    impl<Pool> TxpoolExtApiServer for TxpoolExt<Pool>
    where
        Pool: TransactionPool + Clone + 'static,
    {
        fn transaction_count(&self) -> RpcResult<usize> {
            Ok(self.pool.pool_size().total)
        }
        fn clear_txpool(&self) -> RpcResult<()> {
            let all_tx_hashes = self.pool.all_transaction_hashes();
            self.pool.remove_transactions(all_tx_hashes);
            Ok(())
        }
        fn subscribe_transaction_count(
            &self,
            pending: PendingSubscriptionSink,
            delay: Option<u64>,
        ) -> SubscriptionResult {
            let delay = delay.unwrap_or(10);
            let pool = self.pool.clone();
            tokio::spawn(async move {
                // Accept the subscription
                let sink = match pending.accept().await {
                    Ok(sink) => sink,
                    Err(err) => {
                        eprintln!("failed to accept subscription: {err}");
                        return;
                    }
                };
                // Send pool size repeatedly. NOTE: the delay here is in
                // milliseconds (unlike the production impl's seconds) to keep
                // the tests fast.
                loop {
                    sleep(Duration::from_millis(delay)).await;
                    let message = SubscriptionMessage::from(
                        serde_json::value::to_raw_value(&pool.pool_size().total)
                            .expect("serialize usize"),
                    );
                    // Just ignore errors if a client has dropped
                    let _ = sink.send(message).await;
                }
            });
            Ok(())
        }
    }
    /// The backing pool is a `NoopTransactionPool`, so the count is always 0.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_call_transaction_count_http() {
        let server_addr = start_server().await;
        let uri = format!("http://{server_addr}");
        let client = HttpClientBuilder::default().build(&uri).unwrap();
        let count = TxpoolExtApiClient::transaction_count(&client).await.unwrap();
        assert_eq!(count, 0);
    }
    #[tokio::test(flavor = "multi_thread")]
    async fn test_call_clear_txpool_http() {
        let server_addr = start_server().await;
        let uri = format!("http://{server_addr}");
        let client = HttpClientBuilder::default().build(&uri).unwrap();
        TxpoolExtApiClient::clear_txpool(&client).await.unwrap();
        let count = TxpoolExtApiClient::transaction_count(&client).await.unwrap();
        assert_eq!(count, 0);
    }
    #[tokio::test(flavor = "multi_thread")]
    async fn test_subscribe_transaction_count_ws() {
        let server_addr = start_server().await;
        let ws_url = format!("ws://{server_addr}");
        let client = WsClientBuilder::default().build(&ws_url).await.unwrap();
        let mut sub = TxpoolExtApiClient::subscribe_transaction_count(&client, None)
            .await
            .expect("failed to subscribe");
        let first = sub.next().await.unwrap().unwrap();
        assert_eq!(first, 0, "expected initial count to be 0");
        let second = sub.next().await.unwrap().unwrap();
        assert_eq!(second, 0, "still expected 0 from our NoopTransactionPool");
    }
    /// Starts an RPC server on an ephemeral port, backed by a no-op
    /// transaction pool, and returns its local address.
    async fn start_server() -> std::net::SocketAddr {
        let server = ServerBuilder::default().build("127.0.0.1:0").await.unwrap();
        let addr = server.local_addr().unwrap();
        let pool = NoopTransactionPool::default();
        let api = TxpoolExt { pool };
        let server_handle = server.start(api.into_rpc());
        tokio::spawn(server_handle.stopped());
        addr
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-evm/src/main.rs | examples/custom-evm/src/main.rs | //! This example shows how to implement a node with a custom EVM
#![warn(unused_crate_dependencies)]
use alloy_evm::{
eth::EthEvmContext,
precompiles::PrecompilesMap,
revm::{
handler::EthPrecompiles,
precompile::{Precompile, PrecompileId},
},
EvmFactory,
};
use alloy_genesis::Genesis;
use alloy_primitives::{address, Bytes};
use reth_ethereum::{
chainspec::{Chain, ChainSpec},
evm::{
primitives::{Database, EvmEnv},
revm::{
context::{Context, TxEnv},
context_interface::result::{EVMError, HaltReason},
inspector::{Inspector, NoOpInspector},
interpreter::interpreter::EthInterpreter,
precompile::{PrecompileOutput, PrecompileResult, Precompiles},
primitives::hardfork::SpecId,
MainBuilder, MainContext,
},
EthEvm, EthEvmConfig,
},
node::{
api::{FullNodeTypes, NodeTypes},
builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder},
core::{args::RpcServerArgs, node_config::NodeConfig},
node::EthereumAddOns,
EthereumNode,
},
tasks::TaskManager,
EthPrimitives,
};
use reth_tracing::{RethTracer, Tracer};
use std::sync::OnceLock;
/// Custom EVM configuration.
///
/// Stateless factory; see the [`EvmFactory`] impl below for the behavior.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct MyEvmFactory;
impl EvmFactory for MyEvmFactory {
    type Evm<DB: Database, I: Inspector<EthEvmContext<DB>, EthInterpreter>> =
        EthEvm<DB, I, Self::Precompiles>;
    type Tx = TxEnv;
    type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError>;
    type HaltReason = HaltReason;
    type Context<DB: Database> = EthEvmContext<DB>;
    type Spec = SpecId;
    type Precompiles = PrecompilesMap;

    /// Builds a mainnet EVM over `db`, swapping in the custom precompile set
    /// when the configured spec is exactly Prague.
    fn create_evm<DB: Database>(&self, db: DB, input: EvmEnv) -> Self::Evm<DB, NoOpInspector> {
        let spec = input.cfg_env.spec;
        let mut evm = Context::mainnet()
            .with_db(db)
            .with_cfg(input.cfg_env)
            .with_block(input.block_env)
            .build_mainnet_with_inspector(NoOpInspector {})
            .with_precompiles(PrecompilesMap::from_static(EthPrecompiles::default().precompiles));
        // Replace the defaults with the extended Prague precompiles.
        // NOTE(review): this triggers only on `== SpecId::PRAGUE`, not on any
        // later spec — confirm that is intended.
        if spec == SpecId::PRAGUE {
            evm = evm.with_precompiles(PrecompilesMap::from_static(prague_custom()));
        }
        EthEvm::new(evm, false)
    }

    /// Like [`Self::create_evm`], but wires in the provided `inspector`.
    /// The boolean flag presumably toggles inspector use — see `EthEvm::new`.
    fn create_evm_with_inspector<DB: Database, I: Inspector<Self::Context<DB>, EthInterpreter>>(
        &self,
        db: DB,
        input: EvmEnv,
        inspector: I,
    ) -> Self::Evm<DB, I> {
        EthEvm::new(self.create_evm(db, input).into_inner().with_inspector(inspector), true)
    }
}
/// Builds a regular ethereum block executor that uses the custom EVM.
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct MyExecutorBuilder;
impl<Node> ExecutorBuilder<Node> for MyExecutorBuilder
where
    Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = EthPrimitives>>,
{
    type EVM = EthEvmConfig<ChainSpec, MyEvmFactory>;

    /// Creates the EVM config, pairing the node's chain spec with our
    /// custom [`MyEvmFactory`].
    async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
        let evm_config =
            EthEvmConfig::new_with_evm_factory(ctx.chain_spec(), MyEvmFactory::default());
        Ok(evm_config)
    }
}
/// Returns the Prague precompile set extended with a custom precompile at
/// address `0x…0999`. Built once and cached in a `OnceLock`.
pub fn prague_custom() -> &'static Precompiles {
    static INSTANCE: OnceLock<Precompiles> = OnceLock::new();
    INSTANCE.get_or_init(|| {
        let mut precompiles = Precompiles::prague().clone();
        // Custom precompile: accepts any input, charges no gas and returns
        // empty output.
        let precompile = Precompile::new(
            PrecompileId::custom("custom"),
            address!("0x0000000000000000000000000000000000000999"),
            |_, _| PrecompileResult::Ok(PrecompileOutput::new(0, Bytes::new())),
        );
        precompiles.extend([precompile]);
        precompiles
    })
}
#[tokio::main]
async fn main() -> eyre::Result<()> {
    let _guard = RethTracer::new().init()?;
    let tasks = TaskManager::current();
    // create a custom chain spec with all hardforks through Prague active,
    // so the custom Prague precompiles are in effect
    let spec = ChainSpec::builder()
        .chain(Chain::mainnet())
        .genesis(Genesis::default())
        .london_activated()
        .paris_activated()
        .shanghai_activated()
        .cancun_activated()
        .prague_activated()
        .build();
    let node_config =
        NodeConfig::test().with_rpc(RpcServerArgs::default().with_http()).with_chain(spec);
    let handle = NodeBuilder::new(node_config)
        .testing_node(tasks.executor())
        // configure the node with regular ethereum types
        .with_types::<EthereumNode>()
        // use default ethereum components but with our executor
        .with_components(EthereumNode::components().executor(MyExecutorBuilder::default()))
        .with_add_ons(EthereumAddOns::default())
        .launch()
        .await
        .unwrap();
    println!("Node started");
    handle.node_exit_future.await
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/op-db-access/src/main.rs | examples/op-db-access/src/main.rs | //! Shows how manually access the database
use reth_op::{chainspec::BASE_MAINNET, node::OpNode, provider::providers::ReadOnlyConfig};
// Providers are zero cost abstractions on top of an opened MDBX Transaction
// exposing a familiar API to query the chain's information without requiring knowledge
// of the inner tables.
//
// These abstractions do not include any caching and the user is responsible for doing that.
// Other parts of the code which include caching are parts of the `EthApi` abstraction.
fn main() -> eyre::Result<()> {
    // The path to data directory, e.g. "~/.local/reth/share/base"
    let datadir = std::env::var("RETH_DATADIR")?;
    // Instantiate a read-only provider factory for Base mainnet using the
    // provided datadir path.
    let factory = OpNode::provider_factory_builder()
        .open_read_only(BASE_MAINNET.clone(), ReadOnlyConfig::from_datadir(datadir))?;
    // obtain a provider access that has direct access to the database.
    let _provider = factory.provider();
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/precompile-cache/src/main.rs | examples/precompile-cache/src/main.rs | //! This example shows how to implement a node with a custom EVM that uses a stateful precompile
#![warn(unused_crate_dependencies)]
use alloy_evm::{
eth::EthEvmContext,
precompiles::{DynPrecompile, Precompile, PrecompileInput, PrecompilesMap},
revm::{handler::EthPrecompiles, precompile::PrecompileId},
Evm, EvmFactory,
};
use alloy_genesis::Genesis;
use alloy_primitives::Bytes;
use parking_lot::RwLock;
use reth_ethereum::{
chainspec::{Chain, ChainSpec},
evm::{
primitives::{Database, EvmEnv},
revm::{
context::{Context, TxEnv},
context_interface::result::{EVMError, HaltReason},
inspector::{Inspector, NoOpInspector},
interpreter::interpreter::EthInterpreter,
precompile::PrecompileResult,
primitives::hardfork::SpecId,
MainBuilder, MainContext,
},
},
node::{
api::{FullNodeTypes, NodeTypes},
builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder},
core::{args::RpcServerArgs, node_config::NodeConfig},
evm::EthEvm,
node::EthereumAddOns,
EthEvmConfig, EthereumNode,
},
tasks::TaskManager,
EthPrimitives,
};
use reth_tracing::{RethTracer, Tracer};
use schnellru::{ByLength, LruMap};
use std::sync::Arc;
/// Type alias for the LRU cache used within the [`PrecompileCache`].
/// Keyed by `(input bytes, gas limit)`; values are full precompile results.
type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>;
/// A cache for precompile inputs / outputs.
///
/// This cache works with standard precompiles that take input data and gas limit as parameters.
/// The cache key is composed of the input bytes and gas limit, and the cached value is the
/// precompile execution result.
#[derive(Debug)]
pub struct PrecompileCache {
    /// Caches for each precompile input / output (LRU, see [`PrecompileLRUCache`]).
    cache: PrecompileLRUCache,
}
/// Custom EVM factory.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MyEvmFactory {
    /// Shared precompile cache, handed to every EVM this factory creates.
    precompile_cache: Arc<RwLock<PrecompileCache>>,
}
impl EvmFactory for MyEvmFactory {
    type Evm<DB: Database, I: Inspector<EthEvmContext<DB>, EthInterpreter>> =
        EthEvm<DB, I, PrecompilesMap>;
    type Tx = TxEnv;
    type Error<DBError: core::error::Error + Send + Sync + 'static> = EVMError<DBError>;
    type HaltReason = HaltReason;
    type Context<DB: Database> = EthEvmContext<DB>;
    type Spec = SpecId;
    type Precompiles = PrecompilesMap;

    /// Builds a mainnet EVM over `db` and wraps every registered precompile
    /// in [`WrappedPrecompile`] so results are cached in the shared cache.
    fn create_evm<DB: Database>(&self, db: DB, input: EvmEnv) -> Self::Evm<DB, NoOpInspector> {
        let new_cache = self.precompile_cache.clone();
        let evm = Context::mainnet()
            .with_db(db)
            .with_cfg(input.cfg_env)
            .with_block(input.block_env)
            .build_mainnet_with_inspector(NoOpInspector {})
            .with_precompiles(PrecompilesMap::from_static(EthPrecompiles::default().precompiles));
        let mut evm = EthEvm::new(evm, false);
        // Rewrap each default precompile with the caching layer.
        evm.precompiles_mut().map_precompiles(|_, precompile| {
            WrappedPrecompile::wrap(precompile, new_cache.clone())
        });
        evm
    }

    /// Like [`Self::create_evm`], but wires in the provided `inspector`.
    fn create_evm_with_inspector<DB: Database, I: Inspector<Self::Context<DB>, EthInterpreter>>(
        &self,
        db: DB,
        input: EvmEnv,
        inspector: I,
    ) -> Self::Evm<DB, I> {
        EthEvm::new(self.create_evm(db, input).into_inner().with_inspector(inspector), true)
    }
}
/// A custom precompile that contains the cache and precompile it wraps.
#[derive(Clone)]
pub struct WrappedPrecompile {
    /// The precompile to wrap.
    precompile: DynPrecompile,
    /// The cache to use; shared across all wrappers cloned from this one.
    cache: Arc<RwLock<PrecompileCache>>,
}
impl WrappedPrecompile {
    /// Bundles `precompile` with the shared `cache`.
    fn new(precompile: DynPrecompile, cache: Arc<RwLock<PrecompileCache>>) -> Self {
        Self { precompile, cache }
    }
    /// Given a [`DynPrecompile`] and cache for a specific precompiles, create a
    /// wrapper that can be used inside Evm.
    ///
    /// The returned precompile keeps the original's id and routes every call
    /// through the caching [`WrappedPrecompile::call`].
    fn wrap(precompile: DynPrecompile, cache: Arc<RwLock<PrecompileCache>>) -> DynPrecompile {
        let precompile_id = precompile.precompile_id().clone();
        let wrapped = Self::new(precompile, cache);
        (precompile_id, move |input: PrecompileInput<'_>| -> PrecompileResult {
            wrapped.call(input)
        })
        .into()
    }
}
impl Precompile for WrappedPrecompile {
    fn precompile_id(&self) -> &PrecompileId {
        self.precompile.precompile_id()
    }

    /// Runs the wrapped precompile with caching keyed on `(input bytes, gas)`.
    ///
    /// NOTE: the write lock is held across the inner `call`, so on a cache
    /// miss all callers sharing this cache are serialized.
    fn call(&self, input: PrecompileInput<'_>) -> PrecompileResult {
        let mut cache = self.cache.write();
        let key = (Bytes::copy_from_slice(input.data), input.gas);
        // get the result if it exists
        if let Some(result) = cache.cache.get(&key) {
            return result.clone()
        }
        // call the precompile if cache miss
        let output = self.precompile.call(input);
        // insert the result into the cache
        cache.cache.insert(key, output.clone());
        output
    }
}
/// Builds a regular ethereum block executor that uses the custom EVM.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MyExecutorBuilder {
    /// The precompile cache to use for all executors built by this builder.
    precompile_cache: Arc<RwLock<PrecompileCache>>,
}
impl Default for MyExecutorBuilder {
fn default() -> Self {
let precompile_cache = PrecompileCache {
cache: LruMap::<(Bytes, u64), PrecompileResult>::new(ByLength::new(100)),
};
Self { precompile_cache: Arc::new(RwLock::new(precompile_cache)) }
}
}
impl<Node> ExecutorBuilder<Node> for MyExecutorBuilder
where
    Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = EthPrimitives>>,
{
    type EVM = EthEvmConfig<ChainSpec, MyEvmFactory>;

    /// Creates the EVM config, handing the shared precompile cache to the
    /// factory so all executors reuse one cache.
    async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
        let evm_config = EthEvmConfig::new_with_evm_factory(
            ctx.chain_spec(),
            MyEvmFactory { precompile_cache: self.precompile_cache.clone() },
        );
        Ok(evm_config)
    }
}
#[tokio::main]
async fn main() -> eyre::Result<()> {
    let _guard = RethTracer::new().init()?;
    let tasks = TaskManager::current();
    // create a custom chain spec (hardforks active through Cancun)
    let spec = ChainSpec::builder()
        .chain(Chain::mainnet())
        .genesis(Genesis::default())
        .london_activated()
        .paris_activated()
        .shanghai_activated()
        .cancun_activated()
        .build();
    let node_config =
        NodeConfig::test().with_rpc(RpcServerArgs::default().with_http()).with_chain(spec);
    let handle = NodeBuilder::new(node_config)
        .testing_node(tasks.executor())
        // configure the node with regular ethereum types
        .with_types::<EthereumNode>()
        // use default ethereum components but with our caching executor
        .with_components(EthereumNode::components().executor(MyExecutorBuilder::default()))
        .with_add_ons(EthereumAddOns::default())
        .launch()
        .await
        .unwrap();
    println!("Node started");
    handle.node_exit_future.await
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/exex-test/src/wal_test.rs | examples/exex-test/src/wal_test.rs | use eyre::Result;
use futures_util::StreamExt;
use reth_ethereum::{
exex::{ExExContext, ExExEvent},
node::api::{FullNodeComponents, NodeTypes},
EthPrimitives,
};
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
/// ExEx tests - WAL behavior
///
/// Consumes chain notifications, acknowledges them via `FinishedHeight`
/// events, and simulates a WAL-cleared check once the tip passes block 3.
#[expect(dead_code)]
pub async fn wal_test_exex<
    Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>,
>(
    mut ctx: ExExContext<Node>,
) -> Result<()> {
    // We can't access the WAL handle directly as it's private
    // So we'll adapt our test to work without it
    // Track the latest finalized block
    let mut latest_finalized_block = 0;
    let wal_cleared = Arc::new(AtomicBool::new(false));
    println!("WAL test ExEx started");
    // Process notifications until the stream ends.
    while let Some(result) = ctx.notifications.next().await {
        // Handle the Result with ?
        let notification = result?;
        if let Some(committed_chain) = notification.committed_chain() {
            println!("WAL test: Received committed chain: {:?}", committed_chain.range());
            // Send finished height event so the node may prune the WAL.
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
            if committed_chain.tip().number > 3 {
                latest_finalized_block = 3; // Assuming block 3 was finalized
                // Since we don't have access to the WAL handle, we'll simulate the check
                println!("WAL test: Block finalized at height: {latest_finalized_block}");
                wal_cleared.store(true, Ordering::SeqCst);
            }
        }
    }
    // Make assertions
    if latest_finalized_block > 0 {
        // asserting true since we manually set wal_cleared to true above
        assert!(wal_cleared.load(Ordering::SeqCst), "WAL was not cleared after finalization");
    }
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/exex-test/src/main.rs | examples/exex-test/src/main.rs | use futures_util::StreamExt;
use reth_e2e_test_utils::testsuite::{
actions::ProduceBlocks,
setup::{NetworkSetup, Setup},
TestBuilder,
};
use reth_ethereum::{
chainspec::{ChainSpecBuilder, MAINNET},
exex::{ExExContext, ExExEvent},
node::{
api::{FullNodeComponents, NodeTypes},
EthEngineTypes, EthereumNode,
},
EthPrimitives,
};
use std::sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
};
mod wal_test;
// NOTE(review): presumably silences the `#[expect(dead_code)]` attributes
// below when they do fire — confirm why this allow is needed.
#[allow(unfulfilled_lint_expectations)]
struct TestState {
    // Total blocks counted across committed-chain notifications.
    received_blocks: AtomicU64,
    // Set to true once any notification is observed (simplified check).
    saw_trie_updates: AtomicBool,
    // Tip number of the most recently committed chain.
    last_finalized_block: AtomicU64,
}
/// ExEx that tests assertions about notifications and state
///
/// Drains chain notifications, counts committed blocks, acknowledges each
/// committed tip via `FinishedHeight`, then reports/asserts the totals.
#[expect(dead_code)]
async fn test_assertion_exex<
    Node: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>,
>(
    mut ctx: ExExContext<Node>,
) -> eyre::Result<()> {
    // Counters observed while draining notifications. (The previous version
    // also made a redundant `Arc` clone that was never moved anywhere.)
    let state = Arc::new(TestState {
        received_blocks: AtomicU64::new(0),
        saw_trie_updates: AtomicBool::new(false),
        last_finalized_block: AtomicU64::new(0),
    });
    println!("Assertion ExEx started");
    // Process notifications until the node shuts the stream down.
    while let Some(result) = ctx.notifications.next().await {
        let notification = result?;
        // Check for committed chain
        if let Some(committed_chain) = notification.committed_chain() {
            let range = committed_chain.range();
            let blocks_count = *range.end() - *range.start() + 1;
            println!("Received committed chain: {range:?}");
            // Count every block in the committed range.
            #[allow(clippy::unnecessary_cast)]
            state.received_blocks.fetch_add(blocks_count as u64, Ordering::SeqCst);
            // Send event that we've processed this height
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
            // Record the committed tip as the latest "finalized" block.
            state.last_finalized_block.store(committed_chain.tip().number, Ordering::SeqCst);
        }
        // Simplification: mark trie updates as seen for any notification.
        state.saw_trie_updates.store(true, Ordering::SeqCst);
    }
    // Report results at the end
    report_test_results(&state);
    Ok(())
}
/// Prints a summary of the collected counters and asserts the test
/// invariants held: blocks arrived, updates were observed, and at least one
/// finalization was recorded.
fn report_test_results(state: &TestState) {
    let blocks = state.received_blocks.load(Ordering::SeqCst);
    let trie_seen = state.saw_trie_updates.load(Ordering::SeqCst);
    let finalized = state.last_finalized_block.load(Ordering::SeqCst);
    println!("========= ExEx Test Report =========");
    println!("Total blocks received: {blocks}");
    println!("Trie updates observed: {trie_seen}");
    println!("Last finalized block: {finalized}");
    println!("====================================");
    assert!(blocks > 0, "No blocks were received by the ExEx");
    assert!(trie_seen, "No trie updates were observed in any notifications");
    assert!(finalized > 0, "No finalization events were observed");
}
/// Builds a single-node test network on a mainnet-derived chain spec and
/// produces a few blocks to drive the ExEx under test.
async fn run_exex_test() -> eyre::Result<()> {
    println!("Starting ExEx test...");
    // Set up the test environment
    let setup = Setup::default()
        .with_chain_spec(Arc::new(
            ChainSpecBuilder::default()
                .chain(MAINNET.chain)
                .genesis(
                    serde_json::from_str(include_str!(
                        "../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json"
                    ))
                    .unwrap(),
                )
                .cancun_activated()
                .build(),
        ))
        .with_network(NetworkSetup::single_node());
    println!("Test environment set up");
    // Two block-production actions: 5 blocks, then 2 more.
    let test = TestBuilder::new()
        .with_setup(setup)
        .with_action(ProduceBlocks::<EthEngineTypes>::new(5))
        .with_action(ProduceBlocks::<EthEngineTypes>::new(2));
    println!("Test built, running...");
    test.run::<EthereumNode>().await?;
    println!("Test completed successfully");
    Ok(())
}
#[tokio::main]
async fn main() -> eyre::Result<()> {
println!("Starting ExEx test example");
run_exex_test().await
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/network/src/main.rs | examples/network/src/main.rs | //! Example of how to use the network as a standalone component
//!
//! Run with
//!
//! ```sh
//! cargo run --release -p network
//! ```
#![warn(unused_crate_dependencies)]
use futures::StreamExt;
use reth_ethereum::{
network::{
config::rng_secret_key, NetworkConfig, NetworkEventListenerProvider, NetworkManager,
},
provider::test_utils::NoopProvider,
};
#[tokio::main]
async fn main() -> eyre::Result<()> {
// This block provider implementation is used for testing purposes.
let client = NoopProvider::default();
// The key that's used for encrypting sessions and to identify our node.
let local_key = rng_secret_key();
// Configure the network
let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client);
// create the network instance
let network = NetworkManager::eth(config).await?;
// get a handle to the network to interact with it
let handle = network.handle().clone();
// spawn the network
tokio::task::spawn(network);
// interact with the network
let mut events = handle.event_listener();
while let Some(event) = events.next().await {
println!("Received event: {event:?}");
}
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-beacon-withdrawals/src/main.rs | examples/custom-beacon-withdrawals/src/main.rs | //! Example for how to modify a block post-execution step. It credits beacon withdrawals with a
//! custom mechanism instead of minting native tokens
#![warn(unused_crate_dependencies)]
use alloy_eips::eip4895::Withdrawal;
use alloy_evm::{
block::{BlockExecutorFactory, BlockExecutorFor, CommitChanges, ExecutableTx},
eth::{EthBlockExecutionCtx, EthBlockExecutor},
precompiles::PrecompilesMap,
EthEvm, EthEvmFactory,
};
use alloy_sol_macro::sol;
use alloy_sol_types::SolCall;
use reth_ethereum::{
chainspec::ChainSpec,
cli::interface::Cli,
evm::{
primitives::{
execute::{BlockExecutionError, BlockExecutor, InternalBlockExecutionError},
Database, Evm, EvmEnv, EvmEnvFor, ExecutionCtxFor, InspectorFor,
NextBlockEnvAttributes, OnStateHook,
},
revm::{
context::{result::ExecutionResult, TxEnv},
db::State,
primitives::{address, hardfork::SpecId, Address},
DatabaseCommit,
},
EthBlockAssembler, EthEvmConfig, RethReceiptBuilder,
},
node::{
api::{ConfigureEngineEvm, ConfigureEvm, ExecutableTxIterator, FullNodeTypes, NodeTypes},
builder::{components::ExecutorBuilder, BuilderContext},
node::EthereumAddOns,
EthereumNode,
},
primitives::{Header, SealedBlock, SealedHeader},
provider::BlockExecutionResult,
rpc::types::engine::ExecutionData,
Block, EthPrimitives, Receipt, TransactionSigned,
};
use std::{fmt::Display, sync::Arc};
pub const SYSTEM_ADDRESS: Address = address!("0xfffffffffffffffffffffffffffffffffffffffe");
pub const WITHDRAWALS_ADDRESS: Address = address!("0x4200000000000000000000000000000000000000");
fn main() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
// use the default ethereum node types
.with_types::<EthereumNode>()
// Configure the components of the node
// use default ethereum components but use our custom pool
.with_components(
EthereumNode::components().executor(CustomExecutorBuilder::default()),
)
.with_add_ons(EthereumAddOns::default())
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
/// A custom executor builder
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct CustomExecutorBuilder;
impl<Types, Node> ExecutorBuilder<Node> for CustomExecutorBuilder
where
Types: NodeTypes<ChainSpec = ChainSpec, Primitives = EthPrimitives>,
Node: FullNodeTypes<Types = Types>,
{
type EVM = CustomEvmConfig;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let evm_config = CustomEvmConfig { inner: EthEvmConfig::new(ctx.chain_spec()) };
Ok(evm_config)
}
}
#[derive(Debug, Clone)]
pub struct CustomEvmConfig {
inner: EthEvmConfig,
}
impl BlockExecutorFactory for CustomEvmConfig {
type EvmFactory = EthEvmFactory;
type ExecutionCtx<'a> = EthBlockExecutionCtx<'a>;
type Transaction = TransactionSigned;
type Receipt = Receipt;
fn evm_factory(&self) -> &Self::EvmFactory {
self.inner.evm_factory()
}
fn create_executor<'a, DB, I>(
&'a self,
evm: EthEvm<&'a mut State<DB>, I, PrecompilesMap>,
ctx: EthBlockExecutionCtx<'a>,
) -> impl BlockExecutorFor<'a, Self, DB, I>
where
DB: Database + 'a,
I: InspectorFor<Self, &'a mut State<DB>> + 'a,
{
CustomBlockExecutor {
inner: EthBlockExecutor::new(
evm,
ctx,
self.inner.chain_spec(),
self.inner.executor_factory.receipt_builder(),
),
}
}
}
impl ConfigureEvm for CustomEvmConfig {
type Primitives = <EthEvmConfig as ConfigureEvm>::Primitives;
type Error = <EthEvmConfig as ConfigureEvm>::Error;
type NextBlockEnvCtx = <EthEvmConfig as ConfigureEvm>::NextBlockEnvCtx;
type BlockExecutorFactory = Self;
type BlockAssembler = EthBlockAssembler<ChainSpec>;
fn block_executor_factory(&self) -> &Self::BlockExecutorFactory {
self
}
fn block_assembler(&self) -> &Self::BlockAssembler {
self.inner.block_assembler()
}
fn evm_env(&self, header: &Header) -> EvmEnv<SpecId> {
self.inner.evm_env(header)
}
fn next_evm_env(
&self,
parent: &Header,
attributes: &NextBlockEnvAttributes,
) -> Result<EvmEnv<SpecId>, Self::Error> {
self.inner.next_evm_env(parent, attributes)
}
fn context_for_block<'a>(&self, block: &'a SealedBlock<Block>) -> EthBlockExecutionCtx<'a> {
self.inner.context_for_block(block)
}
fn context_for_next_block(
&self,
parent: &SealedHeader,
attributes: Self::NextBlockEnvCtx,
) -> EthBlockExecutionCtx<'_> {
self.inner.context_for_next_block(parent, attributes)
}
}
impl ConfigureEngineEvm<ExecutionData> for CustomEvmConfig {
fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor<Self> {
self.inner.evm_env_for_payload(payload)
}
fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self> {
self.inner.context_for_payload(payload)
}
fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator<Self> {
self.inner.tx_iterator_for_payload(payload)
}
}
pub struct CustomBlockExecutor<'a, Evm> {
/// Inner Ethereum execution strategy.
inner: EthBlockExecutor<'a, Evm, &'a Arc<ChainSpec>, &'a RethReceiptBuilder>,
}
impl<'db, DB, E> BlockExecutor for CustomBlockExecutor<'_, E>
where
DB: Database + 'db,
E: Evm<DB = &'db mut State<DB>, Tx = TxEnv>,
{
type Transaction = TransactionSigned;
type Receipt = Receipt;
type Evm = E;
fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
self.inner.apply_pre_execution_changes()
}
fn execute_transaction_with_commit_condition(
&mut self,
tx: impl ExecutableTx<Self>,
f: impl FnOnce(&ExecutionResult<<Self::Evm as Evm>::HaltReason>) -> CommitChanges,
) -> Result<Option<u64>, BlockExecutionError> {
self.inner.execute_transaction_with_commit_condition(tx, f)
}
fn finish(mut self) -> Result<(Self::Evm, BlockExecutionResult<Receipt>), BlockExecutionError> {
if let Some(withdrawals) = self.inner.ctx.withdrawals.clone() {
apply_withdrawals_contract_call(withdrawals.as_ref(), self.inner.evm_mut())?;
}
// Invoke inner finish method to apply Ethereum post-execution changes
self.inner.finish()
}
fn set_state_hook(&mut self, _hook: Option<Box<dyn OnStateHook>>) {
self.inner.set_state_hook(_hook)
}
fn evm_mut(&mut self) -> &mut Self::Evm {
self.inner.evm_mut()
}
fn evm(&self) -> &Self::Evm {
self.inner.evm()
}
}
sol!(
function withdrawals(
uint64[] calldata amounts,
address[] calldata addresses
);
);
/// Applies the post-block call to the withdrawal / deposit contract, using the given block,
/// [`ChainSpec`], EVM.
pub fn apply_withdrawals_contract_call(
withdrawals: &[Withdrawal],
evm: &mut impl Evm<Error: Display, DB: DatabaseCommit>,
) -> Result<(), BlockExecutionError> {
let mut state = match evm.transact_system_call(
SYSTEM_ADDRESS,
WITHDRAWALS_ADDRESS,
withdrawalsCall {
amounts: withdrawals.iter().map(|w| w.amount).collect::<Vec<_>>(),
addresses: withdrawals.iter().map(|w| w.address).collect::<Vec<_>>(),
}
.abi_encode()
.into(),
) {
Ok(res) => res.state,
Err(e) => {
return Err(BlockExecutionError::Internal(InternalBlockExecutionError::Other(
format!("withdrawal contract system call revert: {e}").into(),
)))
}
};
// Clean-up post system tx context
state.remove(&SYSTEM_ADDRESS);
state.remove(&evm.block().beneficiary);
evm.db_mut().commit(state);
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/engine-api-access/src/main.rs | examples/engine-api-access/src/main.rs | //! Example demonstrating how to access the Engine API instance during construction.
//!
//! Run with
//!
//! ```sh
//! cargo run -p example-engine-api-access
//! ```
use reth_db::test_utils::create_test_rw_db;
use reth_node_builder::{EngineApiExt, FullNodeComponents, NodeBuilder, NodeConfig};
use reth_optimism_chainspec::BASE_MAINNET;
use reth_optimism_node::{
args::RollupArgs, node::OpEngineValidatorBuilder, OpAddOns, OpEngineApiBuilder, OpNode,
};
use tokio::sync::oneshot;
#[tokio::main]
async fn main() {
// Op node configuration and setup
let config = NodeConfig::new(BASE_MAINNET.clone());
let db = create_test_rw_db();
let args = RollupArgs::default();
let op_node = OpNode::new(args);
let (engine_api_tx, _engine_api_rx) = oneshot::channel();
let engine_api =
EngineApiExt::new(OpEngineApiBuilder::<OpEngineValidatorBuilder>::default(), move |api| {
let _ = engine_api_tx.send(api);
});
let _builder = NodeBuilder::new(config)
.with_database(db)
.with_types::<OpNode>()
.with_components(op_node.components())
.with_add_ons(OpAddOns::default().with_engine_api(engine_api))
.on_component_initialized(move |ctx| {
let _provider = ctx.provider();
Ok(())
})
.on_node_started(|_full_node| Ok(()))
.on_rpc_started(|_ctx, handles| {
let _client = handles.rpc.http_client();
Ok(())
})
.check_launch();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/beacon-api-sse/src/main.rs | examples/beacon-api-sse/src/main.rs | //! Example of how to subscribe to beacon chain events via SSE.
//!
//! See also [ethereum-beacon-API eventstream](https://ethereum.github.io/beacon-APIs/#/Events/eventstream)
//!
//! Run with
//!
//! ```sh
//! cargo run -p example-beacon-api-sse -- node
//! ```
//!
//! This launches a regular reth instance and subscribes to payload attributes event stream.
//!
//! **NOTE**: This expects that the CL client is running an http server on `localhost:5052` and is
//! configured to emit payload attributes events.
//!
//! See lighthouse beacon Node API: <https://lighthouse-book.sigmaprime.io/api_bn.html#beacon-node-api>
#![warn(unused_crate_dependencies)]
use alloy_rpc_types_beacon::events::PayloadAttributesEvent;
use clap::Parser;
use futures_util::stream::StreamExt;
use mev_share_sse::{client::EventStream, EventClient};
use reth_ethereum::{
cli::{chainspec::EthereumChainSpecParser, interface::Cli},
node::EthereumNode,
};
use std::net::{IpAddr, Ipv4Addr};
use tracing::{info, warn};
fn main() {
Cli::<EthereumChainSpecParser, BeaconEventsConfig>::parse()
.run(|builder, args| async move {
let handle = builder.node(EthereumNode::default()).launch().await?;
handle.node.task_executor.spawn(Box::pin(args.run()));
handle.wait_for_node_exit().await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
#[derive(Debug, Clone, clap::Parser)]
struct BeaconEventsConfig {
/// Beacon Node http server address
#[arg(long = "cl.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
pub cl_addr: IpAddr,
/// Beacon Node http server port to listen on
#[arg(long = "cl.port", default_value_t = 5052)]
pub cl_port: u16,
}
impl BeaconEventsConfig {
/// Returns the http url of the beacon node
pub fn http_base_url(&self) -> String {
format!("http://{}:{}", self.cl_addr, self.cl_port)
}
/// Returns the URL to the events endpoint
pub fn events_url(&self) -> String {
format!("{}/eth/v1/events", self.http_base_url())
}
/// Service that subscribes to beacon chain payload attributes events
async fn run(self) {
let client = EventClient::default();
let mut subscription = self.new_payload_attributes_subscription(&client).await;
while let Some(event) = subscription.next().await {
info!("Received payload attributes: {:?}", event);
}
}
// It can take a bit until the CL endpoint is live so we retry a few times
async fn new_payload_attributes_subscription(
&self,
client: &EventClient,
) -> EventStream<PayloadAttributesEvent> {
let payloads_url = format!("{}?topics=payload_attributes", self.events_url());
loop {
match client.subscribe(&payloads_url).await {
Ok(subscription) => return subscription,
Err(err) => {
warn!(
"Failed to subscribe to payload attributes events: {:?}\nRetrying in 5 seconds...",
err
);
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_config() {
let args = BeaconEventsConfig::try_parse_from(["reth"]);
assert!(args.is_ok());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/custom-dev-node/src/main.rs | examples/custom-dev-node/src/main.rs | //! This example shows how to run a custom dev node programmatically and submit a transaction
//! through rpc.
#![warn(unused_crate_dependencies)]
use std::sync::Arc;
use alloy_genesis::Genesis;
use alloy_primitives::{b256, hex};
use futures_util::StreamExt;
use reth_ethereum::{
chainspec::ChainSpec,
node::{
builder::{NodeBuilder, NodeHandle},
core::{args::RpcServerArgs, node_config::NodeConfig},
EthereumNode,
},
provider::CanonStateSubscriptions,
rpc::api::eth::helpers::EthTransactions,
tasks::TaskManager,
};
#[tokio::main]
async fn main() -> eyre::Result<()> {
let tasks = TaskManager::current();
// create node config
let node_config = NodeConfig::test()
.dev()
.with_rpc(RpcServerArgs::default().with_http())
.with_chain(custom_chain());
let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config)
.testing_node(tasks.executor())
.node(EthereumNode::default())
.launch()
.await?;
let mut notifications = node.provider.canonical_state_stream();
// submit tx through rpc
let raw_tx = hex!(
"02f876820a28808477359400847735940082520894ab0840c0e43688012c1adb0f5e3fc665188f83d28a029d394a5d630544000080c080a0a044076b7e67b5deecc63f61a8d7913fab86ca365b344b5759d1fe3563b4c39ea019eab979dd000da04dfc72bb0377c092d30fd9e1cab5ae487de49586cc8b0090"
);
let eth_api = node.rpc_registry.eth_api();
let hash = eth_api.send_raw_transaction(raw_tx.into()).await?;
let expected = b256!("0xb1c6512f4fc202c04355fbda66755e0e344b152e633010e8fd75ecec09b63398");
assert_eq!(hash, expected);
println!("submitted transaction: {hash}");
let head = notifications.next().await.unwrap();
let tx = &head.tip().body().transactions().next().unwrap();
assert_eq!(*tx.tx_hash(), hash);
println!("mined transaction: {hash}");
Ok(())
}
fn custom_chain() -> Arc<ChainSpec> {
let custom_genesis = r#"
{
"nonce": "0x42",
"timestamp": "0x0",
"extraData": "0x5343",
"gasLimit": "0x5208",
"difficulty": "0x400000000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"0x6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b": {
"balance": "0x4a47e3c12448f4ad000000"
}
},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"config": {
"ethash": {},
"chainId": 2600,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true,
"shanghaiTime": 0
}
}
"#;
let genesis: Genesis = serde_json::from_str(custom_genesis).unwrap();
Arc::new(genesis.into())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/node-event-hooks/src/main.rs | examples/node-event-hooks/src/main.rs | //! Example for how hook into the node via the CLI extension mechanism without registering
//! additional arguments
//!
//! Run with
//!
//! ```sh
//! cargo run -p node-event-hooks -- node
//! ```
//!
//! This launches a regular reth node and also print:
//! > "All components initialized" – once all components have been initialized
//! > "Node started" – once the node has been started.
#![warn(unused_crate_dependencies)]
use reth_ethereum::{cli::interface::Cli, node::EthereumNode};
fn main() {
Cli::parse_args()
.run(|builder, _| async move {
let handle = builder
.node(EthereumNode::default())
.on_node_started(|_ctx| {
println!("Node started");
Ok(())
})
.on_rpc_started(|_ctx, _handles| {
println!("RPC started");
Ok(())
})
.on_component_initialized(|_ctx| {
println!("All components initialized");
Ok(())
})
.launch()
.await?;
handle.wait_for_node_exit().await
})
.unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/txpool-tracing/src/submit.rs | examples/txpool-tracing/src/submit.rs | //! Transaction submission functionality for the txpool tracing example
#![allow(unused)]
#![allow(clippy::too_many_arguments)]
use alloy_network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder};
use alloy_primitives::{Address, TxHash, U256};
use futures_util::StreamExt;
use reth_ethereum::{
node::api::{FullNodeComponents, NodeTypes},
pool::{
AddedTransactionOutcome, PoolTransaction, TransactionEvent, TransactionOrigin,
TransactionPool,
},
primitives::SignerRecoverable,
rpc::eth::primitives::TransactionRequest,
EthPrimitives, TransactionSigned,
};
/// Submit a transaction to the transaction pool
///
/// This function demonstrates how to create, sign, and submit a transaction
/// to the reth transaction pool.
pub async fn submit_transaction<FC>(
node: &FC,
wallet: &EthereumWallet,
to: Address,
data: Vec<u8>,
nonce: u64,
chain_id: u64,
gas_limit: u64,
max_priority_fee_per_gas: u128,
max_fee_per_gas: u128,
) -> eyre::Result<TxHash>
where
// This enforces `EthPrimitives` types for this node, this unlocks the proper conversions when
FC: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>,
{
// Create the transaction request
let request = TransactionRequest::default()
.with_to(to)
.with_input(data)
.with_nonce(nonce)
.with_chain_id(chain_id)
.with_gas_limit(gas_limit)
.with_max_priority_fee_per_gas(max_priority_fee_per_gas)
.with_max_fee_per_gas(max_fee_per_gas);
// Sign the transaction
let transaction: TransactionSigned =
NetworkWallet::<Ethereum>::sign_request(wallet, request).await?.into();
// Get the transaction hash before submitting
let tx_hash = *transaction.hash();
// Recover the transaction
let transaction = transaction.try_into_recovered()?;
let mut tx_events = node
.pool()
.add_consensus_transaction_and_subscribe(transaction, TransactionOrigin::Local)
.await
.map_err(|e| eyre::eyre!("Pool error: {e}"))?;
// Wait for the transaction to be added to the pool
while let Some(event) = tx_events.next().await {
match event {
TransactionEvent::Mined(_) => {
println!("Transaction was mined: {:?}", tx_events.hash());
break;
}
TransactionEvent::Pending => {
println!("Transaction added to pending pool: {:?}", tx_events.hash());
break;
}
TransactionEvent::Discarded => {
return Err(eyre::eyre!("Transaction discarded: {:?}", tx_events.hash(),));
}
_ => {
// Continue waiting for added or rejected event
}
}
}
Ok(tx_hash)
}
/// Helper function to submit a simple ETH transfer transaction
///
/// This will first populate a tx request, sign it then submit to the pool in the required format.
pub async fn submit_eth_transfer<FC>(
node: &FC,
wallet: &EthereumWallet,
to: Address,
value: U256,
nonce: u64,
chain_id: u64,
gas_limit: u64,
max_priority_fee_per_gas: u128,
max_fee_per_gas: u128,
) -> eyre::Result<AddedTransactionOutcome>
where
FC: FullNodeComponents<Types: NodeTypes<Primitives = EthPrimitives>>,
{
// Create the transaction request for ETH transfer
let request = TransactionRequest::default()
.with_to(to)
.with_value(value)
.with_nonce(nonce)
.with_chain_id(chain_id)
.with_gas_limit(gas_limit)
.with_max_priority_fee_per_gas(max_priority_fee_per_gas)
.with_max_fee_per_gas(max_fee_per_gas);
// Sign the transaction
let transaction: TransactionSigned =
NetworkWallet::<Ethereum>::sign_request(wallet, request).await?.into();
// Recover the transaction
let transaction = transaction.try_into_recovered()?;
// Submit the transaction to the pool
node.pool()
.add_consensus_transaction(transaction, TransactionOrigin::Local)
.await
.map_err(|e| eyre::eyre!("Pool error: {e}"))
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/examples/txpool-tracing/src/main.rs | examples/txpool-tracing/src/main.rs | //! Example of how to trace new pending transactions in the reth CLI
//!
//! Run with
//!
//! ```sh
//! cargo run --release -p txpool-tracing -- node --http --ws --recipients 0x....,0x....
//! ```
//!
//! If no recipients are specified, all transactions will be traced.
#![warn(unused_crate_dependencies)]
use alloy_primitives::Address;
use alloy_rpc_types_trace::{parity::TraceType, tracerequest::TraceCallRequest};
use clap::Parser;
use futures_util::StreamExt;
use reth_ethereum::{
cli::{chainspec::EthereumChainSpecParser, interface::Cli},
node::{builder::NodeHandle, EthereumNode},
pool::TransactionPool,
rpc::eth::primitives::TransactionRequest,
};
mod submit;
fn main() {
Cli::<EthereumChainSpecParser, RethCliTxpoolExt>::parse()
.run(|builder, args| async move {
// launch the node
let NodeHandle { node, node_exit_future } =
builder.node(EthereumNode::default()).launch().await?;
// create a new subscription to pending transactions
let mut pending_transactions = node.pool.new_pending_pool_transactions_listener();
// get an instance of the `trace_` API handler
let traceapi = node.rpc_registry.trace_api();
println!("Spawning trace task!");
// Spawn an async block to listen for transactions.
node.task_executor.spawn(Box::pin(async move {
// Waiting for new transactions
while let Some(event) = pending_transactions.next().await {
let tx = event.transaction;
println!("Transaction received: {tx:?}");
if let Some(recipient) = tx.to() {
if args.is_match(&recipient) {
// trace the transaction with `trace_call`
let callrequest =
TransactionRequest::from_recovered_transaction(tx.to_consensus());
let tracerequest = TraceCallRequest::new(callrequest)
.with_trace_type(TraceType::Trace);
if let Ok(trace_result) = traceapi.trace_call(tracerequest).await {
let hash = tx.hash();
println!("trace result for transaction {hash}: {trace_result:?}");
}
}
}
}
}));
node_exit_future.await
})
.unwrap();
}
/// Our custom cli args extension that adds one flag to reth default CLI.
#[derive(Debug, Clone, Default, clap::Args)]
struct RethCliTxpoolExt {
/// recipients addresses that we want to trace
#[arg(long, value_delimiter = ',')]
pub recipients: Vec<Address>,
}
impl RethCliTxpoolExt {
/// Check if the recipient is in the list of recipients to trace.
pub fn is_match(&self, recipient: &Address) -> bool {
self.recipients.is_empty() || self.recipients.contains(recipient)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/assert.rs | testing/ef-tests/src/assert.rs | //! Various assertion helpers.
use crate::Error;
use std::fmt::Debug;
/// A helper like `assert_eq!` that instead returns `Err(Error::Assertion)` on failure.
pub fn assert_equal<T>(left: T, right: T, msg: &str) -> Result<(), Error>
where
T: PartialEq + Debug,
{
if left == right {
Ok(())
} else {
Err(Error::Assertion(format!("{msg}\n left `{left:?}`,\n right `{right:?}`")))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/case.rs | testing/ef-tests/src/case.rs | //! Test case definitions
use crate::result::{CaseResult, Error};
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use std::{
fmt::Debug,
path::{Path, PathBuf},
};
/// A single test case, capable of loading a JSON description of itself and running it.
///
/// See <https://ethereum-tests.readthedocs.io/> for test specs.
pub trait Case: Debug + Sync + Sized {
/// A description of the test.
fn description(&self) -> String {
"no description".to_string()
}
/// Load the test from the given file path.
///
/// The file can be assumed to be a valid EF test case as described on <https://ethereum-tests.readthedocs.io/>.
fn load(path: &Path) -> Result<Self, Error>;
/// Run the test.
fn run(&self) -> Result<(), Error>;
}
/// A container for multiple test cases.
#[derive(Debug)]
pub struct Cases<T> {
/// The contained test cases and the path to each test.
pub test_cases: Vec<(PathBuf, T)>,
}
impl<T: Case> Cases<T> {
/// Run the contained test cases.
pub fn run(&self) -> Vec<CaseResult> {
self.test_cases
.par_iter()
.map(|(path, case)| CaseResult::new(path, case, case.run()))
.collect()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/lib.rs | testing/ef-tests/src/lib.rs | //! Abstractions and runners for EF tests.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use reth_revm as _;
use revm as _;
pub mod case;
pub mod result;
pub mod suite;
pub mod assert;
pub mod cases;
pub mod models;
pub use case::{Case, Cases};
pub use result::{CaseResult, Error};
pub use suite::Suite;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/suite.rs | testing/ef-tests/src/suite.rs | //! Abstractions for groups of tests.
use crate::{
case::{Case, Cases},
result::assert_tests_pass,
};
use std::path::{Path, PathBuf};
use walkdir::{DirEntry, WalkDir};
/// A collection of tests.
pub trait Suite {
/// The type of test cases in this suite.
type Case: Case;
/// The path to the test suite directory.
fn suite_path(&self) -> &Path;
/// Run all test cases in the suite.
fn run(&self) {
let suite_path = self.suite_path();
for entry in WalkDir::new(suite_path).min_depth(1).max_depth(1) {
let entry = entry.expect("Failed to read directory");
if entry.file_type().is_dir() {
self.run_only(entry.file_name().to_string_lossy().as_ref());
}
}
}
/// Load and run each contained test case for the provided sub-folder.
///
/// # Note
///
/// This recursively finds every test description in the resulting path.
fn run_only(&self, name: &str) {
// Build the path to the test suite directory
let suite_path = self.suite_path().join(name);
// Verify that the path exists
assert!(suite_path.exists(), "Test suite path does not exist: {suite_path:?}");
// Find all files with the ".json" extension in the test suite directory
let test_cases = find_all_files_with_extension(&suite_path, ".json")
.into_iter()
.map(|test_case_path| {
let case = Self::Case::load(&test_case_path).expect("test case should load");
(test_case_path, case)
})
.collect();
// Run the test cases and collect the results
let results = Cases { test_cases }.run();
// Assert that all tests in the suite pass
assert_tests_pass(name, &suite_path, &results);
}
}
/// Recursively find all files with a given extension.
fn find_all_files_with_extension(path: &Path, extension: &str) -> Vec<PathBuf> {
WalkDir::new(path)
.into_iter()
.filter_map(Result::ok)
.filter(|e| e.file_name().to_string_lossy().ends_with(extension))
.map(DirEntry::into_path)
.collect()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/result.rs | testing/ef-tests/src/result.rs | //! Test results and errors
use crate::Case;
use reth_db::DatabaseError;
use reth_provider::ProviderError;
use std::path::{Path, PathBuf};
use thiserror::Error;
/// Test errors
///
/// # Note
///
/// `Error::Skipped` should not be treated as a test failure.
#[derive(Debug, Error)]
#[non_exhaustive]
pub enum Error {
/// The test was skipped
#[error("test was skipped")]
Skipped,
/// Block processing failed
/// Note: This includes but is not limited to execution.
/// For example, the header number could be incorrect.
#[error("block {block_number} failed to process: {err}")]
BlockProcessingFailed {
/// The block number for the block that failed
block_number: u64,
/// The specific error
#[source]
err: Box<dyn std::error::Error + Send + Sync>,
},
/// An IO error occurred
#[error("an error occurred interacting with the file system at {path}: {error}")]
Io {
/// The path to the file or directory
path: PathBuf,
/// The specific error
#[source]
error: std::io::Error,
},
/// A deserialization error occurred
#[error("an error occurred deserializing the test at {path}: {error}")]
CouldNotDeserialize {
/// The path to the file we wanted to deserialize
path: PathBuf,
/// The specific error
#[source]
error: serde_json::Error,
},
/// A database error occurred.
#[error(transparent)]
Database(#[from] DatabaseError),
/// A test assertion failed.
#[error("test failed: {0}")]
Assertion(String),
/// An error internally in reth occurred.
#[error("test failed: {0}")]
Provider(#[from] ProviderError),
/// An error occurred while decoding RLP.
#[error("an error occurred deserializing RLP: {0}")]
RlpDecodeError(#[from] alloy_rlp::Error),
/// A consensus error occurred.
#[error("an error occurred during consensus checks: {0}")]
ConsensusError(#[from] reth_consensus::ConsensusError),
}
impl Error {
/// Create a new [`Error::BlockProcessingFailed`] error.
pub fn block_failed(
block_number: u64,
err: impl std::error::Error + Send + Sync + 'static,
) -> Self {
Self::BlockProcessingFailed { block_number, err: Box::new(err) }
}
}
/// The result of running a test.
#[derive(Debug)]
pub struct CaseResult {
/// A description of the test.
pub desc: String,
/// The full path to the test.
pub path: PathBuf,
/// The result of the test.
pub result: Result<(), Error>,
}
impl CaseResult {
/// Create a new test result.
pub fn new(path: &Path, case: &impl Case, result: Result<(), Error>) -> Self {
Self { desc: case.description(), path: path.into(), result }
}
}
/// Assert that all the given tests passed and print the results to stdout.
pub(crate) fn assert_tests_pass(suite_name: &str, path: &Path, results: &[CaseResult]) {
let (passed, failed, skipped) = categorize_results(results);
print_results(suite_name, path, &passed, &failed, &skipped);
assert!(failed.is_empty(), "Some tests failed (see above)");
}
/// Categorize test results into `(passed, failed, skipped)`.
pub(crate) fn categorize_results(
results: &[CaseResult],
) -> (Vec<&CaseResult>, Vec<&CaseResult>, Vec<&CaseResult>) {
let mut passed = Vec::new();
let mut failed = Vec::new();
let mut skipped = Vec::new();
for case in results {
match case.result.as_ref().err() {
Some(Error::Skipped) => skipped.push(case),
Some(_) => failed.push(case),
None => passed.push(case),
}
}
(passed, failed, skipped)
}
/// Display the given test results to stdout.
pub(crate) fn print_results(
suite_name: &str,
path: &Path,
passed: &[&CaseResult],
failed: &[&CaseResult],
skipped: &[&CaseResult],
) {
println!("Suite: {suite_name} (at {})", path.display());
println!(
"Ran {} tests ({} passed, {} failed, {} skipped)",
passed.len() + failed.len() + skipped.len(),
passed.len(),
failed.len(),
skipped.len()
);
for case in skipped {
println!("[S] Case {} skipped", case.path.display());
}
for case in failed {
let error = case.result.as_ref().unwrap_err();
println!("[!] Case {} failed (description: {}): {}", case.path.display(), case.desc, error);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/models.rs | testing/ef-tests/src/models.rs | //! Shared models for <https://github.com/ethereum/tests>
use crate::{assert::assert_equal, Error};
use alloy_consensus::Header as RethHeader;
use alloy_eips::eip4895::Withdrawals;
use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256};
use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, ForkCondition};
use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx};
use reth_primitives_traits::SealedHeader;
use seismic_alloy_genesis::GenesisAccount;
use serde::Deserialize;
use std::{collections::BTreeMap, ops::Deref};
use alloy_primitives::FlaggedStorage;
/// The definition of a blockchain test.
#[derive(Debug, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BlockchainTest {
/// Genesis block header.
pub genesis_block_header: Header,
/// RLP encoded genesis block.
#[serde(rename = "genesisRLP")]
pub genesis_rlp: Option<Bytes>,
/// Block data.
pub blocks: Vec<Block>,
/// The expected post state.
pub post_state: Option<BTreeMap<Address, Account>>,
/// The test pre-state.
pub pre: State,
/// Hash of the best block.
pub lastblockhash: B256,
/// Network spec.
pub network: ForkSpec,
#[serde(default)]
/// Engine spec.
pub seal_engine: SealEngine,
}
/// A block header in an Ethereum blockchain test.
#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct Header {
/// Bloom filter.
pub bloom: Bloom,
/// Coinbase.
pub coinbase: Address,
/// Difficulty.
pub difficulty: U256,
/// Extra data.
pub extra_data: Bytes,
/// Gas limit.
pub gas_limit: U256,
/// Gas used.
pub gas_used: U256,
/// Block Hash.
pub hash: B256,
/// Mix hash.
pub mix_hash: B256,
/// Seal nonce.
pub nonce: B64,
/// Block number.
pub number: U256,
/// Parent hash.
pub parent_hash: B256,
/// Receipt trie.
pub receipt_trie: B256,
/// State root.
pub state_root: B256,
/// Timestamp.
pub timestamp: U256,
/// Transactions trie.
pub transactions_trie: B256,
/// Uncle hash.
pub uncle_hash: B256,
/// Base fee per gas.
pub base_fee_per_gas: Option<U256>,
/// Withdrawals root.
pub withdrawals_root: Option<B256>,
/// Blob gas used.
pub blob_gas_used: Option<U256>,
/// Excess blob gas.
pub excess_blob_gas: Option<U256>,
/// Parent beacon block root.
pub parent_beacon_block_root: Option<B256>,
/// Requests root.
pub requests_hash: Option<B256>,
/// Target blobs per block.
pub target_blobs_per_block: Option<U256>,
}
impl From<Header> for SealedHeader {
fn from(value: Header) -> Self {
let header = RethHeader {
base_fee_per_gas: value.base_fee_per_gas.map(|v| v.to::<u64>()),
beneficiary: value.coinbase,
difficulty: value.difficulty,
extra_data: value.extra_data,
gas_limit: value.gas_limit.to::<u64>(),
gas_used: value.gas_used.to::<u64>(),
mix_hash: value.mix_hash,
nonce: u64::from_be_bytes(value.nonce.0).into(),
number: value.number.to::<u64>(),
timestamp: value.timestamp.to::<u64>(),
transactions_root: value.transactions_trie,
receipts_root: value.receipt_trie,
ommers_hash: value.uncle_hash,
state_root: value.state_root,
parent_hash: value.parent_hash,
logs_bloom: value.bloom,
withdrawals_root: value.withdrawals_root,
blob_gas_used: value.blob_gas_used.map(|v| v.to::<u64>()),
excess_blob_gas: value.excess_blob_gas.map(|v| v.to::<u64>()),
parent_beacon_block_root: value.parent_beacon_block_root,
requests_hash: value.requests_hash,
};
Self::new(header, value.hash)
}
}
/// A block in an Ethereum blockchain test.
#[derive(Debug, PartialEq, Eq, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct Block {
/// Block header.
pub block_header: Option<Header>,
/// RLP encoded block bytes
pub rlp: Bytes,
/// If the execution of the block should fail,
/// `expect_exception` is `Some`.
/// Its contents detail the reason for the failure.
pub expect_exception: Option<String>,
/// Transactions
pub transactions: Option<Vec<Transaction>>,
/// Uncle/ommer headers
pub uncle_headers: Option<Vec<Header>>,
/// Transaction Sequence
pub transaction_sequence: Option<Vec<TransactionSequence>>,
/// Withdrawals
pub withdrawals: Option<Withdrawals>,
}
/// Transaction sequence in block
#[derive(Debug, PartialEq, Eq, Deserialize, Default)]
#[serde(deny_unknown_fields)]
#[serde(rename_all = "camelCase")]
pub struct TransactionSequence {
exception: String,
raw_bytes: Bytes,
valid: String,
}
/// Ethereum blockchain test data state.
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Default)]
pub struct State(BTreeMap<Address, Account>);
impl State {
/// Return state as genesis state.
pub fn into_genesis_state(self) -> BTreeMap<Address, GenesisAccount> {
let is_private = false; // testing helper assume no private state
self.0
.into_iter()
.map(|(address, account)| {
let storage = account
.storage
.iter()
.filter(|(_, v)| !v.is_zero())
.map(|(k, v)| {
(
B256::from_slice(&k.to_be_bytes::<32>()),
FlaggedStorage::new(
U256::from_be_bytes(v.to_be_bytes::<32>()),
is_private,
),
)
})
.collect();
let account = GenesisAccount {
balance: account.balance,
nonce: Some(account.nonce.try_into().unwrap()),
code: Some(account.code).filter(|c| !c.is_empty()),
storage: Some(storage),
private_key: None,
};
(address, account)
})
.collect::<BTreeMap<_, _>>()
}
}
impl Deref for State {
type Target = BTreeMap<Address, Account>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
/// An account.
#[derive(Debug, PartialEq, Eq, Deserialize, Clone, Default)]
#[serde(deny_unknown_fields)]
pub struct Account {
/// Balance.
pub balance: U256,
/// Code.
pub code: Bytes,
/// Nonce.
pub nonce: U256,
/// Storage.
pub storage: BTreeMap<U256, U256>,
}
impl Account {
/// Check that the account matches what is in the database.
///
/// In case of a mismatch, `Err(Error::Assertion)` is returned.
pub fn assert_db(&self, address: Address, tx: &impl DbTx) -> Result<(), Error> {
let account =
tx.get_by_encoded_key::<tables::PlainAccountState>(&address)?.ok_or_else(|| {
Error::Assertion(format!(
"Expected account ({address}) is missing from DB: {self:?}"
))
})?;
assert_equal(self.balance, account.balance, "Balance does not match")?;
assert_equal(self.nonce.to(), account.nonce, "Nonce does not match")?;
if let Some(bytecode_hash) = account.bytecode_hash {
assert_equal(keccak256(&self.code), bytecode_hash, "Bytecode does not match")?;
} else {
assert_equal(
self.code.is_empty(),
true,
"Expected empty bytecode, got bytecode in db.",
)?;
}
let mut storage_cursor = tx.cursor_dup_read::<tables::PlainStorageState>()?;
for (slot, value) in &self.storage {
if let Some(entry) =
storage_cursor.seek_by_key_subkey(address, B256::new(slot.to_be_bytes()))?
{
if U256::from_be_bytes(entry.key.0) == *slot {
assert_equal(
*value,
entry.value.value,
&format!("Storage for slot {slot:?} does not match"),
)?;
} else {
return Err(Error::Assertion(format!(
"Slot {slot:?} is missing from the database. Expected {value:?}"
)))
}
} else {
return Err(Error::Assertion(format!(
"Slot {slot:?} is missing from the database. Expected {value:?}"
)))
}
}
Ok(())
}
}
/// Fork specification.
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Deserialize)]
pub enum ForkSpec {
/// Frontier
Frontier,
/// Frontier to Homestead
FrontierToHomesteadAt5,
/// Homestead
Homestead,
/// Homestead to Tangerine
HomesteadToDaoAt5,
/// Homestead to Tangerine
HomesteadToEIP150At5,
/// Tangerine
EIP150,
/// Spurious Dragon
EIP158, // EIP-161: State trie clearing
/// Spurious Dragon to Byzantium
EIP158ToByzantiumAt5,
/// Byzantium
Byzantium,
/// Byzantium to Constantinople
ByzantiumToConstantinopleAt5, // SKIPPED
/// Byzantium to Constantinople
ByzantiumToConstantinopleFixAt5,
/// Constantinople
Constantinople, // SKIPPED
/// Constantinople fix
ConstantinopleFix,
/// Istanbul
Istanbul,
/// Berlin
Berlin,
/// Berlin to London
BerlinToLondonAt5,
/// London
London,
/// Paris aka The Merge
#[serde(alias = "Paris")]
Merge,
/// Paris to Shanghai at time 15k
ParisToShanghaiAtTime15k,
/// Shanghai
Shanghai,
/// Shanghai to Cancun at time 15k
ShanghaiToCancunAtTime15k,
/// Merge EOF test
#[serde(alias = "Merge+3540+3670")]
MergeEOF,
/// After Merge Init Code test
#[serde(alias = "Merge+3860")]
MergeMeterInitCode,
/// After Merge plus new PUSH0 opcode
#[serde(alias = "Merge+3855")]
MergePush0,
/// Cancun
Cancun,
/// Cancun to Prague at time 15k
CancunToPragueAtTime15k,
/// Prague
Prague,
}
impl From<ForkSpec> for ChainSpec {
fn from(fork_spec: ForkSpec) -> Self {
let spec_builder = ChainSpecBuilder::mainnet().reset();
match fork_spec {
ForkSpec::Frontier => spec_builder.frontier_activated(),
ForkSpec::FrontierToHomesteadAt5 => spec_builder
.frontier_activated()
.with_fork(EthereumHardfork::Homestead, ForkCondition::Block(5)),
ForkSpec::Homestead => spec_builder.homestead_activated(),
ForkSpec::HomesteadToDaoAt5 => spec_builder
.homestead_activated()
.with_fork(EthereumHardfork::Dao, ForkCondition::Block(5)),
ForkSpec::HomesteadToEIP150At5 => spec_builder
.homestead_activated()
.with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(5)),
ForkSpec::EIP150 => spec_builder.tangerine_whistle_activated(),
ForkSpec::EIP158 => spec_builder.spurious_dragon_activated(),
ForkSpec::EIP158ToByzantiumAt5 => spec_builder
.spurious_dragon_activated()
.with_fork(EthereumHardfork::Byzantium, ForkCondition::Block(5)),
ForkSpec::Byzantium => spec_builder.byzantium_activated(),
ForkSpec::ByzantiumToConstantinopleAt5 => spec_builder
.byzantium_activated()
.with_fork(EthereumHardfork::Constantinople, ForkCondition::Block(5)),
ForkSpec::ByzantiumToConstantinopleFixAt5 => spec_builder
.byzantium_activated()
.with_fork(EthereumHardfork::Petersburg, ForkCondition::Block(5)),
ForkSpec::Constantinople => spec_builder.constantinople_activated(),
ForkSpec::ConstantinopleFix => spec_builder.petersburg_activated(),
ForkSpec::Istanbul => spec_builder.istanbul_activated(),
ForkSpec::Berlin => spec_builder.berlin_activated(),
ForkSpec::BerlinToLondonAt5 => spec_builder
.berlin_activated()
.with_fork(EthereumHardfork::London, ForkCondition::Block(5)),
ForkSpec::London => spec_builder.london_activated(),
ForkSpec::Merge |
ForkSpec::MergeEOF |
ForkSpec::MergeMeterInitCode |
ForkSpec::MergePush0 => spec_builder.paris_activated(),
ForkSpec::ParisToShanghaiAtTime15k => spec_builder
.paris_activated()
.with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(15_000)),
ForkSpec::Shanghai => spec_builder.shanghai_activated(),
ForkSpec::ShanghaiToCancunAtTime15k => spec_builder
.shanghai_activated()
.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(15_000)),
ForkSpec::Cancun => spec_builder.cancun_activated(),
ForkSpec::CancunToPragueAtTime15k => spec_builder
.cancun_activated()
.with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(15_000)),
ForkSpec::Prague => spec_builder.prague_activated(),
}
.build()
}
}
/// Possible seal engines.
#[derive(Debug, PartialEq, Eq, Default, Deserialize)]
pub enum SealEngine {
/// No consensus checks.
#[default]
NoProof,
}
/// Ethereum blockchain test transaction data.
#[derive(Debug, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Transaction {
/// Transaction type
#[serde(rename = "type")]
pub transaction_type: Option<U256>,
/// Data.
pub data: Bytes,
/// Gas limit.
pub gas_limit: U256,
/// Gas price.
pub gas_price: Option<U256>,
/// Nonce.
pub nonce: U256,
/// Signature r part.
pub r: U256,
/// Signature s part.
pub s: U256,
/// Parity bit.
pub v: U256,
/// Transaction value.
pub value: U256,
/// Chain ID.
pub chain_id: Option<U256>,
/// Access list.
pub access_list: Option<AccessList>,
/// Max fee per gas.
pub max_fee_per_gas: Option<U256>,
/// Max priority fee per gas
pub max_priority_fee_per_gas: Option<U256>,
/// Transaction hash.
pub hash: Option<B256>,
}
/// Access list item
#[derive(Debug, PartialEq, Eq, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AccessListItem {
/// Account address
pub address: Address,
/// Storage key.
pub storage_keys: Vec<B256>,
}
/// Access list.
pub type AccessList = Vec<AccessListItem>;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn header_deserialize() {
let test = r#"{
"baseFeePerGas" : "0x0a",
"bloom" : "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"coinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"difficulty" : "0x020000",
"extraData" : "0x00",
"gasLimit" : "0x10000000000000",
"gasUsed" : "0x10000000000000",
"hash" : "0x7ebfee2a2c785fef181b8ffd92d4a48a0660ec000f465f309757e3f092d13882",
"mixHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce" : "0x0000000000000000",
"number" : "0x01",
"parentHash" : "0xa8f2eb2ea9dccbf725801eef5a31ce59bada431e888dfd5501677cc4365dc3be",
"receiptTrie" : "0xbdd943f5c62ae0299324244a0f65524337ada9817e18e1764631cc1424f3a293",
"stateRoot" : "0xc9c6306ee3e5acbaabe8e2fa28a10c12e27bad1d1aacc271665149f70519f8b0",
"timestamp" : "0x03e8",
"transactionsTrie" : "0xf5893b055ca05e4f14d1792745586a1376e218180bd56bd96b2b024e1dc78300",
"uncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"
}"#;
let res = serde_json::from_str::<Header>(test);
assert!(res.is_ok(), "Failed to deserialize Header with error: {res:?}");
}
#[test]
fn transaction_deserialize() {
let test = r#"[
{
"accessList" : [
],
"chainId" : "0x01",
"data" : "0x693c61390000000000000000000000000000000000000000000000000000000000000000",
"gasLimit" : "0x10000000000000",
"maxFeePerGas" : "0x07d0",
"maxPriorityFeePerGas" : "0x00",
"nonce" : "0x01",
"r" : "0x5fecc3972a35c9e341b41b0c269d9a7325e13269fb01c2f64cbce1046b3441c8",
"s" : "0x7d4d0eda0e4ebd53c5d0b6fc35c600b317f8fa873b3963ab623ec9cec7d969bd",
"sender" : "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"to" : "0xcccccccccccccccccccccccccccccccccccccccc",
"type" : "0x02",
"v" : "0x01",
"value" : "0x00"
}
]"#;
let res = serde_json::from_str::<Vec<Transaction>>(test);
assert!(res.is_ok(), "Failed to deserialize transaction with error: {res:?}");
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/cases/blockchain_test.rs | testing/ef-tests/src/cases/blockchain_test.rs | //! Test runners for `BlockchainTests` in <https://github.com/ethereum/tests>
use crate::{
models::{BlockchainTest, ForkSpec},
Case, Error, Suite,
};
use alloy_rlp::{Decodable, Encodable};
use rayon::iter::{ParallelBridge, ParallelIterator};
use reth_chainspec::ChainSpec;
use reth_consensus::{Consensus, HeaderValidator};
use reth_db_common::init::{insert_genesis_hashes, insert_genesis_history, insert_genesis_state};
use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus};
use reth_ethereum_primitives::Block;
use reth_evm::{execute::Executor, ConfigureEvm};
use reth_evm_ethereum::EthEvmConfig;
use reth_primitives_traits::{RecoveredBlock, SealedBlock};
use reth_provider::{
test_utils::create_test_provider_factory_with_chain_spec, BlockWriter, DatabaseProviderFactory,
ExecutionOutcome, HeaderProvider, HistoryWriter, OriginalValuesKnown, StateProofProvider,
StateWriter, StorageLocation,
};
use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord, State};
use reth_stateless::{validation::stateless_validation, ExecutionWitness};
use reth_trie::{HashedPostState, KeccakKeyHasher, StateRoot};
use reth_trie_db::DatabaseStateRoot;
use std::{
collections::BTreeMap,
fs,
path::{Path, PathBuf},
sync::Arc,
};
/// A handler for the blockchain test suite.
#[derive(Debug)]
pub struct BlockchainTests {
suite_path: PathBuf,
}
impl BlockchainTests {
/// Create a new suite for tests with blockchain tests format.
pub const fn new(suite_path: PathBuf) -> Self {
Self { suite_path }
}
}
impl Suite for BlockchainTests {
type Case = BlockchainTestCase;
fn suite_path(&self) -> &Path {
&self.suite_path
}
}
/// An Ethereum blockchain test.
#[derive(Debug, PartialEq, Eq)]
pub struct BlockchainTestCase {
tests: BTreeMap<String, BlockchainTest>,
skip: bool,
}
impl BlockchainTestCase {
/// Returns `true` if the fork is not supported.
const fn excluded_fork(network: ForkSpec) -> bool {
matches!(
network,
ForkSpec::ByzantiumToConstantinopleAt5 |
ForkSpec::Constantinople |
ForkSpec::ConstantinopleFix |
ForkSpec::MergeEOF |
ForkSpec::MergeMeterInitCode |
ForkSpec::MergePush0
)
}
/// Checks if the test case is a particular test called `UncleFromSideChain`
///
/// This fixture fails as expected, however it fails at the wrong block number.
/// Given we no longer have uncle blocks, this test case was pulled out such
/// that we ensure it still fails as expected, however we do not check the block number.
#[inline]
fn is_uncle_sidechain_case(name: &str) -> bool {
name.contains("UncleFromSideChain")
}
/// If the test expects an exception, return the block number
/// at which it must occur together with the original message.
///
/// Note: There is a +1 here because the genesis block is not included
/// in the set of blocks, so the first block is actually block number 1
/// and not block number 0.
#[inline]
fn expected_failure(case: &BlockchainTest) -> Option<(u64, String)> {
case.blocks.iter().enumerate().find_map(|(idx, blk)| {
blk.expect_exception.as_ref().map(|msg| ((idx + 1) as u64, msg.clone()))
})
}
/// Execute a single `BlockchainTest`, validating the outcome against the
/// expectations encoded in the JSON file.
fn run_single_case(name: &str, case: &BlockchainTest) -> Result<(), Error> {
let expectation = Self::expected_failure(case);
match run_case(case) {
// All blocks executed successfully.
Ok(()) => {
// Check if the test case specifies that it should have failed
if let Some((block, msg)) = expectation {
Err(Error::Assertion(format!(
"Test case: {name}\nExpected failure at block {block} - {msg}, but all blocks succeeded",
)))
} else {
Ok(())
}
}
// A block processing failure occurred.
err @ Err(Error::BlockProcessingFailed { block_number, .. }) => match expectation {
// It happened on exactly the block we were told to fail on
Some((expected, _)) if block_number == expected => Ok(()),
// Uncle side‑chain edge case, we accept as long as it failed.
// But we don't check the exact block number.
_ if Self::is_uncle_sidechain_case(name) => Ok(()),
// Expected failure, but block number does not match
Some((expected, _)) => Err(Error::Assertion(format!(
"Test case: {name}\nExpected failure at block {expected}\nGot failure at block {block_number}",
))),
// No failure expected at all - bubble up original error.
None => err,
},
// Non‑processing error – forward as‑is.
//
// This should only happen if we get an unexpected error from processing the block.
// Since it is unexpected, we treat it as a test failure.
//
// One reason for this happening is when one forgets to wrap the error from `run_case`
// so that it produces a `Error::BlockProcessingFailed`
Err(other) => Err(other),
}
}
}
impl Case for BlockchainTestCase {
fn load(path: &Path) -> Result<Self, Error> {
Ok(Self {
tests: {
let s = fs::read_to_string(path)
.map_err(|error| Error::Io { path: path.into(), error })?;
serde_json::from_str(&s)
.map_err(|error| Error::CouldNotDeserialize { path: path.into(), error })?
},
skip: should_skip(path),
})
}
/// Runs the test cases for the Ethereum Forks test suite.
///
/// # Errors
/// Returns an error if the test is flagged for skipping or encounters issues during execution.
fn run(&self) -> Result<(), Error> {
// If the test is marked for skipping, return a Skipped error immediately.
if self.skip {
return Err(Error::Skipped);
}
// Iterate through test cases, filtering by the network type to exclude specific forks.
self.tests
.iter()
.filter(|(_, case)| !Self::excluded_fork(case.network))
.par_bridge()
.try_for_each(|(name, case)| Self::run_single_case(name, case))?;
Ok(())
}
}
/// Executes a single `BlockchainTest`, returning an error if the blockchain state
/// does not match the expected outcome after all blocks are executed.
///
/// A `BlockchainTest` represents a self-contained scenario:
/// - It initializes a fresh blockchain state.
/// - It sequentially decodes, executes, and inserts a predefined set of blocks.
/// - It then verifies that the resulting blockchain state (post-state) matches the expected
/// outcome.
///
/// Returns:
/// - `Ok(())` if all blocks execute successfully and the final state is correct.
/// - `Err(Error)` if any block fails to execute correctly, or if the post-state validation fails.
fn run_case(case: &BlockchainTest) -> Result<(), Error> {
// Create a new test database and initialize a provider for the test case.
let chain_spec: Arc<ChainSpec> = Arc::new(case.network.into());
let factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
let provider = factory.database_provider_rw().unwrap();
// Insert initial test state into the provider.
let genesis_block = SealedBlock::<Block>::from_sealed_parts(
case.genesis_block_header.clone().into(),
Default::default(),
)
.try_recover()
.unwrap();
provider
.insert_block(genesis_block.clone(), StorageLocation::Database)
.map_err(|err| Error::block_failed(0, err))?;
let genesis_state = case.pre.clone().into_genesis_state();
insert_genesis_state(&provider, genesis_state.iter())
.map_err(|err| Error::block_failed(0, err))?;
insert_genesis_hashes(&provider, genesis_state.iter())
.map_err(|err| Error::block_failed(0, err))?;
insert_genesis_history(&provider, genesis_state.iter())
.map_err(|err| Error::block_failed(0, err))?;
// Decode blocks
let blocks = decode_blocks(&case.blocks)?;
let executor_provider = EthEvmConfig::ethereum(chain_spec.clone());
let mut parent = genesis_block;
let mut program_inputs = Vec::new();
for (block_index, block) in blocks.iter().enumerate() {
// Note: same as the comment on `decode_blocks` as to why we cannot use block.number
let block_number = (block_index + 1) as u64;
// Insert the block into the database
provider
.insert_block(block.clone(), StorageLocation::Database)
.map_err(|err| Error::block_failed(block_number, err))?;
// Consensus checks before block execution
pre_execution_checks(chain_spec.clone(), &parent, block)
.map_err(|err| Error::block_failed(block_number, err))?;
let mut witness_record = ExecutionWitnessRecord::default();
// Execute the block
let state_provider = provider.latest();
let state_db = StateProviderDatabase(&state_provider);
let executor = executor_provider.batch_executor(state_db);
let output = executor
.execute_with_state_closure(&(*block).clone(), |statedb: &State<_>| {
witness_record.record_executed_state(statedb);
})
.map_err(|err| Error::block_failed(block_number, err))?;
// Consensus checks after block execution
validate_block_post_execution(block, &chain_spec, &output.receipts, &output.requests)
.map_err(|err| Error::block_failed(block_number, err))?;
// Generate the stateless witness
// TODO: Most of this code is copy-pasted from debug_executionWitness
let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } =
witness_record;
let state = state_provider.witness(Default::default(), hashed_state)?;
let mut exec_witness = ExecutionWitness { state, codes, keys, headers: Default::default() };
let smallest = lowest_block_number.unwrap_or_else(|| {
// Return only the parent header, if there were no calls to the
// BLOCKHASH opcode.
block_number.saturating_sub(1)
});
let range = smallest..block_number;
exec_witness.headers = provider
.headers_range(range)?
.into_iter()
.map(|header| {
let mut serialized_header = Vec::new();
header.encode(&mut serialized_header);
serialized_header.into()
})
.collect();
program_inputs.push((block.clone(), exec_witness));
// Compute and check the post state root
let hashed_state =
HashedPostState::from_bundle_state::<KeccakKeyHasher>(output.state.state());
let (computed_state_root, _) =
StateRoot::overlay_root_with_updates(provider.tx_ref(), hashed_state.clone())
.map_err(|err| Error::block_failed(block_number, err))?;
if computed_state_root != block.state_root {
return Err(Error::block_failed(
block_number,
Error::Assertion("state root mismatch".to_string()),
))
}
// Commit the post state/state diff to the database
provider
.write_state(
&ExecutionOutcome::single(block.number, output),
OriginalValuesKnown::Yes,
StorageLocation::Database,
)
.map_err(|err| Error::block_failed(block_number, err))?;
provider
.write_hashed_state(&hashed_state.into_sorted())
.map_err(|err| Error::block_failed(block_number, err))?;
provider
.update_history_indices(block.number..=block.number)
.map_err(|err| Error::block_failed(block_number, err))?;
// Since there were no errors, update the parent block
parent = block.clone()
}
match &case.post_state {
Some(expected_post_state) => {
// Validate the post-state for the test case.
//
// If we get here then it means that the post-state root checks
// made after we execute each block was successful.
//
// If an error occurs here, then it is:
// - Either an issue with the test setup
// - Possibly an error in the test case where the post-state root in the last block does
// not match the post-state values.
for (address, account) in expected_post_state {
account.assert_db(*address, provider.tx_ref())?;
}
}
None => {
// Some test may not have post-state (e.g., state-heavy benchmark tests).
// In this case, we can skip the post-state validation.
}
}
// Now validate using the stateless client if everything else passes
for (block, execution_witness) in program_inputs {
stateless_validation(
block,
execution_witness,
chain_spec.clone(),
EthEvmConfig::new(chain_spec.clone()),
)
.expect("stateless validation failed");
}
Ok(())
}
fn decode_blocks(
test_case_blocks: &[crate::models::Block],
) -> Result<Vec<RecoveredBlock<Block>>, Error> {
let mut blocks = Vec::with_capacity(test_case_blocks.len());
for (block_index, block) in test_case_blocks.iter().enumerate() {
// The blocks do not include the genesis block which is why we have the plus one.
// We also cannot use block.number because for invalid blocks, this may be incorrect.
let block_number = (block_index + 1) as u64;
let decoded = SealedBlock::<Block>::decode(&mut block.rlp.as_ref())
.map_err(|err| Error::block_failed(block_number, err))?;
let recovered_block =
decoded.clone().try_recover().map_err(|err| Error::block_failed(block_number, err))?;
blocks.push(recovered_block);
}
Ok(blocks)
}
fn pre_execution_checks(
chain_spec: Arc<ChainSpec>,
parent: &RecoveredBlock<Block>,
block: &RecoveredBlock<Block>,
) -> Result<(), Error> {
let consensus: EthBeaconConsensus<ChainSpec> = EthBeaconConsensus::new(chain_spec);
let sealed_header = block.sealed_header();
<EthBeaconConsensus<ChainSpec> as Consensus<Block>>::validate_body_against_header(
&consensus,
block.body(),
sealed_header,
)?;
consensus.validate_header_against_parent(sealed_header, parent.sealed_header())?;
consensus.validate_header(sealed_header)?;
consensus.validate_block_pre_execution(block)?;
Ok(())
}
/// Returns whether the test at the given path should be skipped.
///
/// Some tests are edge cases that cannot happen on mainnet, while others are skipped for
/// convenience (e.g. they take a long time to run) or are temporarily disabled.
///
/// The reason should be documented in a comment above the file name(s).
pub fn should_skip(path: &Path) -> bool {
let path_str = path.to_str().expect("Path is not valid UTF-8");
let name = path.file_name().unwrap().to_str().unwrap();
matches!(
name,
// funky test with `bigint 0x00` value in json :) not possible to happen on mainnet and require
// custom json parser. https://github.com/ethereum/tests/issues/971
| "ValueOverflow.json"
| "ValueOverflowParis.json"
// txbyte is of type 02 and we don't parse tx bytes for this test to fail.
| "typeTwoBerlin.json"
// Test checks if nonce overflows. We are handling this correctly but we are not parsing
// exception in testsuite There are more nonce overflow tests that are internal
// call/create, and those tests are passing and are enabled.
| "CreateTransactionHighNonce.json"
// Test check if gas price overflows, we handle this correctly but does not match tests specific
// exception.
| "HighGasPrice.json"
| "HighGasPriceParis.json"
// Skip test where basefee/accesslist/difficulty is present but it shouldn't be supported in
// London/Berlin/TheMerge. https://github.com/ethereum/tests/blob/5b7e1ab3ffaf026d99d20b17bb30f533a2c80c8b/GeneralStateTests/stExample/eip1559.json#L130
// It is expected to not execute these tests.
| "accessListExample.json"
| "basefeeExample.json"
| "eip1559.json"
| "mergeTest.json"
// These tests are passing, but they take a lot of time to execute so we are going to skip them.
| "loopExp.json"
| "Call50000_sha256.json"
| "static_Call50000_sha256.json"
| "loopMul.json"
| "CALLBlake2f_MaxRounds.json"
| "shiftCombinations.json"
// Skipped by revm as well: <https://github.com/bluealloy/revm/blob/be92e1db21f1c47b34c5a58cfbf019f6b97d7e4b/bins/revme/src/cmd/statetest/runner.rs#L115-L125>
| "RevertInCreateInInit_Paris.json"
| "RevertInCreateInInit.json"
| "dynamicAccountOverwriteEmpty.json"
| "dynamicAccountOverwriteEmpty_Paris.json"
| "RevertInCreateInInitCreate2Paris.json"
| "create2collisionStorage.json"
| "RevertInCreateInInitCreate2.json"
| "create2collisionStorageParis.json"
| "InitCollision.json"
| "InitCollisionParis.json"
)
// Ignore outdated EOF tests that haven't been updated for Cancun yet.
|| path_contains(path_str, &["EIPTests", "stEOF"])
}
/// `str::contains` but for a path. Takes into account the OS path separator (`/` or `\`).
fn path_contains(path_str: &str, rhs: &[&str]) -> bool {
let rhs = rhs.join(std::path::MAIN_SEPARATOR_STR);
path_str.contains(&rhs)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/src/cases/mod.rs | testing/ef-tests/src/cases/mod.rs | //! Specific test case handler implementations.
pub mod blockchain_test;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/ef-tests/tests/tests.rs | testing/ef-tests/tests/tests.rs | #![allow(missing_docs)]
#![cfg(feature = "ef-tests")]
use ef_tests::{cases::blockchain_test::BlockchainTests, suite::Suite};
use std::path::PathBuf;
macro_rules! general_state_test {
($test_name:ident, $dir:ident) => {
#[test]
fn $test_name() {
reth_tracing::init_test_tracing();
let suite_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("ethereum-tests")
.join("BlockchainTests");
BlockchainTests::new(suite_path)
.run_only(&format!("GeneralStateTests/{}", stringify!($dir)));
}
};
}
mod general_state_tests {
use super::*;
general_state_test!(shanghai, Shanghai);
general_state_test!(st_args_zero_one_balance, stArgsZeroOneBalance);
general_state_test!(st_attack, stAttackTest);
general_state_test!(st_bad_opcode, stBadOpcode);
general_state_test!(st_bugs, stBugs);
general_state_test!(st_call_codes, stCallCodes);
general_state_test!(st_call_create_call_code, stCallCreateCallCodeTest);
general_state_test!(
st_call_delegate_codes_call_code_homestead,
stCallDelegateCodesCallCodeHomestead
);
general_state_test!(st_call_delegate_codes_homestead, stCallDelegateCodesHomestead);
general_state_test!(st_chain_id, stChainId);
general_state_test!(st_code_copy_test, stCodeCopyTest);
general_state_test!(st_code_size_limit, stCodeSizeLimit);
general_state_test!(st_create2, stCreate2);
general_state_test!(st_create, stCreateTest);
general_state_test!(st_delegate_call_test_homestead, stDelegatecallTestHomestead);
general_state_test!(st_eip150_gas_prices, stEIP150singleCodeGasPrices);
general_state_test!(st_eip150, stEIP150Specific);
general_state_test!(st_eip158, stEIP158Specific);
general_state_test!(st_eip1559, stEIP1559);
general_state_test!(st_eip2930, stEIP2930);
general_state_test!(st_eip3607, stEIP3607);
general_state_test!(st_example, stExample);
general_state_test!(st_ext_codehash, stExtCodeHash);
general_state_test!(st_homestead, stHomesteadSpecific);
general_state_test!(st_init_code, stInitCodeTest);
general_state_test!(st_log, stLogTests);
general_state_test!(st_mem_expanding_eip150_calls, stMemExpandingEIP150Calls);
general_state_test!(st_memory_stress, stMemoryStressTest);
general_state_test!(st_memory, stMemoryTest);
general_state_test!(st_non_zero_calls, stNonZeroCallsTest);
general_state_test!(st_precompiles, stPreCompiledContracts);
general_state_test!(st_precompiles2, stPreCompiledContracts2);
general_state_test!(st_quadratic_complexity, stQuadraticComplexityTest);
general_state_test!(st_random, stRandom);
general_state_test!(st_random2, stRandom2);
general_state_test!(st_recursive_create, stRecursiveCreate);
general_state_test!(st_refund, stRefundTest);
general_state_test!(st_return, stReturnDataTest);
general_state_test!(st_revert, stRevertTest);
general_state_test!(st_self_balance, stSelfBalance);
general_state_test!(st_shift, stShift);
general_state_test!(st_sload, stSLoadTest);
general_state_test!(st_solidity, stSolidityTest);
general_state_test!(st_special, stSpecialTest);
general_state_test!(st_sstore, stSStoreTest);
general_state_test!(st_stack, stStackTests);
general_state_test!(st_static_call, stStaticCall);
general_state_test!(st_static_flag, stStaticFlagEnabled);
general_state_test!(st_system_operations, stSystemOperationsTest);
general_state_test!(st_time_consuming, stTimeConsuming);
general_state_test!(st_transaction, stTransactionTest);
general_state_test!(st_wallet, stWalletTest);
general_state_test!(st_zero_calls_revert, stZeroCallsRevert);
general_state_test!(st_zero_calls, stZeroCallsTest);
general_state_test!(st_zero_knowledge, stZeroKnowledge);
general_state_test!(st_zero_knowledge2, stZeroKnowledge2);
general_state_test!(vm_tests, VMTests);
}
macro_rules! blockchain_test {
($test_name:ident, $dir:ident) => {
#[test]
fn $test_name() {
reth_tracing::init_test_tracing();
let suite_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("ethereum-tests")
.join("BlockchainTests");
BlockchainTests::new(suite_path).run_only(&format!("{}", stringify!($dir)));
}
};
}
blockchain_test!(valid_blocks, ValidBlocks);
blockchain_test!(invalid_blocks, InvalidBlocks);
#[test]
fn eest_fixtures() {
reth_tracing::init_test_tracing();
let suite_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("execution-spec-tests")
.join("blockchain_tests");
BlockchainTests::new(suite_path).run();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/testing-utils/src/genesis_allocator.rs | testing/testing-utils/src/genesis_allocator.rs | //! Helps create a custom genesis alloc by making it easy to add funded accounts with known
//! signers to the genesis block.
use alloy_primitives::{Address, Bytes, B256, U256};
use reth_primitives_traits::crypto::secp256k1::public_key_to_address;
use secp256k1::{
rand::{thread_rng, RngCore},
Keypair, Secp256k1,
};
use seismic_alloy_genesis::GenesisAccount;
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap},
fmt,
};
/// This helps create a custom genesis alloc by making it easy to add funded accounts with known
/// signers to the genesis block.
///
/// # Example
/// ```
/// # use alloy_primitives::{Address, U256, hex, Bytes};
/// # use reth_testing_utils::GenesisAllocator;
/// # use std::str::FromStr;
/// let mut allocator = GenesisAllocator::default();
///
/// // This will add a genesis account to the alloc builder, with the provided balance. The
/// // signer for the account will be returned.
/// let (_signer, _addr) = allocator.new_funded_account(U256::from(100_000_000_000_000_000u128));
///
/// // You can also provide code for the account.
/// let code = Bytes::from_str("0x1234").unwrap();
/// let (_second_signer, _second_addr) =
/// allocator.new_funded_account_with_code(U256::from(100_000_000_000_000_000u128), code);
///
/// // You can also add an account with a specific address.
/// // This will not return a signer, since the address is provided by the user and the signer
/// // may be unknown.
/// let addr = "0Ac1dF02185025F65202660F8167210A80dD5086".parse::<Address>().unwrap();
/// allocator.add_funded_account_with_address(addr, U256::from(100_000_000_000_000_000u128));
///
/// // Once you're done adding accounts, you can build the alloc.
/// let alloc = allocator.build();
/// ```
pub struct GenesisAllocator<'a> {
/// The genesis alloc to be built.
alloc: HashMap<Address, GenesisAccount>,
/// The rng to use for generating key pairs.
rng: Box<dyn RngCore + 'a>,
}
impl<'a> GenesisAllocator<'a> {
/// Initialize a new alloc builder with the provided rng.
pub fn new_with_rng<R>(rng: &'a mut R) -> Self
where
R: RngCore,
{
Self { alloc: HashMap::default(), rng: Box::new(rng) }
}
/// Use the provided rng for generating key pairs.
pub fn with_rng<R>(mut self, rng: &'a mut R) -> Self
where
R: RngCore + std::fmt::Debug,
{
self.rng = Box::new(rng);
self
}
/// Add a funded account to the genesis alloc.
///
/// Returns the key pair for the account and the account's address.
pub fn new_funded_account(&mut self, balance: U256) -> (Keypair, Address) {
let secp = Secp256k1::new();
let pair = Keypair::new(&secp, &mut self.rng);
let address = public_key_to_address(pair.public_key());
self.alloc.insert(address, GenesisAccount::default().with_balance(balance));
(pair, address)
}
/// Add a funded account to the genesis alloc with the provided code.
///
/// Returns the key pair for the account and the account's address.
pub fn new_funded_account_with_code(
&mut self,
balance: U256,
code: Bytes,
) -> (Keypair, Address) {
let secp = Secp256k1::new();
let pair = Keypair::new(&secp, &mut self.rng);
let address = public_key_to_address(pair.public_key());
self.alloc
.insert(address, GenesisAccount::default().with_balance(balance).with_code(Some(code)));
(pair, address)
}
/// Adds a funded account to the genesis alloc with the provided storage.
///
/// Returns the key pair for the account and the account's address.
pub fn new_funded_account_with_storage(
&mut self,
balance: U256,
storage: BTreeMap<B256, B256>,
) -> (Keypair, Address) {
let secp = Secp256k1::new();
let pair = Keypair::new(&secp, &mut self.rng);
let address = public_key_to_address(pair.public_key());
let storage = seismic_alloy_genesis::convert_fixedbytes_map_to_flagged_storage(storage);
self.alloc.insert(
address,
GenesisAccount::default().with_balance(balance).with_storage(Some(storage)),
);
(pair, address)
}
/// Adds an account with code and storage to the genesis alloc.
///
/// Returns the key pair for the account and the account's address.
pub fn new_account_with_code_and_storage(
&mut self,
code: Bytes,
storage: BTreeMap<B256, B256>,
) -> (Keypair, Address) {
let secp = Secp256k1::new();
let pair = Keypair::new(&secp, &mut self.rng);
let address = public_key_to_address(pair.public_key());
let storage = seismic_alloy_genesis::convert_fixedbytes_map_to_flagged_storage(storage);
self.alloc.insert(
address,
GenesisAccount::default().with_code(Some(code)).with_storage(Some(storage)),
);
(pair, address)
}
/// Adds an account with code to the genesis alloc.
///
/// Returns the key pair for the account and the account's address.
pub fn new_account_with_code(&mut self, code: Bytes) -> (Keypair, Address) {
let secp = Secp256k1::new();
let pair = Keypair::new(&secp, &mut self.rng);
let address = public_key_to_address(pair.public_key());
self.alloc.insert(address, GenesisAccount::default().with_code(Some(code)));
(pair, address)
}
/// Add a funded account to the genesis alloc with the provided address.
///
/// Neither the key pair nor the account will be returned, since the address is provided by
/// the user and the signer may be unknown.
pub fn add_funded_account_with_address(&mut self, address: Address, balance: U256) {
self.alloc.insert(address, GenesisAccount::default().with_balance(balance));
}
/// Adds the given [`GenesisAccount`] to the genesis alloc.
///
/// Returns the key pair for the account and the account's address.
pub fn add_account(&mut self, account: GenesisAccount) -> Address {
let secp = Secp256k1::new();
let pair = Keypair::new(&secp, &mut self.rng);
let address = public_key_to_address(pair.public_key());
self.alloc.insert(address, account);
address
}
/// Gets the account for the provided address.
///
/// If it does not exist, this returns `None`.
pub fn get_account(&self, address: &Address) -> Option<&GenesisAccount> {
self.alloc.get(address)
}
/// Gets a mutable version of the account for the provided address, if it exists.
pub fn get_account_mut(&mut self, address: &Address) -> Option<&mut GenesisAccount> {
self.alloc.get_mut(address)
}
/// Gets an [Entry] for the provided address.
pub fn account_entry(&mut self, address: Address) -> Entry<'_, Address, GenesisAccount> {
self.alloc.entry(address)
}
/// Build the genesis alloc.
pub fn build(self) -> HashMap<Address, GenesisAccount> {
self.alloc
}
}
impl Default for GenesisAllocator<'_> {
fn default() -> Self {
Self { alloc: HashMap::default(), rng: Box::new(thread_rng()) }
}
}
impl fmt::Debug for GenesisAllocator<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("GenesisAllocator").field("alloc", &self.alloc).finish_non_exhaustive()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/testing-utils/src/lib.rs | testing/testing-utils/src/lib.rs | //! Testing utilities.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
pub mod genesis_allocator;
pub use genesis_allocator::GenesisAllocator;
pub mod generators;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/testing-utils/src/generators.rs | testing/testing-utils/src/generators.rs | //! Generators for different data structures like block headers, block bodies and ranges of those.
// TODO(rand): update ::random calls after rand_09 migration
use alloy_consensus::{Header, SignableTransaction, Transaction as _, TxLegacy};
use alloy_eips::{
eip1898::BlockWithParent,
eip4895::{Withdrawal, Withdrawals},
NumHash,
};
use alloy_primitives::{Address, BlockNumber, Bytes, FlaggedStorage, TxKind, B256, B64, U256};
pub use rand::Rng;
use rand::{distr::uniform::SampleRange, rngs::StdRng, SeedableRng};
use reth_ethereum_primitives::{Block, BlockBody, Receipt, Transaction, TransactionSigned};
use reth_primitives_traits::{
crypto::secp256k1::sign_message, proofs, Account, Block as _, Log, SealedBlock, SealedHeader,
StorageEntry,
};
use secp256k1::{Keypair, Secp256k1};
use std::{
cmp::{max, min},
collections::BTreeMap,
ops::{Range, RangeInclusive},
};
/// Used to pass arguments for random block generation function in tests
#[derive(Debug, Default)]
pub struct BlockParams {
/// The parent hash of the block.
pub parent: Option<B256>,
/// The number of transactions in the block.
pub tx_count: Option<u8>,
/// The number of ommers (uncles) in the block.
pub ommers_count: Option<u8>,
/// The number of requests in the block.
pub requests_count: Option<u8>,
/// The number of withdrawals in the block.
pub withdrawals_count: Option<u8>,
}
/// Used to pass arguments for random block generation function in tests
#[derive(Debug)]
pub struct BlockRangeParams {
/// The parent hash of the block.
pub parent: Option<B256>,
/// The range of transactions in the block.
/// If set, a random count between the range will be used.
/// If not set, a random number of transactions will be used.
pub tx_count: Range<u8>,
/// The number of requests in the block.
pub requests_count: Option<Range<u8>>,
/// The number of withdrawals in the block.
pub withdrawals_count: Option<Range<u8>>,
}
impl Default for BlockRangeParams {
fn default() -> Self {
Self {
parent: None,
tx_count: 0..u8::MAX / 2,
requests_count: None,
withdrawals_count: None,
}
}
}
/// Returns a random number generator that can be seeded using the `SEED` environment variable.
///
/// If `SEED` is not set, a random seed is used.
pub fn rng() -> StdRng {
if let Ok(seed) = std::env::var("SEED") {
rng_with_seed(seed.as_bytes())
} else {
StdRng::from_rng(&mut rand::rng())
}
}
/// Returns a random number generator from a specific seed, as bytes.
pub fn rng_with_seed(seed: &[u8]) -> StdRng {
let mut seed_bytes = [0u8; 32];
seed_bytes[..seed.len().min(32)].copy_from_slice(seed);
StdRng::from_seed(seed_bytes)
}
/// Generates a range of random [`SealedHeader`]s.
///
/// The parent hash of the first header
/// in the result will be equal to `head`.
///
/// The headers are assumed to not be correct if validated.
pub fn random_header_range<R: Rng>(
rng: &mut R,
range: Range<u64>,
head: B256,
) -> Vec<SealedHeader> {
let mut headers = Vec::with_capacity(range.end.saturating_sub(range.start) as usize);
for idx in range {
headers.push(random_header(
rng,
idx,
Some(headers.last().map(|h: &SealedHeader| h.hash()).unwrap_or(head)),
));
}
headers
}
/// Generate a random [`BlockWithParent`].
pub fn random_block_with_parent<R: Rng>(
rng: &mut R,
number: u64,
parent: Option<B256>,
) -> BlockWithParent {
BlockWithParent {
parent: parent.unwrap_or_default(),
block: NumHash::new(number, rng.random()),
}
}
/// Generate a random [`SealedHeader`].
///
/// The header is assumed to not be correct if validated.
pub fn random_header<R: Rng>(rng: &mut R, number: u64, parent: Option<B256>) -> SealedHeader {
let header = alloy_consensus::Header {
number,
nonce: B64::random(),
difficulty: U256::from(rng.random::<u32>()),
parent_hash: parent.unwrap_or_default(),
..Default::default()
};
SealedHeader::seal_slow(header)
}
/// Generates a random legacy [Transaction].
///
/// Every field is random, except:
///
/// - The chain ID, which is always 1
/// - The input, which is always nothing
pub fn random_tx<R: Rng>(rng: &mut R) -> Transaction {
Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: rng.random::<u16>().into(),
gas_price: rng.random::<u16>().into(),
gas_limit: rng.random::<u16>().into(),
to: TxKind::Call(Address::random()),
value: U256::from(rng.random::<u16>()),
input: Bytes::default(),
})
}
/// Generates a random legacy [Transaction] that is signed.
///
/// On top of the considerations of [`random_tx`], these apply as well:
///
/// - There is no guarantee that the nonce is not used twice for the same account
pub fn random_signed_tx<R: Rng>(rng: &mut R) -> TransactionSigned {
let tx = random_tx(rng);
sign_tx_with_random_key_pair(rng, tx)
}
/// Signs the [Transaction] with a random key pair.
pub fn sign_tx_with_random_key_pair<R: Rng>(_rng: &mut R, tx: Transaction) -> TransactionSigned {
let secp = Secp256k1::new();
// TODO: rand08
let key_pair = Keypair::new(&secp, &mut rand_08::thread_rng());
sign_tx_with_key_pair(key_pair, tx)
}
/// Signs the [Transaction] with the given key pair.
pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned {
let signature =
sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap();
tx.into_signed(signature).into()
}
/// Generates a new random [Keypair].
pub fn generate_key<R: Rng>(_rng: &mut R) -> Keypair {
let secp = Secp256k1::new();
Keypair::new(&secp, &mut rand_08::thread_rng())
}
/// Generates a set of [Keypair]s based on the desired count.
pub fn generate_keys<R: Rng>(_rng: &mut R, count: usize) -> Vec<Keypair> {
let secp = Secp256k1::new();
// TODO: rand08
(0..count).map(|_| Keypair::new(&secp, &mut rand_08::thread_rng())).collect()
}
/// Generate a random block filled with signed transactions (generated using
/// [`random_signed_tx`]). If no transaction count is provided, the number of transactions
/// will be random, otherwise the provided count will be used.
///
/// All fields use the default values (and are assumed to be invalid) except for:
///
/// - `parent_hash`
/// - `transactions_root`
/// - `ommers_hash`
///
/// Additionally, `gas_used` and `gas_limit` always exactly match the total `gas_limit` of all
/// transactions in the block.
///
/// The ommer headers are not assumed to be valid.
pub fn random_block<R: Rng>(
rng: &mut R,
number: u64,
block_params: BlockParams,
) -> SealedBlock<Block> {
// Generate transactions
let tx_count = block_params.tx_count.unwrap_or_else(|| rng.random::<u8>());
let transactions: Vec<TransactionSigned> =
(0..tx_count).map(|_| random_signed_tx(rng)).collect();
let total_gas = transactions.iter().fold(0, |sum, tx| sum + tx.gas_limit());
// Generate ommers
let ommers_count = block_params.ommers_count.unwrap_or_else(|| rng.random_range(0..2));
let ommers = (0..ommers_count)
.map(|_| random_header(rng, number, block_params.parent).unseal())
.collect::<Vec<_>>();
// Calculate roots
let transactions_root = proofs::calculate_transaction_root(&transactions);
let ommers_hash = proofs::calculate_ommers_root(&ommers);
let withdrawals = block_params.withdrawals_count.map(|count| {
(0..count)
.map(|i| Withdrawal {
amount: rng.random(),
index: i.into(),
validator_index: i.into(),
address: Address::random(),
})
.collect::<Vec<_>>()
});
let withdrawals_root = withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w));
let header = Header {
parent_hash: block_params.parent.unwrap_or_default(),
number,
gas_used: total_gas,
gas_limit: total_gas,
transactions_root,
ommers_hash,
base_fee_per_gas: Some(rng.random()),
// TODO(onbjerg): Proper EIP-7685 request support
requests_hash: None,
withdrawals_root,
..Default::default()
};
Block {
header,
body: BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) },
}
.seal_slow()
}
/// Generate a range of random blocks.
///
/// The parent hash of the first block
/// in the result will be equal to `head`.
///
/// See [`random_block`] for considerations when validating the generated blocks.
pub fn random_block_range<R: Rng>(
rng: &mut R,
block_numbers: RangeInclusive<BlockNumber>,
block_range_params: BlockRangeParams,
) -> Vec<SealedBlock<Block>> {
let mut blocks =
Vec::with_capacity(block_numbers.end().saturating_sub(*block_numbers.start()) as usize);
for idx in block_numbers {
let tx_count = block_range_params.tx_count.clone().sample_single(rng).unwrap();
let requests_count =
block_range_params.requests_count.clone().map(|r| r.sample_single(rng).unwrap());
let withdrawals_count =
block_range_params.withdrawals_count.clone().map(|r| r.sample_single(rng).unwrap());
let parent = block_range_params.parent.unwrap_or_default();
blocks.push(random_block(
rng,
idx,
BlockParams {
parent: Some(
blocks.last().map(|block: &SealedBlock<Block>| block.hash()).unwrap_or(parent),
),
tx_count: Some(tx_count),
ommers_count: None,
requests_count,
withdrawals_count,
},
));
}
blocks
}
/// Collection of account and storage entry changes
pub type ChangeSet = Vec<(Address, Account, Vec<StorageEntry>)>;
type AccountState = (Account, Vec<StorageEntry>);
/// Generate a range of changesets for given blocks and accounts.
///
/// Returns a Vec of account and storage changes for each block,
/// along with the final state of all accounts and storages.
pub fn random_changeset_range<'a, R: Rng, IBlk, IAcc>(
rng: &mut R,
blocks: IBlk,
accounts: IAcc,
n_storage_changes: Range<u64>,
key_range: Range<u64>,
) -> (Vec<ChangeSet>, BTreeMap<Address, AccountState>)
where
IBlk: IntoIterator<Item = &'a SealedBlock<Block>>,
IAcc: IntoIterator<Item = (Address, (Account, Vec<StorageEntry>))>,
{
let mut state: BTreeMap<_, _> = accounts
.into_iter()
.map(|(addr, (acc, st))| {
(addr, (acc, st.into_iter().map(|e| (e.key, e.value)).collect::<BTreeMap<_, _>>()))
})
.collect();
let valid_addresses = state.keys().copied().collect::<Vec<_>>();
let mut changesets = Vec::new();
for _block in blocks {
let mut changeset = Vec::new();
let (from, to, mut transfer, new_entries) = random_account_change(
rng,
&valid_addresses,
n_storage_changes.clone(),
key_range.clone(),
);
// extract from sending account
let (prev_from, _) = state.get_mut(&from).unwrap();
changeset.push((from, *prev_from, Vec::new()));
transfer = max(min(transfer, prev_from.balance), U256::from(1));
prev_from.balance = prev_from.balance.wrapping_sub(transfer);
// deposit in receiving account and update storage
let (prev_to, storage): &mut (Account, BTreeMap<B256, FlaggedStorage>) =
state.get_mut(&to).unwrap();
let mut old_entries: Vec<_> = new_entries
.into_iter()
.filter_map(|entry| {
let old = if entry.value.is_zero() {
let old = storage.remove(&entry.key);
if let Some(old_stored) = old {
if old_stored.is_zero() {
return None;
}
}
old
} else {
storage.insert(entry.key, entry.value)
};
match old {
Some(old_value) => {
return Some(StorageEntry { value: old_value, ..entry });
}
None => {
return Some(StorageEntry { key: entry.key, value: FlaggedStorage::ZERO });
}
}
})
.collect();
old_entries.sort_by_key(|entry| entry.key);
changeset.push((to, *prev_to, old_entries));
changeset.sort_by_key(|(address, _, _)| *address);
prev_to.balance = prev_to.balance.wrapping_add(transfer);
changesets.push(changeset);
}
let final_state = state
.into_iter()
.map(|(addr, (acc, storage))| {
(addr, (acc, storage.into_iter().map(|v| v.into()).collect()))
})
.collect();
(changesets, final_state)
}
/// Generate a random account change.
///
/// Returns two addresses, a `balance_change`, and a Vec of new storage entries.
pub fn random_account_change<R: Rng>(
rng: &mut R,
valid_addresses: &[Address],
n_storage_changes: Range<u64>,
key_range: Range<u64>,
) -> (Address, Address, U256, Vec<StorageEntry>) {
use rand::prelude::IndexedRandom;
let mut addresses = valid_addresses.choose_multiple(rng, 2).copied();
let addr_from = addresses.next().unwrap_or_else(Address::random);
let addr_to = addresses.next().unwrap_or_else(Address::random);
let balance_change = U256::from(rng.random::<u64>());
let storage_changes = if n_storage_changes.is_empty() {
Vec::new()
} else {
(0..n_storage_changes.sample_single(rng).unwrap())
.map(|_| random_storage_entry(rng, key_range.clone()))
.collect()
};
(addr_from, addr_to, balance_change, storage_changes)
}
/// Generate a random storage change.
pub fn random_storage_entry<R: Rng>(rng: &mut R, key_range: Range<u64>) -> StorageEntry {
let key = B256::new({
let n = key_range.sample_single(rng).unwrap();
let mut m = [0u8; 32];
m[24..32].copy_from_slice(&n.to_be_bytes());
m
});
let value = U256::from(rng.random::<u64>());
StorageEntry::new(key, value, false)
}
/// Generate random Externally Owned Account (EOA account without contract).
pub fn random_eoa_account<R: Rng>(rng: &mut R) -> (Address, Account) {
let nonce: u64 = rng.random();
let balance = U256::from(rng.random::<u32>());
let addr = Address::random();
(addr, Account { nonce, balance, bytecode_hash: None })
}
/// Generate random Externally Owned Accounts
pub fn random_eoa_accounts<R: Rng>(rng: &mut R, accounts_num: usize) -> Vec<(Address, Account)> {
let mut accounts = Vec::with_capacity(accounts_num);
for _ in 0..accounts_num {
accounts.push(random_eoa_account(rng))
}
accounts
}
/// Generate random Contract Accounts
pub fn random_contract_account_range<R: Rng>(
rng: &mut R,
acc_range: &mut Range<u64>,
) -> Vec<(Address, Account)> {
let mut accounts = Vec::with_capacity(acc_range.end.saturating_sub(acc_range.start) as usize);
for _ in acc_range {
let (address, eoa_account) = random_eoa_account(rng);
// todo: can a non-eoa account have a nonce > 0?
let account = Account { bytecode_hash: Some(B256::random()), ..eoa_account };
accounts.push((address, account))
}
accounts
}
/// Generate random receipt for transaction
pub fn random_receipt<R: Rng>(
rng: &mut R,
transaction: &TransactionSigned,
logs_count: Option<u8>,
topics_count: Option<u8>,
) -> Receipt {
let success = rng.random::<bool>();
let logs_count = logs_count.unwrap_or_else(|| rng.random::<u8>());
#[expect(clippy::needless_update)] // side-effect of optimism fields
Receipt {
tx_type: transaction.tx_type(),
success,
cumulative_gas_used: rng.random_range(0..=transaction.gas_limit()),
logs: if success {
(0..logs_count).map(|_| random_log(rng, None, topics_count)).collect()
} else {
vec![]
},
..Default::default()
}
}
/// Generate random log
pub fn random_log<R: Rng>(rng: &mut R, address: Option<Address>, topics_count: Option<u8>) -> Log {
let data_byte_count = rng.random::<u8>() as usize;
let topics_count = topics_count.unwrap_or_else(|| rng.random()) as usize;
Log::new_unchecked(
address.unwrap_or_else(|| Address::random()),
std::iter::repeat_with(|| B256::random()).take(topics_count).collect(),
std::iter::repeat_with(|| rng.random()).take(data_byte_count).collect::<Vec<_>>().into(),
)
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::TxEip1559;
use alloy_eips::eip2930::AccessList;
use alloy_primitives::{hex, Signature};
use reth_primitives_traits::{
crypto::secp256k1::{public_key_to_address, sign_message},
SignerRecoverable,
};
use std::str::FromStr;
#[test]
fn test_sign_message() {
let secp = Secp256k1::new();
let tx = Transaction::Eip1559(TxEip1559 {
chain_id: 1,
nonce: 0x42,
gas_limit: 44386,
to: TxKind::Call(hex!("6069a6c32cf691f5982febae4faf8a6f3ab2f0f6").into()),
value: U256::from(0_u64),
input: hex!("a22cb4650000000000000000000000005eee75727d804a2b13038928d36f8b188945a57a0000000000000000000000000000000000000000000000000000000000000000").into(),
max_fee_per_gas: 0x4a817c800,
max_priority_fee_per_gas: 0x3b9aca00,
access_list: AccessList::default(),
});
let signature_hash = tx.signature_hash();
for _ in 0..100 {
let key_pair = Keypair::new(&secp, &mut rand_08::thread_rng());
let signature =
sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash)
.unwrap();
let signed: TransactionSigned = tx.clone().into_signed(signature).into();
let recovered = signed.recover_signer().unwrap();
let expected = public_key_to_address(key_pair.public_key());
assert_eq!(recovered, expected);
}
}
#[test]
fn test_sign_eip_155() {
// reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md#example
let transaction = Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 9,
gas_price: 20 * 10_u128.pow(9),
gas_limit: 21000,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(10_u128.pow(18)),
input: Bytes::default(),
});
// TODO resolve dependency issue
// let expected =
// hex!("ec098504a817c800825208943535353535353535353535353535353535353535880de0b6b3a764000080018080");
// assert_eq!(expected, &alloy_rlp::encode(transaction));
let hash = transaction.signature_hash();
let expected =
B256::from_str("daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53")
.unwrap();
assert_eq!(expected, hash);
let secret =
B256::from_str("4646464646464646464646464646464646464646464646464646464646464646")
.unwrap();
let signature = sign_message(secret, hash).unwrap();
let expected = Signature::new(
U256::from_str(
"18515461264373351373200002665853028612451056578545711640558177340181847433846",
)
.unwrap(),
U256::from_str(
"46948507304638947509940763649030358759909902576025900602547168820602576006531",
)
.unwrap(),
false,
);
assert_eq!(expected, signature);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/testing/runner/src/main.rs | testing/runner/src/main.rs | //! Command-line interface for running tests.
use std::path::PathBuf;
use clap::Parser;
use ef_tests::{cases::blockchain_test::BlockchainTests, Suite};
/// Command-line arguments for the test runner.
#[derive(Debug, Parser)]
pub struct TestRunnerCommand {
/// Path to the test suite
suite_path: PathBuf,
}
fn main() {
let cmd = TestRunnerCommand::parse();
BlockchainTests::new(cmd.suite_path.join("blockchain_tests")).run();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.