repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/session/types.rs | crates/net/network/src/session/types.rs | //! Shared types for network sessions.
use alloy_primitives::B256;
use parking_lot::RwLock;
use reth_eth_wire::BlockRangeUpdate;
use std::{
ops::RangeInclusive,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
};
/// Information about the range of blocks available from a peer.
///
/// This represents the announced `eth69`
/// [`BlockRangeUpdate`] of a peer.
#[derive(Debug, Clone)]
pub struct BlockRangeInfo {
/// The inner range information.
inner: Arc<BlockRangeInfoInner>,
}
impl BlockRangeInfo {
/// Creates a new range information.
pub fn new(earliest: u64, latest: u64, latest_hash: B256) -> Self {
Self {
inner: Arc::new(BlockRangeInfoInner {
earliest: AtomicU64::new(earliest),
latest: AtomicU64::new(latest),
latest_hash: RwLock::new(latest_hash),
}),
}
}
/// Returns true if the block number is within the range of blocks available from the peer.
pub fn contains(&self, block_number: u64) -> bool {
self.range().contains(&block_number)
}
/// Returns the range of blocks available from the peer.
pub fn range(&self) -> RangeInclusive<u64> {
let earliest = self.earliest();
let latest = self.latest();
RangeInclusive::new(earliest, latest)
}
/// Returns the earliest block number available from the peer.
pub fn earliest(&self) -> u64 {
self.inner.earliest.load(Ordering::Relaxed)
}
/// Returns the latest block number available from the peer.
pub fn latest(&self) -> u64 {
self.inner.latest.load(Ordering::Relaxed)
}
/// Returns the latest block hash available from the peer.
pub fn latest_hash(&self) -> B256 {
*self.inner.latest_hash.read()
}
/// Updates the range information.
pub fn update(&self, earliest: u64, latest: u64, latest_hash: B256) {
self.inner.earliest.store(earliest, Ordering::Relaxed);
self.inner.latest.store(latest, Ordering::Relaxed);
*self.inner.latest_hash.write() = latest_hash;
}
/// Converts the current range information to an Eth69 [`BlockRangeUpdate`] message.
pub fn to_message(&self) -> BlockRangeUpdate {
BlockRangeUpdate {
earliest: self.earliest(),
latest: self.latest(),
latest_hash: self.latest_hash(),
}
}
}
/// Inner structure containing the range information with atomic and thread-safe fields.
#[derive(Debug)]
pub(crate) struct BlockRangeInfoInner {
/// The earliest block which is available.
earliest: AtomicU64,
/// The latest block which is available.
latest: AtomicU64,
/// Latest available block's hash.
latest_hash: RwLock<B256>,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/session/mod.rs | crates/net/network/src/session/mod.rs | //! Support for handling peer sessions.
mod active;
mod conn;
mod counter;
mod handle;
mod types;
pub use types::BlockRangeInfo;
use crate::{
message::PeerMessage,
metrics::SessionManagerMetrics,
protocol::{IntoRlpxSubProtocol, OnNotSupported, RlpxSubProtocolHandlers, RlpxSubProtocols},
session::active::ActiveSession,
};
use active::QueuedOutgoingMessages;
use counter::SessionCounter;
use futures::{future::Either, io, FutureExt, StreamExt};
use reth_ecies::{stream::ECIESStream, ECIESError};
use reth_eth_wire::{
errors::EthStreamError, handshake::EthRlpxHandshake, multiplex::RlpxProtocolMultiplexer,
BlockRangeUpdate, Capabilities, DisconnectReason, EthStream, EthVersion,
HelloMessageWithProtocols, NetworkPrimitives, UnauthedP2PStream, UnifiedStatus,
HANDSHAKE_TIMEOUT,
};
use reth_ethereum_forks::{ForkFilter, ForkId, ForkTransition, Head};
use reth_metrics::common::mpsc::MeteredPollSender;
use reth_network_api::{PeerRequest, PeerRequestSender};
use reth_network_peers::PeerId;
use reth_network_types::SessionsConfig;
use reth_tasks::TaskSpawner;
use rustc_hash::FxHashMap;
use secp256k1::SecretKey;
use std::{
collections::HashMap,
future::Future,
net::SocketAddr,
sync::{atomic::AtomicU64, Arc},
task::{Context, Poll},
time::{Duration, Instant},
};
use tokio::{
io::{AsyncRead, AsyncWrite},
net::TcpStream,
sync::{mpsc, mpsc::error::TrySendError, oneshot},
};
use tokio_stream::wrappers::ReceiverStream;
use tokio_util::sync::PollSender;
use tracing::{debug, instrument, trace};
use crate::session::active::RANGE_UPDATE_INTERVAL;
pub use conn::EthRlpxConnection;
pub use handle::{
ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle,
SessionCommand,
};
pub use reth_network_api::{Direction, PeerInfo};
/// Internal identifier for active sessions.
#[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)]
pub struct SessionId(usize);
/// Manages a set of sessions.
#[must_use = "Session Manager must be polled to process session events."]
#[derive(Debug)]
pub struct SessionManager<N: NetworkPrimitives> {
/// Tracks the identifier for the next session.
next_id: usize,
/// Keeps track of all sessions
counter: SessionCounter,
/// The maximum initial time an [`ActiveSession`] waits for a response from the peer before it
/// responds to an _internal_ request with a `TimeoutError`
initial_internal_request_timeout: Duration,
/// If an [`ActiveSession`] does not receive a response at all within this duration then it is
/// considered a protocol violation and the session will initiate a drop.
protocol_breach_request_timeout: Duration,
/// The timeout after which a pending session attempt is considered failed.
pending_session_timeout: Duration,
/// The secret key used for authenticating sessions.
secret_key: SecretKey,
/// The `Status` message to send to peers.
status: UnifiedStatus,
/// The `HelloMessage` message to send to peers.
hello_message: HelloMessageWithProtocols,
/// The [`ForkFilter`] used to validate the peer's `Status` message.
fork_filter: ForkFilter,
/// Size of the command buffer per session.
session_command_buffer: usize,
/// The executor for spawned tasks.
executor: Box<dyn TaskSpawner>,
/// All pending session that are currently handshaking, exchanging `Hello`s.
///
/// Events produced during the authentication phase are reported to this manager. Once the
/// session is authenticated, it can be moved to the `active_session` set.
pending_sessions: FxHashMap<SessionId, PendingSessionHandle>,
/// All active sessions that are ready to exchange messages.
active_sessions: HashMap<PeerId, ActiveSessionHandle<N>>,
/// The original Sender half of the [`PendingSessionEvent`] channel.
///
/// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will
/// get a clone of this sender half.
pending_sessions_tx: mpsc::Sender<PendingSessionEvent<N>>,
/// Receiver half that listens for [`PendingSessionEvent`] produced by pending sessions.
pending_session_rx: ReceiverStream<PendingSessionEvent<N>>,
/// The original Sender half of the [`ActiveSessionMessage`] channel.
///
/// When active session state is reached, the corresponding [`ActiveSessionHandle`] will get a
/// clone of this sender half.
active_session_tx: MeteredPollSender<ActiveSessionMessage<N>>,
/// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions.
active_session_rx: ReceiverStream<ActiveSessionMessage<N>>,
/// Additional `RLPx` sub-protocols to be used by the session manager.
extra_protocols: RlpxSubProtocols,
/// Tracks the ongoing graceful disconnections attempts for incoming connections.
disconnections_counter: DisconnectionsCounter,
/// Metrics for the session manager.
metrics: SessionManagerMetrics,
/// The [`EthRlpxHandshake`] is used to perform the initial handshake with the peer.
handshake: Arc<dyn EthRlpxHandshake>,
/// Shared local range information that gets propagated to active sessions.
/// This represents the range of blocks that this node can serve to other peers.
local_range_info: BlockRangeInfo,
}
// === impl SessionManager ===
impl<N: NetworkPrimitives> SessionManager<N> {
/// Creates a new empty [`SessionManager`].
#[expect(clippy::too_many_arguments)]
pub fn new(
secret_key: SecretKey,
config: SessionsConfig,
executor: Box<dyn TaskSpawner>,
status: UnifiedStatus,
hello_message: HelloMessageWithProtocols,
fork_filter: ForkFilter,
extra_protocols: RlpxSubProtocols,
handshake: Arc<dyn EthRlpxHandshake>,
) -> Self {
let (pending_sessions_tx, pending_sessions_rx) = mpsc::channel(config.session_event_buffer);
let (active_session_tx, active_session_rx) = mpsc::channel(config.session_event_buffer);
let active_session_tx = PollSender::new(active_session_tx);
// Initialize local range info from the status
let local_range_info = BlockRangeInfo::new(
status.earliest_block.unwrap_or_default(),
status.latest_block.unwrap_or_default(),
status.blockhash,
);
Self {
next_id: 0,
counter: SessionCounter::new(config.limits),
initial_internal_request_timeout: config.initial_internal_request_timeout,
protocol_breach_request_timeout: config.protocol_breach_request_timeout,
pending_session_timeout: config.pending_session_timeout,
secret_key,
status,
hello_message,
fork_filter,
session_command_buffer: config.session_command_buffer,
executor,
pending_sessions: Default::default(),
active_sessions: Default::default(),
pending_sessions_tx,
pending_session_rx: ReceiverStream::new(pending_sessions_rx),
active_session_tx: MeteredPollSender::new(active_session_tx, "network_active_session"),
active_session_rx: ReceiverStream::new(active_session_rx),
extra_protocols,
disconnections_counter: Default::default(),
metrics: Default::default(),
handshake,
local_range_info,
}
}
/// Returns the currently tracked [`ForkId`].
pub(crate) const fn fork_id(&self) -> ForkId {
self.fork_filter.current()
}
/// Check whether the provided [`ForkId`] is compatible based on the validation rules in
/// `EIP-2124`.
pub fn is_valid_fork_id(&self, fork_id: ForkId) -> bool {
self.fork_filter.validate(fork_id).is_ok()
}
/// Returns the next unique [`SessionId`].
const fn next_id(&mut self) -> SessionId {
let id = self.next_id;
self.next_id += 1;
SessionId(id)
}
/// Returns the current status of the session.
pub const fn status(&self) -> UnifiedStatus {
self.status
}
/// Returns the secret key used for authenticating sessions.
pub const fn secret_key(&self) -> SecretKey {
self.secret_key
}
/// Returns a borrowed reference to the active sessions.
pub const fn active_sessions(&self) -> &HashMap<PeerId, ActiveSessionHandle<N>> {
&self.active_sessions
}
/// Returns the session hello message.
pub fn hello_message(&self) -> HelloMessageWithProtocols {
self.hello_message.clone()
}
/// Adds an additional protocol handler to the `RLPx` sub-protocol list.
pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
self.extra_protocols.push(protocol)
}
/// Returns the number of currently pending connections.
#[inline]
pub(crate) fn num_pending_connections(&self) -> usize {
self.pending_sessions.len()
}
/// Spawns the given future onto a new task that is tracked in the `spawned_tasks`
/// [`JoinSet`](tokio::task::JoinSet).
fn spawn<F>(&self, f: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.executor.spawn(f.boxed());
}
/// Invoked on a received status update.
///
/// If the updated activated another fork, this will return a [`ForkTransition`] and updates the
/// active [`ForkId`]. See also [`ForkFilter::set_head`].
pub(crate) fn on_status_update(&mut self, head: Head) -> Option<ForkTransition> {
self.status.blockhash = head.hash;
self.status.total_difficulty = Some(head.total_difficulty);
let transition = self.fork_filter.set_head(head);
self.status.forkid = self.fork_filter.current();
self.status.latest_block = Some(head.number);
transition
}
/// An incoming TCP connection was received. This starts the authentication process to turn this
/// stream into an active peer session.
///
/// Returns an error if the configured limit has been reached.
pub(crate) fn on_incoming(
&mut self,
stream: TcpStream,
remote_addr: SocketAddr,
) -> Result<SessionId, ExceedsSessionLimit> {
self.counter.ensure_pending_inbound()?;
let session_id = self.next_id();
trace!(
target: "net::session",
?remote_addr,
?session_id,
"new pending incoming session"
);
let (disconnect_tx, disconnect_rx) = oneshot::channel();
let pending_events = self.pending_sessions_tx.clone();
let secret_key = self.secret_key;
let hello_message = self.hello_message.clone();
let status = self.status;
let fork_filter = self.fork_filter.clone();
let extra_handlers = self.extra_protocols.on_incoming(remote_addr);
self.spawn(pending_session_with_timeout(
self.pending_session_timeout,
session_id,
remote_addr,
Direction::Incoming,
pending_events.clone(),
start_pending_incoming_session(
self.handshake.clone(),
disconnect_rx,
session_id,
stream,
pending_events,
remote_addr,
secret_key,
hello_message,
status,
fork_filter,
extra_handlers,
),
));
let handle = PendingSessionHandle {
disconnect_tx: Some(disconnect_tx),
direction: Direction::Incoming,
};
self.pending_sessions.insert(session_id, handle);
self.counter.inc_pending_inbound();
Ok(session_id)
}
/// Starts a new pending session from the local node to the given remote node.
pub fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_peer_id: PeerId) {
// The error can be dropped because no dial will be made if it would exceed the limit
if self.counter.ensure_pending_outbound().is_ok() {
let session_id = self.next_id();
let (disconnect_tx, disconnect_rx) = oneshot::channel();
let pending_events = self.pending_sessions_tx.clone();
let secret_key = self.secret_key;
let hello_message = self.hello_message.clone();
let fork_filter = self.fork_filter.clone();
let status = self.status;
let extra_handlers = self.extra_protocols.on_outgoing(remote_addr, remote_peer_id);
self.spawn(pending_session_with_timeout(
self.pending_session_timeout,
session_id,
remote_addr,
Direction::Outgoing(remote_peer_id),
pending_events.clone(),
start_pending_outbound_session(
self.handshake.clone(),
disconnect_rx,
pending_events,
session_id,
remote_addr,
remote_peer_id,
secret_key,
hello_message,
status,
fork_filter,
extra_handlers,
),
));
let handle = PendingSessionHandle {
disconnect_tx: Some(disconnect_tx),
direction: Direction::Outgoing(remote_peer_id),
};
self.pending_sessions.insert(session_id, handle);
self.counter.inc_pending_outbound();
}
}
/// Initiates a shutdown of the channel.
///
/// This will trigger the disconnect on the session task to gracefully terminate. The result
/// will be picked up by the receiver.
pub fn disconnect(&self, node: PeerId, reason: Option<DisconnectReason>) {
if let Some(session) = self.active_sessions.get(&node) {
session.disconnect(reason);
}
}
/// Initiates a shutdown of all sessions.
///
/// It will trigger the disconnect on all the session tasks to gracefully terminate. The result
/// will be picked by the receiver.
pub fn disconnect_all(&self, reason: Option<DisconnectReason>) {
for session in self.active_sessions.values() {
session.disconnect(reason);
}
}
/// Disconnects all pending sessions.
pub fn disconnect_all_pending(&mut self) {
for session in self.pending_sessions.values_mut() {
session.disconnect();
}
}
/// Sends a message to the peer's session
pub fn send_message(&self, peer_id: &PeerId, msg: PeerMessage<N>) {
if let Some(session) = self.active_sessions.get(peer_id) {
let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)).inspect_err(
|e| {
if let TrySendError::Full(_) = e {
debug!(
target: "net::session",
?peer_id,
"session command buffer full, dropping message"
);
self.metrics.total_outgoing_peer_messages_dropped.increment(1);
}
},
);
}
}
/// Removes the [`PendingSessionHandle`] if it exists.
fn remove_pending_session(&mut self, id: &SessionId) -> Option<PendingSessionHandle> {
let session = self.pending_sessions.remove(id)?;
self.counter.dec_pending(&session.direction);
Some(session)
}
/// Removes the [`PendingSessionHandle`] if it exists.
fn remove_active_session(&mut self, id: &PeerId) -> Option<ActiveSessionHandle<N>> {
let session = self.active_sessions.remove(id)?;
self.counter.dec_active(&session.direction);
Some(session)
}
/// Try to gracefully disconnect an incoming connection by initiating a ECIES connection and
/// sending a disconnect. If [`SessionManager`] is at capacity for ongoing disconnections, will
/// simply drop the incoming connection.
pub(crate) fn try_disconnect_incoming_connection(
&self,
stream: TcpStream,
reason: DisconnectReason,
) {
if !self.disconnections_counter.has_capacity() {
// drop the connection if we don't have capacity for gracefully disconnecting
return
}
let guard = self.disconnections_counter.clone();
let secret_key = self.secret_key;
self.spawn(async move {
trace!(
target: "net::session",
"gracefully disconnecting incoming connection"
);
if let Ok(stream) = get_ecies_stream(stream, secret_key, Direction::Incoming).await {
let mut unauth = UnauthedP2PStream::new(stream);
let _ = unauth.send_disconnect(reason).await;
drop(guard);
}
});
}
/// This polls all the session handles and returns [`SessionEvent`].
///
/// Active sessions are prioritized.
pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<SessionEvent<N>> {
// Poll events from active sessions
match self.active_session_rx.poll_next_unpin(cx) {
Poll::Pending => {}
Poll::Ready(None) => {
unreachable!("Manager holds both channel halves.")
}
Poll::Ready(Some(event)) => {
return match event {
ActiveSessionMessage::Disconnected { peer_id, remote_addr } => {
trace!(
target: "net::session",
?peer_id,
"gracefully disconnected active session."
);
self.remove_active_session(&peer_id);
Poll::Ready(SessionEvent::Disconnected { peer_id, remote_addr })
}
ActiveSessionMessage::ClosedOnConnectionError {
peer_id,
remote_addr,
error,
} => {
trace!(target: "net::session", ?peer_id, %error,"closed session.");
self.remove_active_session(&peer_id);
Poll::Ready(SessionEvent::SessionClosedOnConnectionError {
remote_addr,
peer_id,
error,
})
}
ActiveSessionMessage::ValidMessage { peer_id, message } => {
Poll::Ready(SessionEvent::ValidMessage { peer_id, message })
}
ActiveSessionMessage::BadMessage { peer_id } => {
Poll::Ready(SessionEvent::BadMessage { peer_id })
}
ActiveSessionMessage::ProtocolBreach { peer_id } => {
Poll::Ready(SessionEvent::ProtocolBreach { peer_id })
}
}
}
}
// Poll the pending session event stream
let event = match self.pending_session_rx.poll_next_unpin(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => unreachable!("Manager holds both channel halves."),
Poll::Ready(Some(event)) => event,
};
match event {
PendingSessionEvent::Established {
session_id,
remote_addr,
local_addr,
peer_id,
capabilities,
conn,
status,
direction,
client_id,
} => {
// move from pending to established.
self.remove_pending_session(&session_id);
// If there's already a session to the peer then we disconnect right away
if self.active_sessions.contains_key(&peer_id) {
trace!(
target: "net::session",
?session_id,
?remote_addr,
?peer_id,
?direction,
"already connected"
);
self.spawn(async move {
// send a disconnect message
let _ =
conn.into_inner().disconnect(DisconnectReason::AlreadyConnected).await;
});
return Poll::Ready(SessionEvent::AlreadyConnected {
peer_id,
remote_addr,
direction,
})
}
let (commands_to_session, commands_rx) = mpsc::channel(self.session_command_buffer);
let (to_session_tx, messages_rx) = mpsc::channel(self.session_command_buffer);
let messages = PeerRequestSender::new(peer_id, to_session_tx);
let timeout = Arc::new(AtomicU64::new(
self.initial_internal_request_timeout.as_millis() as u64,
));
// negotiated version
let version = conn.version();
// Configure the interval at which the range information is updated, starting with
// ETH69
let range_update_interval = (conn.version() >= EthVersion::Eth69).then(|| {
let mut interval = tokio::time::interval(RANGE_UPDATE_INTERVAL);
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
interval
});
let session = ActiveSession {
next_id: 0,
remote_peer_id: peer_id,
remote_addr,
remote_capabilities: Arc::clone(&capabilities),
session_id,
commands_rx: ReceiverStream::new(commands_rx),
to_session_manager: self.active_session_tx.clone(),
pending_message_to_session: None,
internal_request_rx: ReceiverStream::new(messages_rx).fuse(),
inflight_requests: Default::default(),
conn,
queued_outgoing: QueuedOutgoingMessages::new(
self.metrics.queued_outgoing_messages.clone(),
),
received_requests_from_remote: Default::default(),
internal_request_timeout_interval: tokio::time::interval(
self.initial_internal_request_timeout,
),
internal_request_timeout: Arc::clone(&timeout),
protocol_breach_request_timeout: self.protocol_breach_request_timeout,
terminate_message: None,
range_info: None,
local_range_info: self.local_range_info.clone(),
range_update_interval,
};
self.spawn(session);
let client_version = client_id.into();
let handle = ActiveSessionHandle {
status: status.clone(),
direction,
session_id,
remote_id: peer_id,
version,
established: Instant::now(),
capabilities: Arc::clone(&capabilities),
commands_to_session,
client_version: Arc::clone(&client_version),
remote_addr,
local_addr,
};
self.active_sessions.insert(peer_id, handle);
self.counter.inc_active(&direction);
if direction.is_outgoing() {
self.metrics.total_dial_successes.increment(1);
}
Poll::Ready(SessionEvent::SessionEstablished {
peer_id,
remote_addr,
client_version,
version,
capabilities,
status,
messages,
direction,
timeout,
range_info: None,
})
}
PendingSessionEvent::Disconnected { remote_addr, session_id, direction, error } => {
trace!(
target: "net::session",
?session_id,
?remote_addr,
?error,
"disconnected pending session"
);
self.remove_pending_session(&session_id);
match direction {
Direction::Incoming => {
Poll::Ready(SessionEvent::IncomingPendingSessionClosed {
remote_addr,
error,
})
}
Direction::Outgoing(peer_id) => {
Poll::Ready(SessionEvent::OutgoingPendingSessionClosed {
remote_addr,
peer_id,
error,
})
}
}
}
PendingSessionEvent::OutgoingConnectionError {
remote_addr,
session_id,
peer_id,
error,
} => {
trace!(
target: "net::session",
%error,
?session_id,
?remote_addr,
?peer_id,
"connection refused"
);
self.remove_pending_session(&session_id);
Poll::Ready(SessionEvent::OutgoingConnectionError { remote_addr, peer_id, error })
}
PendingSessionEvent::EciesAuthError { remote_addr, session_id, error, direction } => {
trace!(
target: "net::session",
%error,
?session_id,
?remote_addr,
"ecies auth failed"
);
self.remove_pending_session(&session_id);
match direction {
Direction::Incoming => {
Poll::Ready(SessionEvent::IncomingPendingSessionClosed {
remote_addr,
error: Some(PendingSessionHandshakeError::Ecies(error)),
})
}
Direction::Outgoing(peer_id) => {
Poll::Ready(SessionEvent::OutgoingPendingSessionClosed {
remote_addr,
peer_id,
error: Some(PendingSessionHandshakeError::Ecies(error)),
})
}
}
}
}
}
/// Updates the advertised block range that this node can serve to other peers starting with
/// Eth69.
///
/// This method updates both the local status message that gets sent to peers during handshake
/// and the shared local range information that gets propagated to active sessions (Eth69).
/// The range information is used in ETH69 protocol where peers announce the range of blocks
/// they can serve to optimize data synchronization.
pub(crate) fn update_advertised_block_range(&mut self, block_range_update: BlockRangeUpdate) {
self.status.earliest_block = Some(block_range_update.earliest);
self.status.latest_block = Some(block_range_update.latest);
self.status.blockhash = block_range_update.latest_hash;
// Update the shared local range info that gets propagated to active sessions
self.local_range_info.update(
block_range_update.earliest,
block_range_update.latest,
block_range_update.latest_hash,
);
}
}
/// A counter for ongoing graceful disconnections attempts.
#[derive(Default, Debug, Clone)]
struct DisconnectionsCounter(Arc<()>);
impl DisconnectionsCounter {
const MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS: usize = 15;
/// Returns true if the [`DisconnectionsCounter`] still has capacity
/// for an additional graceful disconnection.
fn has_capacity(&self) -> bool {
Arc::strong_count(&self.0) <= Self::MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS
}
}
/// Events produced by the [`SessionManager`]
#[derive(Debug)]
pub enum SessionEvent<N: NetworkPrimitives> {
/// A new session was successfully authenticated.
///
/// This session is now able to exchange data.
SessionEstablished {
/// The remote node's public key
peer_id: PeerId,
/// The remote node's socket address
remote_addr: SocketAddr,
/// The user agent of the remote node, usually containing the client name and version
client_version: Arc<str>,
/// The capabilities the remote node has announced
capabilities: Arc<Capabilities>,
/// negotiated eth version
version: EthVersion,
/// The Status message the peer sent during the `eth` handshake
status: Arc<UnifiedStatus>,
/// The channel for sending messages to the peer with the session
messages: PeerRequestSender<PeerRequest<N>>,
/// The direction of the session, either `Inbound` or `Outgoing`
direction: Direction,
/// The maximum time that the session waits for a response from the peer before timing out
/// the connection
timeout: Arc<AtomicU64>,
/// The range info for the peer.
range_info: Option<BlockRangeInfo>,
},
/// The peer was already connected with another session.
AlreadyConnected {
/// The remote node's public key
peer_id: PeerId,
/// The remote node's socket address
remote_addr: SocketAddr,
/// The direction of the session, either `Inbound` or `Outgoing`
direction: Direction,
},
/// A session received a valid message via `RLPx`.
ValidMessage {
/// The remote node's public key
peer_id: PeerId,
/// Message received from the peer.
message: PeerMessage<N>,
},
/// Received a bad message from the peer.
BadMessage {
/// Identifier of the remote peer.
peer_id: PeerId,
},
/// Remote peer is considered in protocol violation
ProtocolBreach {
/// Identifier of the remote peer.
peer_id: PeerId,
},
/// Closed an incoming pending session during handshaking.
IncomingPendingSessionClosed {
/// The remote node's socket address
remote_addr: SocketAddr,
/// The pending handshake session error that caused the session to close
error: Option<PendingSessionHandshakeError>,
},
/// Closed an outgoing pending session during handshaking.
OutgoingPendingSessionClosed {
/// The remote node's socket address
remote_addr: SocketAddr,
/// The remote node's public key
peer_id: PeerId,
/// The pending handshake session error that caused the session to close
error: Option<PendingSessionHandshakeError>,
},
/// Failed to establish a tcp stream
OutgoingConnectionError {
/// The remote node's socket address
remote_addr: SocketAddr,
/// The remote node's public key
peer_id: PeerId,
/// The error that caused the outgoing connection to fail
error: io::Error,
},
/// Session was closed due to an error
SessionClosedOnConnectionError {
/// The id of the remote peer.
peer_id: PeerId,
/// The socket we were connected to.
remote_addr: SocketAddr,
/// The error that caused the session to close
error: EthStreamError,
},
/// Active session was gracefully disconnected.
Disconnected {
/// The remote node's public key
peer_id: PeerId,
/// The remote node's socket address that we were connected to
remote_addr: SocketAddr,
},
}
/// Errors that can occur during handshaking/authenticating the underlying streams.
#[derive(Debug, thiserror::Error)]
pub enum PendingSessionHandshakeError {
/// The pending session failed due to an error while establishing the `eth` stream
#[error(transparent)]
Eth(EthStreamError),
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/session/counter.rs | crates/net/network/src/session/counter.rs | use super::ExceedsSessionLimit;
use reth_network_api::Direction;
use reth_network_types::SessionLimits;
/// Keeps track of all sessions.
#[derive(Debug, Clone)]
pub struct SessionCounter {
/// Limits to enforce.
limits: SessionLimits,
/// Number of pending incoming sessions.
pending_inbound: u32,
/// Number of pending outgoing sessions.
pending_outbound: u32,
/// Number of active inbound sessions.
active_inbound: u32,
/// Number of active outbound sessions.
active_outbound: u32,
}
// === impl SessionCounter ===
impl SessionCounter {
pub(crate) const fn new(limits: SessionLimits) -> Self {
Self {
limits,
pending_inbound: 0,
pending_outbound: 0,
active_inbound: 0,
active_outbound: 0,
}
}
pub(crate) const fn inc_pending_inbound(&mut self) {
self.pending_inbound += 1;
}
pub(crate) const fn inc_pending_outbound(&mut self) {
self.pending_outbound += 1;
}
pub(crate) const fn dec_pending(&mut self, direction: &Direction) {
match direction {
Direction::Outgoing(_) => {
self.pending_outbound -= 1;
}
Direction::Incoming => {
self.pending_inbound -= 1;
}
}
}
pub(crate) const fn inc_active(&mut self, direction: &Direction) {
match direction {
Direction::Outgoing(_) => {
self.active_outbound += 1;
}
Direction::Incoming => {
self.active_inbound += 1;
}
}
}
pub(crate) const fn dec_active(&mut self, direction: &Direction) {
match direction {
Direction::Outgoing(_) => {
self.active_outbound -= 1;
}
Direction::Incoming => {
self.active_inbound -= 1;
}
}
}
pub(crate) const fn ensure_pending_outbound(&self) -> Result<(), ExceedsSessionLimit> {
Self::ensure(self.pending_outbound, self.limits.max_pending_outbound)
}
pub(crate) const fn ensure_pending_inbound(&self) -> Result<(), ExceedsSessionLimit> {
Self::ensure(self.pending_inbound, self.limits.max_pending_inbound)
}
const fn ensure(current: u32, limit: Option<u32>) -> Result<(), ExceedsSessionLimit> {
if let Some(limit) = limit {
if current >= limit {
return Err(ExceedsSessionLimit(limit))
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_limits() {
let mut limits = SessionCounter::new(SessionLimits::default().with_max_pending_inbound(2));
assert!(limits.ensure_pending_outbound().is_ok());
limits.inc_pending_inbound();
assert!(limits.ensure_pending_inbound().is_ok());
limits.inc_pending_inbound();
assert!(limits.ensure_pending_inbound().is_err());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/session/active.rs | crates/net/network/src/session/active.rs | //! Represents an established session.
use core::sync::atomic::Ordering;
use std::{
collections::VecDeque,
future::Future,
net::SocketAddr,
pin::Pin,
sync::{atomic::AtomicU64, Arc},
task::{ready, Context, Poll},
time::{Duration, Instant},
};
use crate::{
message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult},
session::{
conn::EthRlpxConnection,
handle::{ActiveSessionMessage, SessionCommand},
BlockRangeInfo, EthVersion, SessionId,
},
};
use alloy_primitives::Sealable;
use futures::{stream::Fuse, SinkExt, StreamExt};
use metrics::Gauge;
use reth_eth_wire::{
errors::{EthHandshakeError, EthStreamError},
message::{EthBroadcastMessage, MessageError, RequestPair},
Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, NewBlockPayload,
};
use reth_eth_wire_types::RawCapabilityMessage;
use reth_metrics::common::mpsc::MeteredPollSender;
use reth_network_api::PeerRequest;
use reth_network_p2p::error::RequestError;
use reth_network_peers::PeerId;
use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT;
use reth_primitives_traits::Block;
use rustc_hash::FxHashMap;
use tokio::{
sync::{mpsc::error::TrySendError, oneshot},
time::Interval,
};
use tokio_stream::wrappers::ReceiverStream;
use tokio_util::sync::PollSender;
use tracing::{debug, trace};
/// The recommended interval at which a new range update should be sent to the remote peer.
///
/// This is set to 120 seconds (2 minutes) as per the Ethereum specification for eth69.
pub(super) const RANGE_UPDATE_INTERVAL: Duration = Duration::from_secs(120);

// Constants for adaptive request-timeout updating (see `update_request_timeout`).
/// Minimum timeout value the adaptive request timeout can shrink to.
const MINIMUM_TIMEOUT: Duration = Duration::from_secs(2);
/// Maximum timeout value the adaptive request timeout can grow to.
const MAXIMUM_TIMEOUT: Duration = INITIAL_REQUEST_TIMEOUT;
/// How much the new measurements affect the current timeout (X percent).
// NOTE(review): presumably the smoothing weight used by `calculate_new_timeout` — that
// helper is defined elsewhere in this file; confirm there.
const SAMPLE_IMPACT: f64 = 0.1;
/// Amount of RTTs before timeout
const TIMEOUT_SCALING: u32 = 3;
/// Restricts the number of queued outgoing messages for larger responses:
///  - Block Bodies
///  - Receipts
///  - Headers
///  - `PooledTransactions`
///
/// With proper softlimits in place (2MB) this targets 10MB (4+1 * 2MB) of outgoing response data.
///
/// This parameter serves as backpressure for reading additional requests from the remote.
/// Once we've queued up more responses than this, the session should prioritize message flushing
/// before reading any more messages from the remote peer, throttling the peer.
const MAX_QUEUED_OUTGOING_RESPONSES: usize = 4;
/// The type that advances an established session by listening for incoming messages (from local
/// node or read from connection) and emitting events back to the
/// [`SessionManager`](super::SessionManager).
///
/// It listens for
///  - incoming commands from the [`SessionManager`](super::SessionManager)
///  - incoming _internal_ requests/broadcasts via the request/command channel
///  - incoming requests/broadcasts _from remote_ via the connection
///  - responses for handled ETH requests received from the remote peer.
#[expect(dead_code)]
pub(crate) struct ActiveSession<N: NetworkPrimitives> {
    /// Keeps track of request ids.
    // Monotonically increasing counter; see `next_id()`.
    pub(crate) next_id: u64,
    /// The underlying connection.
    pub(crate) conn: EthRlpxConnection<N>,
    /// Identifier of the node we're connected to.
    pub(crate) remote_peer_id: PeerId,
    /// The address we're connected to.
    pub(crate) remote_addr: SocketAddr,
    /// All capabilities the peer announced
    pub(crate) remote_capabilities: Arc<Capabilities>,
    /// Internal identifier of this session
    pub(crate) session_id: SessionId,
    /// Incoming commands from the manager
    pub(crate) commands_rx: ReceiverStream<SessionCommand<N>>,
    /// Sink to send messages to the [`SessionManager`](super::SessionManager).
    pub(crate) to_session_manager: MeteredPollSender<ActiveSessionMessage<N>>,
    /// A message that needs to be delivered to the session manager
    // Buffered here when the manager channel was full; retried on the next poll.
    pub(crate) pending_message_to_session: Option<ActiveSessionMessage<N>>,
    /// Incoming internal requests which are delegated to the remote peer.
    pub(crate) internal_request_rx: Fuse<ReceiverStream<PeerRequest<N>>>,
    /// All requests sent to the remote peer we're waiting on a response
    pub(crate) inflight_requests: FxHashMap<u64, InflightRequest<PeerRequest<N>>>,
    /// All requests that were sent by the remote peer and we're waiting on an internal response
    pub(crate) received_requests_from_remote: Vec<ReceivedRequest<N>>,
    /// Buffered messages that should be handled and sent to the peer.
    pub(crate) queued_outgoing: QueuedOutgoingMessages<N>,
    /// The maximum time we wait for a response from a peer.
    // Stored as milliseconds; shared so timeouts adapt across components.
    pub(crate) internal_request_timeout: Arc<AtomicU64>,
    /// Interval when to check for timed out requests.
    pub(crate) internal_request_timeout_interval: Interval,
    /// If an [`ActiveSession`] does not receive a response at all within this duration then it is
    /// considered a protocol violation and the session will initiate a drop.
    pub(crate) protocol_breach_request_timeout: Duration,
    /// Used to reserve a slot to guarantee that the termination message is delivered
    pub(crate) terminate_message:
        Option<(PollSender<ActiveSessionMessage<N>>, ActiveSessionMessage<N>)>,
    /// The eth69 range info for the remote peer.
    pub(crate) range_info: Option<BlockRangeInfo>,
    /// The eth69 range info for the local node (this node).
    /// This represents the range of blocks that this node can serve to other peers.
    pub(crate) local_range_info: BlockRangeInfo,
    /// Optional interval for sending periodic range updates to the remote peer (eth69+)
    /// Recommended frequency is ~2 minutes per spec
    pub(crate) range_update_interval: Option<Interval>,
}
impl<N: NetworkPrimitives> ActiveSession<N> {
    /// Returns `true` if the session is currently in the process of disconnecting
    fn is_disconnecting(&self) -> bool {
        self.conn.inner().is_disconnecting()
    }

    /// Returns the next request id
    ///
    /// Ids are handed out sequentially; a `u64` counter will not realistically wrap.
    const fn next_id(&mut self) -> u64 {
        let id = self.next_id;
        self.next_id += 1;
        id
    }

    /// Shrinks the capacity of the internal buffers.
    pub fn shrink_to_fit(&mut self) {
        self.received_requests_from_remote.shrink_to_fit();
        self.queued_outgoing.shrink_to_fit();
    }

    /// Returns how many responses we've currently queued up.
    // Linear scan over the queue; the queue is bounded by backpressure so this stays cheap.
    fn queued_response_count(&self) -> usize {
        self.queued_outgoing.messages.iter().filter(|m| m.is_response()).count()
    }

    /// Handle a message read from the connection.
    ///
    /// Returns an error if the message is considered to be in violation of the protocol.
    fn on_incoming_message(&mut self, msg: EthMessage<N>) -> OnIncomingMessageOutcome<N> {
        /// A macro that handles an incoming request
        /// This creates a new channel and tries to send the sender half to the session while
        /// storing the receiver half internally so the pending response can be polled.
        macro_rules! on_request {
            ($req:ident, $resp_item:ident, $req_item:ident) => {{
                let RequestPair { request_id, message: request } = $req;
                let (tx, response) = oneshot::channel();
                let received = ReceivedRequest {
                    request_id,
                    rx: PeerResponse::$resp_item { response },
                    received: Instant::now(),
                };
                self.received_requests_from_remote.push(received);
                self.try_emit_request(PeerMessage::EthRequest(PeerRequest::$req_item {
                    request,
                    response: tx,
                }))
                .into()
            }};
        }

        /// Processes a response received from the peer
        macro_rules! on_response {
            ($resp:ident, $item:ident) => {{
                let RequestPair { request_id, message } = $resp;
                if let Some(req) = self.inflight_requests.remove(&request_id) {
                    match req.request {
                        RequestState::Waiting(PeerRequest::$item { response, .. }) => {
                            trace!(peer_id=?self.remote_peer_id, ?request_id, "received response from peer");
                            let _ = response.send(Ok(message));
                            self.update_request_timeout(req.timestamp, Instant::now());
                        }
                        RequestState::Waiting(request) => {
                            // response variant does not match the request we sent
                            request.send_bad_response();
                        }
                        RequestState::TimedOut => {
                            // request was already timed out internally
                            self.update_request_timeout(req.timestamp, Instant::now());
                        }
                    }
                } else {
                    trace!(peer_id=?self.remote_peer_id, ?request_id, "received response to unknown request");
                    // we received a response to a request we never sent
                    self.on_bad_message();
                }
                OnIncomingMessageOutcome::Ok
            }};
        }

        match msg {
            // `Status` is only valid during the handshake; receiving it afterwards is a
            // protocol violation.
            message @ EthMessage::Status(_) => OnIncomingMessageOutcome::BadMessage {
                error: EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake),
                message,
            },
            EthMessage::NewBlockHashes(msg) => {
                self.try_emit_broadcast(PeerMessage::NewBlockHashes(msg)).into()
            }
            EthMessage::NewBlock(msg) => {
                let block = NewBlockMessage {
                    hash: msg.block().header().hash_slow(),
                    block: Arc::new(*msg),
                };
                self.try_emit_broadcast(PeerMessage::NewBlock(block)).into()
            }
            EthMessage::Transactions(msg) => {
                self.try_emit_broadcast(PeerMessage::ReceivedTransaction(msg)).into()
            }
            EthMessage::NewPooledTransactionHashes66(msg) => {
                self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into()
            }
            EthMessage::NewPooledTransactionHashes68(msg) => {
                // eth68 announcements carry parallel `hashes`/`types`/`sizes` lists which
                // must all be the same length.
                if msg.hashes.len() != msg.types.len() || msg.hashes.len() != msg.sizes.len() {
                    return OnIncomingMessageOutcome::BadMessage {
                        error: EthStreamError::TransactionHashesInvalidLenOfFields {
                            hashes_len: msg.hashes.len(),
                            types_len: msg.types.len(),
                            sizes_len: msg.sizes.len(),
                        },
                        message: EthMessage::NewPooledTransactionHashes68(msg),
                    }
                }
                self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into()
            }
            EthMessage::GetBlockHeaders(req) => {
                on_request!(req, BlockHeaders, GetBlockHeaders)
            }
            EthMessage::BlockHeaders(resp) => {
                on_response!(resp, GetBlockHeaders)
            }
            EthMessage::GetBlockBodies(req) => {
                on_request!(req, BlockBodies, GetBlockBodies)
            }
            EthMessage::BlockBodies(resp) => {
                on_response!(resp, GetBlockBodies)
            }
            EthMessage::GetPooledTransactions(req) => {
                on_request!(req, PooledTransactions, GetPooledTransactions)
            }
            EthMessage::PooledTransactions(resp) => {
                on_response!(resp, GetPooledTransactions)
            }
            EthMessage::GetNodeData(req) => {
                // GetNodeData is disabled to prevent privacy leaks - treat as bad message
                return OnIncomingMessageOutcome::BadMessage {
                    error: EthStreamError::InvalidMessage(MessageError::Other(
                        "GetNodeData message is disabled to prevent privacy leaks".to_string(),
                    )),
                    message: EthMessage::GetNodeData(req),
                }
            }
            EthMessage::NodeData(resp) => {
                // NodeData is disabled to prevent privacy leaks - treat as bad message
                return OnIncomingMessageOutcome::BadMessage {
                    error: EthStreamError::InvalidMessage(MessageError::Other(
                        "NodeData message is disabled to prevent privacy leaks".to_string(),
                    )),
                    message: EthMessage::NodeData(resp),
                }
            }
            EthMessage::GetReceipts(req) => {
                // eth69 changed the receipts encoding, so dispatch on the negotiated version
                if self.conn.version() >= EthVersion::Eth69 {
                    on_request!(req, Receipts69, GetReceipts69)
                } else {
                    on_request!(req, Receipts, GetReceipts)
                }
            }
            EthMessage::Receipts(resp) => {
                on_response!(resp, GetReceipts)
            }
            EthMessage::Receipts69(resp) => {
                on_response!(resp, GetReceipts69)
            }
            EthMessage::BlockRangeUpdate(msg) => {
                // Validate that earliest <= latest according to the spec
                if msg.earliest > msg.latest {
                    return OnIncomingMessageOutcome::BadMessage {
                        error: EthStreamError::InvalidMessage(MessageError::Other(format!(
                            "invalid block range: earliest ({}) > latest ({})",
                            msg.earliest, msg.latest
                        ))),
                        message: EthMessage::BlockRangeUpdate(msg),
                    };
                }
                // `range_info` is only present for eth69+ sessions; otherwise the update is
                // silently ignored.
                if let Some(range_info) = self.range_info.as_ref() {
                    range_info.update(msg.earliest, msg.latest, msg.latest_hash);
                }
                OnIncomingMessageOutcome::Ok
            }
            EthMessage::Other(bytes) => self.try_emit_broadcast(PeerMessage::Other(bytes)).into(),
        }
    }

    /// Handle an internal peer request that will be sent to the remote.
    ///
    /// Queues the request for sending and tracks it in `inflight_requests` until the
    /// matching response (or a timeout) arrives.
    fn on_internal_peer_request(&mut self, request: PeerRequest<N>, deadline: Instant) {
        let request_id = self.next_id();
        trace!(?request, peer_id=?self.remote_peer_id, ?request_id, "sending request to peer");
        let msg = request.create_request_message(request_id);
        self.queued_outgoing.push_back(msg.into());
        let req = InflightRequest {
            request: RequestState::Waiting(request),
            timestamp: Instant::now(),
            deadline,
        };
        self.inflight_requests.insert(request_id, req);
    }

    /// Handle a message received from the internal network
    fn on_internal_peer_message(&mut self, msg: PeerMessage<N>) {
        match msg {
            PeerMessage::NewBlockHashes(msg) => {
                self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into());
            }
            PeerMessage::NewBlock(msg) => {
                self.queued_outgoing.push_back(EthBroadcastMessage::NewBlock(msg.block).into());
            }
            PeerMessage::PooledTransactions(msg) => {
                // drop announcements that the negotiated protocol version cannot encode
                if msg.is_valid_for_version(self.conn.version()) {
                    self.queued_outgoing.push_back(EthMessage::from(msg).into());
                } else {
                    debug!(target: "net", ?msg, version=?self.conn.version(), "Message is invalid for connection version, skipping");
                }
            }
            PeerMessage::EthRequest(req) => {
                let deadline = self.request_deadline();
                self.on_internal_peer_request(req, deadline);
            }
            PeerMessage::SendTransactions(msg) => {
                self.queued_outgoing.push_back(EthBroadcastMessage::Transactions(msg).into());
            }
            // range updates are sent on their own schedule, not via this path
            PeerMessage::BlockRangeUpdated(_) => {}
            PeerMessage::ReceivedTransaction(_) => {
                unreachable!("Not emitted by network")
            }
            PeerMessage::Other(other) => {
                self.queued_outgoing.push_back(OutgoingMessage::Raw(other));
            }
        }
    }

    /// Returns the deadline timestamp at which the request times out
    fn request_deadline(&self) -> Instant {
        Instant::now() +
            Duration::from_millis(self.internal_request_timeout.load(Ordering::Relaxed))
    }

    /// Handle a Response to the peer
    ///
    /// This will queue the response to be sent to the peer
    fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult<N>) {
        match resp.try_into_message(id) {
            Ok(msg) => {
                self.queued_outgoing.push_back(msg.into());
            }
            Err(err) => {
                debug!(target: "net", %err, "Failed to respond to received request");
            }
        }
    }

    /// Send a message back to the [`SessionManager`](super::SessionManager).
    ///
    /// Returns the message if the bounded channel is currently unable to handle this message.
    #[expect(clippy::result_large_err)]
    fn try_emit_broadcast(&self, message: PeerMessage<N>) -> Result<(), ActiveSessionMessage<N>> {
        let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) };

        match sender
            .try_send(ActiveSessionMessage::ValidMessage { peer_id: self.remote_peer_id, message })
        {
            Ok(_) => Ok(()),
            Err(err) => {
                trace!(
                    target: "net",
                    %err,
                    "no capacity for incoming broadcast",
                );
                match err {
                    // full channel: hand the message back so the caller can buffer/retry it
                    TrySendError::Full(msg) => Err(msg),
                    TrySendError::Closed(_) => Ok(()),
                }
            }
        }
    }

    /// Send a message back to the [`SessionManager`](super::SessionManager)
    /// covering both broadcasts and incoming requests.
    ///
    /// Returns the message if the bounded channel is currently unable to handle this message.
    #[expect(clippy::result_large_err)]
    fn try_emit_request(&self, message: PeerMessage<N>) -> Result<(), ActiveSessionMessage<N>> {
        let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) };

        match sender
            .try_send(ActiveSessionMessage::ValidMessage { peer_id: self.remote_peer_id, message })
        {
            Ok(_) => Ok(()),
            Err(err) => {
                trace!(
                    target: "net",
                    %err,
                    "no capacity for incoming request",
                );
                match err {
                    TrySendError::Full(msg) => Err(msg),
                    TrySendError::Closed(_) => {
                        // Note: this would mean the `SessionManager` was dropped, which is already
                        // handled by checking if the command receiver channel has been closed.
                        Ok(())
                    }
                }
            }
        }
    }

    /// Notify the manager that the peer sent a bad message
    // Best effort: a full or closed channel is silently ignored.
    fn on_bad_message(&self) {
        let Some(sender) = self.to_session_manager.inner().get_ref() else { return };
        let _ = sender.try_send(ActiveSessionMessage::BadMessage { peer_id: self.remote_peer_id });
    }

    /// Report back that this session has been closed.
    fn emit_disconnect(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        trace!(target: "net::session", remote_peer_id=?self.remote_peer_id, "emitting disconnect");
        let msg = ActiveSessionMessage::Disconnected {
            peer_id: self.remote_peer_id,
            remote_addr: self.remote_addr,
        };

        // stash the message so delivery is guaranteed before the task terminates
        self.terminate_message = Some((self.to_session_manager.inner().clone(), msg));
        self.poll_terminate_message(cx).expect("message is set")
    }

    /// Report back that this session has been closed due to an error
    fn close_on_error(&mut self, error: EthStreamError, cx: &mut Context<'_>) -> Poll<()> {
        let msg = ActiveSessionMessage::ClosedOnConnectionError {
            peer_id: self.remote_peer_id,
            remote_addr: self.remote_addr,
            error,
        };
        self.terminate_message = Some((self.to_session_manager.inner().clone(), msg));
        self.poll_terminate_message(cx).expect("message is set")
    }

    /// Starts the disconnect process
    fn start_disconnect(&mut self, reason: DisconnectReason) -> Result<(), EthStreamError> {
        Ok(self.conn.inner_mut().start_disconnect(reason)?)
    }

    /// Flushes the disconnect message and emits the corresponding message
    fn poll_disconnect(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        debug_assert!(self.is_disconnecting(), "not disconnecting");

        // try to flush out the remaining Disconnect message
        let _ = ready!(self.conn.poll_close_unpin(cx));
        self.emit_disconnect(cx)
    }

    /// Attempts to disconnect by sending the given disconnect reason
    fn try_disconnect(&mut self, reason: DisconnectReason, cx: &mut Context<'_>) -> Poll<()> {
        match self.start_disconnect(reason) {
            Ok(()) => {
                // we're done
                self.poll_disconnect(cx)
            }
            Err(err) => {
                debug!(target: "net::session", %err, remote_peer_id=?self.remote_peer_id, "could not send disconnect");
                self.close_on_error(err, cx)
            }
        }
    }

    /// Checks for _internally_ timed out requests.
    ///
    /// If a request misses its deadline, then it is timed out internally.
    /// If a request misses the `protocol_breach_request_timeout` then this session is considered in
    /// protocol violation and will close.
    ///
    /// Returns `true` if a peer missed the `protocol_breach_request_timeout`, in which case the
    /// session should be terminated.
    #[must_use]
    fn check_timed_out_requests(&mut self, now: Instant) -> bool {
        for (id, req) in &mut self.inflight_requests {
            if req.is_timed_out(now) {
                if req.is_waiting() {
                    // first stage: mark the request as timed out but keep tracking it
                    debug!(target: "net::session", ?id, remote_peer_id=?self.remote_peer_id, "timed out outgoing request");
                    req.timeout();
                } else if now - req.timestamp > self.protocol_breach_request_timeout {
                    // second stage: still no response at all -> protocol breach
                    return true
                }
            }
        }

        false
    }

    /// Updates the request timeout with a request's timestamps
    // Feeds the measured round-trip into the adaptive timeout and resets the check interval.
    fn update_request_timeout(&mut self, sent: Instant, received: Instant) {
        let elapsed = received.saturating_duration_since(sent);

        let current = Duration::from_millis(self.internal_request_timeout.load(Ordering::Relaxed));
        let request_timeout = calculate_new_timeout(current, elapsed);
        self.internal_request_timeout.store(request_timeout.as_millis() as u64, Ordering::Relaxed);
        self.internal_request_timeout_interval = tokio::time::interval(request_timeout);
    }

    /// If a termination message is queued this will try to send it
    ///
    /// Returns `None` when no termination message is pending, `Some(Poll::Pending)` when the
    /// channel has no capacity yet (the message is kept for a retry), and `Some(Poll::Ready(()))`
    /// once the message has been delivered or the channel is closed.
    fn poll_terminate_message(&mut self, cx: &mut Context<'_>) -> Option<Poll<()>> {
        let (mut tx, msg) = self.terminate_message.take()?;
        match tx.poll_reserve(cx) {
            Poll::Pending => {
                self.terminate_message = Some((tx, msg));
                return Some(Poll::Pending)
            }
            Poll::Ready(Ok(())) => {
                let _ = tx.send_item(msg);
            }
            Poll::Ready(Err(_)) => {
                // channel closed
            }
        }
        // terminate the task
        Some(Poll::Ready(()))
    }
}
impl<N: NetworkPrimitives> Future for ActiveSession<N> {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
// if the session is terminate we have to send the termination message before we can close
if let Some(terminate) = this.poll_terminate_message(cx) {
return terminate
}
if this.is_disconnecting() {
return this.poll_disconnect(cx)
}
// The receive loop can be CPU intensive since it involves message decoding which could take
// up a lot of resources and increase latencies for other sessions if not yielded manually.
// If the budget is exhausted we manually yield back control to the (coop) scheduler. This
// manual yield point should prevent situations where polling appears to be frozen. See also <https://tokio.rs/blog/2020-04-preemption>
// And tokio's docs on cooperative scheduling <https://docs.rs/tokio/latest/tokio/task/#cooperative-scheduling>
let mut budget = 4;
// The main poll loop that drives the session
'main: loop {
let mut progress = false;
// we prioritize incoming commands sent from the session manager
loop {
match this.commands_rx.poll_next_unpin(cx) {
Poll::Pending => break,
Poll::Ready(None) => {
// this is only possible when the manager was dropped, in which case we also
// terminate this session
return Poll::Ready(())
}
Poll::Ready(Some(cmd)) => {
progress = true;
match cmd {
SessionCommand::Disconnect { reason } => {
debug!(
target: "net::session",
?reason,
remote_peer_id=?this.remote_peer_id,
"Received disconnect command for session"
);
let reason =
reason.unwrap_or(DisconnectReason::DisconnectRequested);
return this.try_disconnect(reason, cx)
}
SessionCommand::Message(msg) => {
this.on_internal_peer_message(msg);
}
}
}
}
}
let deadline = this.request_deadline();
while let Poll::Ready(Some(req)) = this.internal_request_rx.poll_next_unpin(cx) {
progress = true;
this.on_internal_peer_request(req, deadline);
}
// Advance all active requests.
// We remove each request one by one and add them back.
for idx in (0..this.received_requests_from_remote.len()).rev() {
let mut req = this.received_requests_from_remote.swap_remove(idx);
match req.rx.poll(cx) {
Poll::Pending => {
// not ready yet
this.received_requests_from_remote.push(req);
}
Poll::Ready(resp) => {
this.handle_outgoing_response(req.request_id, resp);
}
}
}
// Send messages by advancing the sink and queuing in buffered messages
while this.conn.poll_ready_unpin(cx).is_ready() {
if let Some(msg) = this.queued_outgoing.pop_front() {
progress = true;
let res = match msg {
OutgoingMessage::Eth(msg) => this.conn.start_send_unpin(msg),
OutgoingMessage::Broadcast(msg) => this.conn.start_send_broadcast(msg),
OutgoingMessage::Raw(msg) => this.conn.start_send_raw(msg),
};
if let Err(err) = res {
debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to send message");
// notify the manager
return this.close_on_error(err, cx)
}
} else {
// no more messages to send over the wire
break
}
}
// read incoming messages from the wire
'receive: loop {
// ensure we still have enough budget for another iteration
budget -= 1;
if budget == 0 {
// make sure we're woken up again
cx.waker().wake_by_ref();
break 'main
}
// try to resend the pending message that we could not send because the channel was
// full. [`PollSender`] will ensure that we're woken up again when the channel is
// ready to receive the message, and will only error if the channel is closed.
if let Some(msg) = this.pending_message_to_session.take() {
match this.to_session_manager.poll_reserve(cx) {
Poll::Ready(Ok(_)) => {
let _ = this.to_session_manager.send_item(msg);
}
Poll::Ready(Err(_)) => return Poll::Ready(()),
Poll::Pending => {
this.pending_message_to_session = Some(msg);
break 'receive
}
};
}
// check whether we should throttle incoming messages
if this.received_requests_from_remote.len() > MAX_QUEUED_OUTGOING_RESPONSES {
// we're currently waiting for the responses to the peer's requests which aren't
// queued as outgoing yet
//
// Note: we don't need to register the waker here because we polled the requests
// above
break 'receive
}
// we also need to check if we have multiple responses queued up
if this.queued_outgoing.messages.len() > MAX_QUEUED_OUTGOING_RESPONSES &&
this.queued_response_count() > MAX_QUEUED_OUTGOING_RESPONSES
{
// if we've queued up more responses than allowed, we don't poll for new
// messages and break the receive loop early
//
// Note: we don't need to register the waker here because we still have
// queued messages and the sink impl registered the waker because we've
// already advanced it to `Pending` earlier
break 'receive
}
match this.conn.poll_next_unpin(cx) {
Poll::Pending => break,
Poll::Ready(None) => {
if this.is_disconnecting() {
break
}
debug!(target: "net::session", remote_peer_id=?this.remote_peer_id, "eth stream completed");
return this.emit_disconnect(cx)
}
Poll::Ready(Some(res)) => {
match res {
Ok(msg) => {
trace!(target: "net::session", msg_id=?msg.message_id(), remote_peer_id=?this.remote_peer_id, "received eth message");
// decode and handle message
match this.on_incoming_message(msg) {
OnIncomingMessageOutcome::Ok => {
// handled successfully
progress = true;
}
OnIncomingMessageOutcome::BadMessage { error, message } => {
debug!(target: "net::session", %error, msg=?message, remote_peer_id=?this.remote_peer_id, "received invalid protocol message");
return this.close_on_error(error, cx)
}
OnIncomingMessageOutcome::NoCapacity(msg) => {
// failed to send due to lack of capacity
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/session/handle.rs | crates/net/network/src/session/handle.rs | //! Session handles.
use crate::{
message::PeerMessage,
session::{conn::EthRlpxConnection, Direction, SessionId},
PendingSessionHandshakeError,
};
use reth_ecies::ECIESError;
use reth_eth_wire::{
errors::EthStreamError, Capabilities, DisconnectReason, EthVersion, NetworkPrimitives,
UnifiedStatus,
};
use reth_network_api::PeerInfo;
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::PeerKind;
use std::{io, net::SocketAddr, sync::Arc, time::Instant};
use tokio::sync::{
mpsc::{self, error::SendError},
oneshot,
};
/// A handler attached to a peer session that's not authenticated yet, pending Handshake and hello
/// message which exchanges the `capabilities` of the peer.
///
/// This session needs to wait until it is authenticated.
#[derive(Debug)]
pub struct PendingSessionHandle {
    /// Can be used to tell the session to disconnect the connection/abort the handshake process.
    // One-shot: `None` after `disconnect()` has been called once.
    pub(crate) disconnect_tx: Option<oneshot::Sender<()>>,
    /// The direction of the session
    pub(crate) direction: Direction,
}
// === impl PendingSessionHandle ===

impl PendingSessionHandle {
    /// Signals the pending session to abort the in-flight handshake and drop the connection.
    ///
    /// The signal is one-shot: subsequent calls are no-ops.
    pub fn disconnect(&mut self) {
        if let Some(signal) = self.disconnect_tx.take() {
            // The session task may already have terminated; ignore a closed channel.
            let _ = signal.send(());
        }
    }

    /// Returns the direction of the pending session (inbound or outbound).
    pub const fn direction(&self) -> Direction {
        self.direction
    }
}
/// An established session with a remote peer.
///
/// Within an active session that supports the `Ethereum Wire Protocol`, three high-level tasks can
/// be performed: chain synchronization, block propagation and transaction exchange.
#[derive(Debug)]
pub struct ActiveSessionHandle<N: NetworkPrimitives> {
    /// The direction of the session
    pub(crate) direction: Direction,
    /// The assigned id for this session
    pub(crate) session_id: SessionId,
    /// negotiated eth version
    pub(crate) version: EthVersion,
    /// The identifier of the remote peer
    pub(crate) remote_id: PeerId,
    /// The timestamp when the session has been established.
    pub(crate) established: Instant,
    /// Announced capabilities of the peer.
    pub(crate) capabilities: Arc<Capabilities>,
    /// Sender half of the command channel used send commands _to_ the spawned session
    pub(crate) commands_to_session: mpsc::Sender<SessionCommand<N>>,
    /// The client's name and version
    pub(crate) client_version: Arc<str>,
    /// The address we're connected to
    pub(crate) remote_addr: SocketAddr,
    /// The local address of the connection.
    pub(crate) local_addr: Option<SocketAddr>,
    /// The Status message the peer sent for the `eth` handshake
    pub(crate) status: Arc<UnifiedStatus>,
}
// === impl ActiveSessionHandle ===

impl<N: NetworkPrimitives> ActiveSessionHandle<N> {
    /// Sends a disconnect command to the session.
    ///
    /// Best effort: the command is dropped if the channel cannot accept it immediately.
    pub fn disconnect(&self, reason: Option<DisconnectReason>) {
        // Note: we clone the sender which ensures the channel has capacity to send the message
        // NOTE(review): the capacity claim above depends on how the channel is sized at
        // session spawn — confirm against the `SessionManager` setup.
        let _ = self.commands_to_session.clone().try_send(SessionCommand::Disconnect { reason });
    }

    /// Sends a disconnect command to the session, awaiting the command channel for available
    /// capacity.
    pub async fn try_disconnect(
        &self,
        reason: Option<DisconnectReason>,
    ) -> Result<(), SendError<SessionCommand<N>>> {
        self.commands_to_session.clone().send(SessionCommand::Disconnect { reason }).await
    }

    /// Returns the direction of the active session (inbound or outbound).
    pub const fn direction(&self) -> Direction {
        self.direction
    }

    /// Returns the assigned session id for this session.
    pub const fn session_id(&self) -> SessionId {
        self.session_id
    }

    /// Returns the negotiated eth version for this session.
    pub const fn version(&self) -> EthVersion {
        self.version
    }

    /// Returns the identifier of the remote peer.
    pub const fn remote_id(&self) -> PeerId {
        self.remote_id
    }

    /// Returns the timestamp when the session has been established.
    pub const fn established(&self) -> Instant {
        self.established
    }

    /// Returns the announced capabilities of the peer.
    pub fn capabilities(&self) -> Arc<Capabilities> {
        self.capabilities.clone()
    }

    /// Returns the client's name and version.
    pub fn client_version(&self) -> Arc<str> {
        self.client_version.clone()
    }

    /// Returns the address we're connected to.
    pub const fn remote_addr(&self) -> SocketAddr {
        self.remote_addr
    }

    /// Extracts the [`PeerInfo`] from the session handle.
    pub(crate) fn peer_info(&self, record: &NodeRecord, kind: PeerKind) -> PeerInfo {
        PeerInfo {
            remote_id: self.remote_id,
            direction: self.direction,
            enode: record.to_string(),
            enr: None,
            remote_addr: self.remote_addr,
            local_addr: self.local_addr,
            capabilities: self.capabilities.clone(),
            client_version: self.client_version.clone(),
            eth_version: self.version,
            status: self.status.clone(),
            session_established: self.established,
            kind,
        }
    }
}
/// Events a pending session can produce.
///
/// This represents the state changes a session can undergo until it is ready to send capability messages <https://github.com/ethereum/devp2p/blob/6b0abc3d956a626c28dce1307ee9f546db17b6bd/rlpx.md>.
///
/// A session starts with a `Handshake`, followed by a `Hello` message which
#[derive(Debug)]
pub enum PendingSessionEvent<N: NetworkPrimitives> {
    /// Represents a successful `Hello` and `Status` exchange: <https://github.com/ethereum/devp2p/blob/6b0abc3d956a626c28dce1307ee9f546db17b6bd/rlpx.md#hello-0x00>
    Established {
        /// An internal identifier for the established session
        session_id: SessionId,
        /// The remote node's socket address
        remote_addr: SocketAddr,
        /// The local address of the connection
        local_addr: Option<SocketAddr>,
        /// The remote node's public key
        peer_id: PeerId,
        /// All capabilities the peer announced
        capabilities: Arc<Capabilities>,
        /// The Status message the peer sent for the `eth` handshake
        status: Arc<UnifiedStatus>,
        /// The actual connection stream which can be used to send and receive `eth` protocol
        /// messages
        conn: EthRlpxConnection<N>,
        /// The direction of the session, either `Inbound` or `Outgoing`
        direction: Direction,
        /// The remote node's user agent, usually containing the client name and version
        client_id: String,
    },
    /// Handshake unsuccessful, session was disconnected.
    Disconnected {
        /// The remote node's socket address
        remote_addr: SocketAddr,
        /// The internal identifier for the disconnected session
        session_id: SessionId,
        /// The direction of the session, either `Inbound` or `Outgoing`
        direction: Direction,
        /// The error that caused the disconnect
        error: Option<PendingSessionHandshakeError>,
    },
    /// Thrown when unable to establish a [`TcpStream`](tokio::net::TcpStream).
    OutgoingConnectionError {
        /// The remote node's socket address
        remote_addr: SocketAddr,
        /// The internal identifier for the disconnected session
        session_id: SessionId,
        /// The remote node's public key
        peer_id: PeerId,
        /// The error that caused the outgoing connection failure
        error: io::Error,
    },
    /// Thrown when authentication via ECIES failed.
    EciesAuthError {
        /// The remote node's socket address
        remote_addr: SocketAddr,
        /// The internal identifier for the disconnected session
        session_id: SessionId,
        /// The error that caused the ECIES session to fail
        error: ECIESError,
        /// The direction of the session, either `Inbound` or `Outgoing`
        direction: Direction,
    },
}
/// Commands that can be sent to the spawned session.
#[derive(Debug)]
pub enum SessionCommand<N: NetworkPrimitives> {
    /// Disconnect the connection
    Disconnect {
        /// Why the disconnect was initiated
        // `None` falls back to a default reason chosen by the session task.
        reason: Option<DisconnectReason>,
    },
    /// Sends a message to the peer
    Message(PeerMessage<N>),
}
/// Message variants an active session can produce and send back to the
/// [`SessionManager`](crate::session::SessionManager)
#[derive(Debug)]
pub enum ActiveSessionMessage<N: NetworkPrimitives> {
    /// Session was gracefully disconnected.
    Disconnected {
        /// The remote node's public key
        peer_id: PeerId,
        /// The remote node's socket address
        remote_addr: SocketAddr,
    },
    /// Session was closed due to an error
    ClosedOnConnectionError {
        /// The remote node's public key
        peer_id: PeerId,
        /// The remote node's socket address
        remote_addr: SocketAddr,
        /// The error that caused the session to close
        error: EthStreamError,
    },
    /// A session received a valid message via `RLPx`.
    ValidMessage {
        /// Identifier of the remote peer.
        peer_id: PeerId,
        /// Message received from the peer.
        message: PeerMessage<N>,
    },
    /// Received a bad message from the peer.
    BadMessage {
        /// Identifier of the remote peer.
        peer_id: PeerId,
    },
    /// Remote peer is considered in protocol violation
    ProtocolBreach {
        /// Identifier of the remote peer.
        peer_id: PeerId,
    },
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/transactions/config.rs | crates/net/network/src/transactions/config.rs | use std::{fmt::Debug, marker::PhantomData, str::FromStr};
use super::{
PeerMetadata, DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER,
DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
};
use crate::transactions::constants::tx_fetcher::{
DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS,
DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER,
};
use alloy_primitives::B256;
use derive_more::{Constructor, Display};
use reth_eth_wire::NetworkPrimitives;
use reth_ethereum_primitives::TxType;
/// Configuration for managing transactions within the network.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TransactionsManagerConfig {
    /// Configuration for fetching transactions.
    pub transaction_fetcher_config: TransactionFetcherConfig,
    /// Max number of seen transactions to store for each peer.
    pub max_transactions_seen_by_peer_history: u32,
    /// How new pending transactions are propagated.
    ///
    /// Falls back to [`TransactionPropagationMode::default`] when missing from a serialized
    /// config (`serde(default)`).
    #[cfg_attr(feature = "serde", serde(default))]
    pub propagation_mode: TransactionPropagationMode,
}
impl Default for TransactionsManagerConfig {
    /// Default fetcher settings, default propagation mode, and the default per-peer
    /// seen-transaction history size.
    fn default() -> Self {
        Self {
            transaction_fetcher_config: Default::default(),
            max_transactions_seen_by_peer_history: DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER,
            propagation_mode: Default::default(),
        }
    }
}
/// Determines how new pending transactions are propagated to other peers in full.
///
/// See [`TransactionPropagationMode::full_peer_count`] for how the peer count is derived.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TransactionPropagationMode {
    /// Send full transactions to sqrt of current peers.
    #[default]
    Sqrt,
    /// Always send transactions in full.
    All,
    /// Send full transactions to a maximum number of peers
    Max(usize),
}
impl TransactionPropagationMode {
    /// Returns the number of peers full transactions should be propagated to.
    pub(crate) fn full_peer_count(&self, peer_count: usize) -> usize {
        match self {
            // roughly sqrt(n) of the connected peers receive full transactions
            Self::Sqrt => (peer_count as f64).sqrt().round() as usize,
            // every connected peer receives full transactions
            Self::All => peer_count,
            // at most `cap` peers, bounded by how many peers are connected
            Self::Max(cap) => (*cap).min(peer_count),
        }
    }
}
impl FromStr for TransactionPropagationMode {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.to_lowercase();
match s.as_str() {
"sqrt" => Ok(Self::Sqrt),
"all" => Ok(Self::All),
s => {
if let Some(num) = s.strip_prefix("max:") {
num.parse::<usize>()
.map(TransactionPropagationMode::Max)
.map_err(|_| format!("Invalid number for Max variant: {num}"))
} else {
Err(format!("Invalid transaction propagation mode: {s}"))
}
}
}
}
}
/// Configuration for fetching transactions.
#[derive(Debug, Constructor, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TransactionFetcherConfig {
    /// Max inflight [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) requests.
    pub max_inflight_requests: u32,
    /// Max inflight [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) requests per
    /// peer.
    pub max_inflight_requests_per_peer: u8,
    /// Soft limit for the byte size of a
    /// [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on assembling a
    /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request. Spec'd at 2
    /// MiB.
    pub soft_limit_byte_size_pooled_transactions_response: usize,
    /// Soft limit for the byte size of the expected
    /// [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on packing a
    /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request with hashes.
    pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize,
    /// Max capacity of the cache of transaction hashes, for transactions that weren't yet fetched.
    /// A transaction is pending fetch if its hash didn't fit into a
    /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) yet, or it wasn't returned
    /// upon request to peers.
    pub max_capacity_cache_txns_pending_fetch: u32,
}
/// Default values are taken from the crate-level fetcher constants.
impl Default for TransactionFetcherConfig {
    fn default() -> Self {
        Self {
            max_inflight_requests: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS,
            max_inflight_requests_per_peer: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER,
            soft_limit_byte_size_pooled_transactions_response:
                SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
            soft_limit_byte_size_pooled_transactions_response_on_pack_request:
                DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
            max_capacity_cache_txns_pending_fetch: DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH,
        }
    }
}
/// A policy defining which peers pending transactions are gossiped to.
pub trait TransactionPropagationPolicy: Send + Sync + Unpin + 'static {
    /// Filter a given peer based on the policy.
    ///
    /// This determines whether transactions can be propagated to this peer.
    fn can_propagate<N: NetworkPrimitives>(&self, peer: &mut PeerMetadata<N>) -> bool;
    /// A callback on the policy when a new peer session is established.
    fn on_session_established<N: NetworkPrimitives>(&mut self, peer: &mut PeerMetadata<N>);
    /// A callback on the policy when a peer session is closed.
    fn on_session_closed<N: NetworkPrimitives>(&mut self, peer: &mut PeerMetadata<N>);
}
/// Determines which peers pending transactions are propagated to.
///
/// This is a stateless, built-in implementation of [`TransactionPropagationPolicy`].
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Display)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TransactionPropagationKind {
    /// Propagate transactions to all peers.
    ///
    /// No restrictions
    #[default]
    All,
    /// Propagate transactions to only trusted peers.
    Trusted,
    /// Do not propagate transactions
    None,
}
impl TransactionPropagationPolicy for TransactionPropagationKind {
    /// Gates propagation on the policy kind: `None` never propagates, `Trusted` only to
    /// trusted peers, `All` to everyone.
    fn can_propagate<N: NetworkPrimitives>(&self, peer: &mut PeerMetadata<N>) -> bool {
        match self {
            Self::None => false,
            Self::Trusted => peer.peer_kind.is_trusted(),
            Self::All => true,
        }
    }

    /// This policy keeps no per-session state, so this is a no-op.
    fn on_session_established<N: NetworkPrimitives>(&mut self, _peer: &mut PeerMetadata<N>) {}

    /// This policy keeps no per-session state, so this is a no-op.
    fn on_session_closed<N: NetworkPrimitives>(&mut self, _peer: &mut PeerMetadata<N>) {}
}
impl FromStr for TransactionPropagationKind {
    type Err = String;

    /// Parses a propagation policy kind: `"all"`, `"trusted"`, or `"none"`.
    ///
    /// Matching is case-insensitive, consistent with
    /// [`TransactionPropagationMode::from_str`] (the original only accepted exact-case
    /// `"All"`/`"all"` etc.; all previously accepted inputs still parse).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "all" => Ok(Self::All),
            "trusted" => Ok(Self::Trusted),
            "none" => Ok(Self::None),
            _ => Err(format!("Invalid transaction propagation policy: {s}")),
        }
    }
}
/// Defines the outcome of evaluating a transaction against an `AnnouncementFilteringPolicy`.
///
/// Dictates how the `TransactionManager` should proceed on an announced transaction.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AnnouncementAcceptance {
    /// Accept the transaction announcement.
    Accept,
    /// Log the transaction, but do not fetch it or penalize the announcing peer.
    Ignore,
    /// Reject the transaction announcement.
    Reject {
        /// If true, the peer sending this announcement should be penalized.
        penalize_peer: bool,
    },
}
/// A policy that defines how to handle incoming transaction announcements,
/// particularly concerning transaction types and other announcement metadata.
pub trait AnnouncementFilteringPolicy: Send + Sync + Unpin + 'static {
    /// Decides how to handle a transaction announcement based on its type, hash, and size.
    fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance;
}
/// A generic `AnnouncementFilteringPolicy` that enforces strict validation
/// of transaction type based on a generic type `T`.
///
/// `PhantomData` carries the type parameter without storing any data.
#[derive(Debug, Clone)]
pub struct TypedStrictFilter<T: TryFrom<u8> + Debug + Send + Sync + 'static>(PhantomData<T>);
impl<T: TryFrom<u8> + Debug + Send + Sync + 'static> Default for TypedStrictFilter<T> {
    fn default() -> Self {
        Self(PhantomData)
    }
}
impl<T> AnnouncementFilteringPolicy for TypedStrictFilter<T>
where
    T: TryFrom<u8> + Debug + Send + Sync + Unpin + 'static,
    <T as TryFrom<u8>>::Error: Debug,
{
    /// Accepts announcements whose type byte parses into `T`; rejects everything else and
    /// recommends penalizing the announcing peer.
    fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance {
        let Err(e) = T::try_from(ty) else { return AnnouncementAcceptance::Accept };
        tracing::trace!(target: "net::tx::policy::strict_typed",
            type_param = %std::any::type_name::<T>(),
            %ty,
            %size,
            %hash,
            error = ?e,
            "Invalid or unrecognized transaction type byte. Rejecting entry and recommending peer penalization."
        );
        AnnouncementAcceptance::Reject { penalize_peer: true }
    }
}
/// Type alias for a `TypedStrictFilter`. This is the default strict announcement filter.
/// Rejects announcements whose type byte is not a known [`TxType`], recommending peer
/// penalization.
pub type StrictEthAnnouncementFilter = TypedStrictFilter<TxType>;
/// An [`AnnouncementFilteringPolicy`] that permissively handles unknown type bytes
/// based on a given type `T` using `T::try_from(u8)`.
///
/// If `T::try_from(ty)` succeeds, the announcement is accepted. Otherwise, it's ignored.
///
/// `PhantomData` carries the type parameter without storing any data.
#[derive(Debug, Clone)]
pub struct TypedRelaxedFilter<T: TryFrom<u8> + Debug + Send + Sync + 'static>(PhantomData<T>);
impl<T: TryFrom<u8> + Debug + Send + Sync + 'static> Default for TypedRelaxedFilter<T> {
    fn default() -> Self {
        Self(PhantomData)
    }
}
impl<T> AnnouncementFilteringPolicy for TypedRelaxedFilter<T>
where
    T: TryFrom<u8> + Debug + Send + Sync + Unpin + 'static,
    <T as TryFrom<u8>>::Error: Debug,
{
    /// Accepts announcements whose type byte parses into `T`; silently ignores everything
    /// else without penalizing the announcing peer.
    fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance {
        let Err(e) = T::try_from(ty) else { return AnnouncementAcceptance::Accept };
        tracing::trace!(target: "net::tx::policy::relaxed_typed",
            type_param = %std::any::type_name::<T>(),
            %ty,
            %size,
            %hash,
            error = ?e,
            "Unknown transaction type byte. Ignoring entry."
        );
        AnnouncementAcceptance::Ignore
    }
}
/// Type alias for `TypedRelaxedFilter`. This filter accepts known Ethereum transaction types and
/// ignores unknown ones without penalizing the peer.
pub type RelaxedEthAnnouncementFilter = TypedRelaxedFilter<TxType>;
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_transaction_propagation_mode_from_str() {
        // Valid inputs parse case-insensitively into the expected variant.
        let valid = [
            ("sqrt", TransactionPropagationMode::Sqrt),
            ("SQRT", TransactionPropagationMode::Sqrt),
            ("Sqrt", TransactionPropagationMode::Sqrt),
            ("all", TransactionPropagationMode::All),
            ("ALL", TransactionPropagationMode::All),
            ("All", TransactionPropagationMode::All),
            ("max:10", TransactionPropagationMode::Max(10)),
            ("MAX:42", TransactionPropagationMode::Max(42)),
            ("Max:100", TransactionPropagationMode::Max(100)),
        ];
        for (input, expected) in valid {
            assert_eq!(TransactionPropagationMode::from_str(input).unwrap(), expected);
        }

        // Malformed inputs are rejected.
        for input in ["invalid", "max:not_a_number", "max:", "max", ""] {
            assert!(TransactionPropagationMode::from_str(input).is_err());
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/transactions/fetcher.rs | crates/net/network/src/transactions/fetcher.rs | //! `TransactionFetcher` is responsible for rate limiting and retry logic for fetching
//! transactions. Upon receiving an announcement, functionality of the `TransactionFetcher` is
//! used for filtering out hashes 1) for which the tx is already known and 2) unknown but the hash
//! is already seen in a previous announcement. The hashes that remain from an announcement are
//! then packed into a request with respect to the [`EthVersion`] of the announcement. Any hashes
//! that don't fit into the request, are buffered in the `TransactionFetcher`. If on the other
//! hand, space remains, hashes that the peer has previously announced are taken out of buffered
//! hashes to fill the request up. The [`GetPooledTransactions`] request is then sent to the
//! peer's session, this marks the peer as active with respect to
//! `MAX_CONCURRENT_TX_REQUESTS_PER_PEER`.
//!
//! When a peer buffers hashes in the `TransactionsManager::on_new_pooled_transaction_hashes`
//! pipeline, it is stored as fallback peer for those hashes. When [`TransactionsManager`] is
//! polled, it checks if any of fallback peer is idle. If so, it packs a request for that peer,
//! filling it from the buffered hashes. It does so until there are no more idle peers or until
//! the hashes buffer is empty.
//!
//! If a [`GetPooledTransactions`] request resolves with an error, the hashes in the request are
//! buffered with respect to `MAX_REQUEST_RETRIES_PER_TX_HASH`. So is the case if the request
//! resolves with partial success, that is some of the requested hashes are not in the response,
//! these are then buffered.
//!
//! Most healthy peers will send the same hashes in their announcements, as RLPx is a gossip
//! protocol. This means it's unlikely, that a valid hash, will be buffered for very long
//! before it's re-tried. Nonetheless, the capacity of the buffered hashes cache must be large
//! enough to buffer many hashes during network failure, to allow for recovery.
use super::{
config::TransactionFetcherConfig,
constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST},
PeerMetadata, PooledTransactions, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
};
use crate::{
cache::{LruCache, LruMap},
duration_metered_exec,
metrics::TransactionFetcherMetrics,
};
use alloy_consensus::transaction::PooledTransaction;
use alloy_primitives::TxHash;
use derive_more::{Constructor, Deref};
use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt};
use pin_project::pin_project;
use reth_eth_wire::{
DedupPayload, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData,
PartiallyValidData, RequestTxHashes, ValidAnnouncementData,
};
use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives};
use reth_network_api::PeerRequest;
use reth_network_p2p::error::{RequestError, RequestResult};
use reth_network_peers::PeerId;
use reth_primitives_traits::SignedTransaction;
use schnellru::ByLength;
use std::{
collections::HashMap,
pin::Pin,
task::{ready, Context, Poll},
time::Duration,
};
use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError};
use tracing::trace;
/// The type responsible for fetching missing transactions from peers.
///
/// This will keep track of unique transaction hashes that are currently being fetched and submits
/// new requests on announced hashes.
#[derive(Debug)]
#[pin_project]
pub struct TransactionFetcher<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// All peers to which a [`GetPooledTransactions`] request is inflight, mapped to their
    /// inflight request count.
    pub active_peers: LruMap<PeerId, u8, ByLength>,
    /// All currently active [`GetPooledTransactions`] requests.
    ///
    /// The set of hashes encompassed by these requests are a subset of all hashes in the fetcher.
    /// It's disjoint from the set of hashes which are awaiting an idle fallback peer in order to
    /// be fetched.
    #[pin]
    pub inflight_requests: FuturesUnordered<GetPooledTxRequestFut<N::PooledTransaction>>,
    /// Hashes that are awaiting an idle fallback peer so they can be fetched.
    ///
    /// This is a subset of all hashes in the fetcher, and is disjoint from the set of hashes for
    /// which a [`GetPooledTransactions`] request is inflight.
    pub hashes_pending_fetch: LruCache<TxHash>,
    /// Tracks all hashes in the transaction fetcher, with retry count, fallback peers and
    /// announced size stored per hash in [`TxFetchMetadata`].
    pub hashes_fetch_inflight_and_pending_fetch: LruMap<TxHash, TxFetchMetadata, ByLength>,
    /// Info on capacity of the transaction fetcher.
    pub info: TransactionFetcherInfo,
    #[doc(hidden)]
    metrics: TransactionFetcherMetrics,
}
impl<N: NetworkPrimitives> TransactionFetcher<N> {
    /// Removes the peer from the active set.
    pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) {
        self.active_peers.remove(peer_id);
    }
    /// Updates metrics.
    ///
    /// Hashes tracked in `hashes_fetch_inflight_and_pending_fetch` but not in
    /// `hashes_pending_fetch` are inflight, so the inflight-hashes gauge is derived as the
    /// difference of the two counts.
    #[inline]
    pub fn update_metrics(&self) {
        let metrics = &self.metrics;
        metrics.inflight_transaction_requests.set(self.inflight_requests.len() as f64);
        let hashes_pending_fetch = self.hashes_pending_fetch.len() as f64;
        let total_hashes = self.hashes_fetch_inflight_and_pending_fetch.len() as f64;
        metrics.hashes_pending_fetch.set(hashes_pending_fetch);
        metrics.hashes_inflight_transaction_requests.set(total_hashes - hashes_pending_fetch);
    }
    /// Records how long the two search phases of fetching pending hashes took.
    #[inline]
    fn update_pending_fetch_cache_search_metrics(&self, durations: TxFetcherSearchDurations) {
        let metrics = &self.metrics;
        let TxFetcherSearchDurations { find_idle_peer, fill_request } = durations;
        metrics
            .duration_find_idle_fallback_peer_for_any_pending_hash
            .set(find_idle_peer.as_secs_f64());
        metrics.duration_fill_request_from_hashes_pending_fetch.set(fill_request.as_secs_f64());
    }
    /// Sets up transaction fetcher with config
    ///
    /// The unified tracking map is sized to hold both the inflight hashes and the hashes
    /// pending fetch.
    pub fn with_transaction_fetcher_config(config: &TransactionFetcherConfig) -> Self {
        let TransactionFetcherConfig {
            max_inflight_requests,
            max_capacity_cache_txns_pending_fetch,
            ..
        } = *config;
        let info = config.clone().into();
        let metrics = TransactionFetcherMetrics::default();
        metrics.capacity_inflight_requests.increment(max_inflight_requests as u64);
        Self {
            active_peers: LruMap::new(max_inflight_requests),
            hashes_pending_fetch: LruCache::new(max_capacity_cache_txns_pending_fetch),
            hashes_fetch_inflight_and_pending_fetch: LruMap::new(
                max_inflight_requests + max_capacity_cache_txns_pending_fetch,
            ),
            info,
            metrics,
            // remaining fields (e.g. `inflight_requests`) use their `Default` values
            ..Default::default()
        }
    }
/// Removes the specified hashes from inflight tracking.
#[inline]
pub fn remove_hashes_from_transaction_fetcher<I>(&mut self, hashes: I)
where
I: IntoIterator<Item = TxHash>,
{
for hash in hashes {
self.hashes_fetch_inflight_and_pending_fetch.remove(&hash);
self.hashes_pending_fetch.remove(&hash);
}
}
/// Updates peer's activity status upon a resolved [`GetPooledTxRequest`].
fn decrement_inflight_request_count_for(&mut self, peer_id: &PeerId) {
let remove = || -> bool {
if let Some(inflight_count) = self.active_peers.get(peer_id) {
*inflight_count = inflight_count.saturating_sub(1);
if *inflight_count == 0 {
return true
}
}
false
}();
if remove {
self.active_peers.remove(peer_id);
}
}
/// Returns `true` if peer is idle with respect to `self.inflight_requests`.
#[inline]
pub fn is_idle(&self, peer_id: &PeerId) -> bool {
let Some(inflight_count) = self.active_peers.peek(peer_id) else { return true };
if *inflight_count < self.info.max_inflight_requests_per_peer {
return true
}
false
}
/// Returns any idle peer for the given hash.
pub fn get_idle_peer_for(&self, hash: TxHash) -> Option<&PeerId> {
let TxFetchMetadata { fallback_peers, .. } =
self.hashes_fetch_inflight_and_pending_fetch.peek(&hash)?;
for peer_id in fallback_peers.iter() {
if self.is_idle(peer_id) {
return Some(peer_id)
}
}
None
}
    /// Returns any idle peer for any hash pending fetch. If one is found, the corresponding
    /// hash is written to the request buffer that is passed as parameter.
    ///
    /// Loops through the hashes pending fetch in lru order until one is found with an idle
    /// fallback peer, or the budget passed as parameter is depleted, whatever happens first.
    pub fn find_any_idle_fallback_peer_for_any_pending_hash(
        &mut self,
        hashes_to_request: &mut RequestTxHashes,
        mut budget: Option<usize>, // search fallback peers for max `budget` lru pending hashes
    ) -> Option<PeerId> {
        let mut hashes_pending_fetch_iter = self.hashes_pending_fetch.iter();
        let idle_peer = loop {
            // iterator exhausted without finding an idle peer -> `None` via `?`
            let &hash = hashes_pending_fetch_iter.next()?;
            let idle_peer = self.get_idle_peer_for(hash);
            if idle_peer.is_some() {
                hashes_to_request.insert(hash);
                break idle_peer.copied()
            }
            // the budget is only charged for hashes that had no idle fallback peer
            if let Some(ref mut bud) = budget {
                *bud = bud.saturating_sub(1);
                if *bud == 0 {
                    return None
                }
            }
        };
        let hash = hashes_to_request.iter().next()?;
        // pop hash that is loaded in request buffer from cache of hashes pending fetch
        // (the iterator's borrow of the cache must end before mutating it)
        drop(hashes_pending_fetch_iter);
        _ = self.hashes_pending_fetch.remove(hash);
        idle_peer
    }
/// Packages hashes for a [`GetPooledTxRequest`] up to limit. Returns left over hashes. Takes
/// a [`RequestTxHashes`] buffer as parameter for filling with hashes to request.
///
/// Returns left over hashes.
pub fn pack_request(
&self,
hashes_to_request: &mut RequestTxHashes,
hashes_from_announcement: ValidAnnouncementData,
) -> RequestTxHashes {
if hashes_from_announcement.msg_version().is_eth68() {
return self.pack_request_eth68(hashes_to_request, hashes_from_announcement)
}
self.pack_request_eth66(hashes_to_request, hashes_from_announcement)
}
    /// Packages hashes for a [`GetPooledTxRequest`] from an
    /// [`Eth68`](reth_eth_wire::EthVersion::Eth68) announcement up to limit as defined by protocol
    /// version 68. Takes a [`RequestTxHashes`] buffer as parameter for filling with hashes to
    /// request.
    ///
    /// Returns left over hashes.
    ///
    /// Loops through hashes passed as parameter and checks if a hash fits in the expected
    /// response. If no, it's added to surplus hashes. If yes, it's added to hashes to the request
    /// and expected response size is accumulated.
    pub fn pack_request_eth68(
        &self,
        hashes_to_request: &mut RequestTxHashes,
        hashes_from_announcement: impl HandleMempoolData
            + IntoIterator<Item = (TxHash, Option<(u8, usize)>)>,
    ) -> RequestTxHashes {
        let mut acc_size_response = 0;
        let mut hashes_from_announcement_iter = hashes_from_announcement.into_iter();
        // the first entry seeds the accumulator; eth68 entries carry (type, size) metadata
        if let Some((hash, Some((_ty, size)))) = hashes_from_announcement_iter.next() {
            hashes_to_request.insert(hash);
            // tx is really big, pack request with single tx
            if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request {
                return hashes_from_announcement_iter.collect()
            }
            acc_size_response = size;
        }
        let mut surplus_hashes = RequestTxHashes::default();
        // folds size based on expected response size and adds selected hashes to the request
        // list and the other hashes to the surplus list
        loop {
            let Some((hash, metadata)) = hashes_from_announcement_iter.next() else { break };
            let Some((_ty, size)) = metadata else {
                unreachable!("this method is called upon reception of an eth68 announcement")
            };
            let next_acc_size = acc_size_response + size;
            if next_acc_size <=
                self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request
            {
                // only update accumulated size of tx response if tx will fit in without exceeding
                // soft limit
                acc_size_response = next_acc_size;
                _ = hashes_to_request.insert(hash)
            } else {
                _ = surplus_hashes.insert(hash)
            }
            // stop early once remaining space is below the median size of a small legacy tx
            let free_space =
                self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request -
                acc_size_response;
            if free_space < MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED {
                break
            }
        }
        // anything left in the announcement iterator didn't make it into the request
        surplus_hashes.extend(hashes_from_announcement_iter.map(|(hash, _metadata)| hash));
        surplus_hashes
    }
/// Packages hashes for a [`GetPooledTxRequest`] from an
/// [`Eth66`](reth_eth_wire::EthVersion::Eth66) announcement up to limit as defined by
/// protocol version 66. Takes a [`RequestTxHashes`] buffer as parameter for filling with
/// hashes to request.
///
/// Returns left over hashes.
pub fn pack_request_eth66(
&self,
hashes_to_request: &mut RequestTxHashes,
hashes_from_announcement: ValidAnnouncementData,
) -> RequestTxHashes {
let (mut hashes, _version) = hashes_from_announcement.into_request_hashes();
if hashes.len() <= SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST {
*hashes_to_request = hashes;
hashes_to_request.shrink_to_fit();
RequestTxHashes::default()
} else {
let surplus_hashes =
hashes.retain_count(SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST);
*hashes_to_request = hashes;
hashes_to_request.shrink_to_fit();
surplus_hashes
}
}
/// Tries to buffer hashes for retry.
pub fn try_buffer_hashes_for_retry(
&mut self,
mut hashes: RequestTxHashes,
peer_failed_to_serve: &PeerId,
) {
// It could be that the txns have been received over broadcast in the time being. Remove
// the peer as fallback peer so it isn't request again for these hashes.
hashes.retain(|hash| {
if let Some(entry) = self.hashes_fetch_inflight_and_pending_fetch.get(hash) {
entry.fallback_peers_mut().remove(peer_failed_to_serve);
return true
}
// tx has been seen over broadcast in the time it took for the request to resolve
false
});
self.buffer_hashes(hashes, None)
}
    /// Number of hashes pending fetch.
    pub fn num_pending_hashes(&self) -> usize {
        self.hashes_pending_fetch.len()
    }
    /// Number of all transaction hashes in the fetcher.
    ///
    /// Counts both inflight hashes and hashes pending fetch.
    pub fn num_all_hashes(&self) -> usize {
        self.hashes_fetch_inflight_and_pending_fetch.len()
    }
    /// Buffers hashes. Note: Only peers that haven't yet tried to request the hashes should be
    /// passed as `fallback_peer` parameter! For re-buffering hashes on failed request, use
    /// [`TransactionFetcher::try_buffer_hashes_for_retry`]. Hashes that have been re-requested
    /// [`DEFAULT_MAX_RETRIES`], are dropped.
    pub fn buffer_hashes(&mut self, hashes: RequestTxHashes, fallback_peer: Option<PeerId>) {
        for hash in hashes {
            // hash could have been evicted from bounded lru map
            if self.hashes_fetch_inflight_and_pending_fetch.peek(&hash).is_none() {
                continue
            }
            // NOTE(review): `peek` above returned `Some`, so this lookup is expected to succeed
            // and the `return` arm should be unreachable (a `continue` would arguably be safer).
            let Some(TxFetchMetadata { retries, fallback_peers, .. }) =
                self.hashes_fetch_inflight_and_pending_fetch.get(&hash)
            else {
                return
            };
            if let Some(peer_id) = fallback_peer {
                // peer has not yet requested hash
                fallback_peers.insert(peer_id);
            } else {
                // re-buffering after a failed request: enforce the retry budget
                if *retries >= DEFAULT_MAX_RETRIES {
                    trace!(target: "net::tx",
                        %hash,
                        retries,
                        "retry limit for `GetPooledTransactions` requests reached for hash, dropping hash"
                    );
                    self.hashes_fetch_inflight_and_pending_fetch.remove(&hash);
                    self.hashes_pending_fetch.remove(&hash);
                    continue
                }
                *retries += 1;
            }
            // inserting may evict an older pending hash; drop the evicted hash from tracking
            if let (_, Some(evicted_hash)) = self.hashes_pending_fetch.insert_and_get_evicted(hash)
            {
                self.hashes_fetch_inflight_and_pending_fetch.remove(&evicted_hash);
                self.hashes_pending_fetch.remove(&evicted_hash);
            }
        }
    }
    /// Tries to request hashes pending fetch.
    ///
    /// Finds the first buffered hash with a fallback peer that is idle, if any. Fills the rest of
    /// the request by checking the transactions seen by the peer against the buffer.
    ///
    /// Both search phases are budgeted and timed; the durations are recorded as metrics.
    pub fn on_fetch_pending_hashes(
        &mut self,
        peers: &HashMap<PeerId, PeerMetadata<N>>,
        has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool,
    ) {
        let mut hashes_to_request = RequestTxHashes::with_capacity(
            DEFAULT_MARGINAL_COUNT_HASHES_GET_POOLED_TRANSACTIONS_REQUEST,
        );
        let mut search_durations = TxFetcherSearchDurations::default();
        // budget to look for an idle peer before giving up
        let budget_find_idle_fallback_peer = self
            .search_breadth_budget_find_idle_fallback_peer(&has_capacity_wrt_pending_pool_imports);
        let peer_id = duration_metered_exec!(
            {
                let Some(peer_id) = self.find_any_idle_fallback_peer_for_any_pending_hash(
                    &mut hashes_to_request,
                    budget_find_idle_fallback_peer,
                ) else {
                    // no peers are idle or budget is depleted
                    return
                };
                peer_id
            },
            search_durations.find_idle_peer
        );
        // peer should always exist since `is_session_active` already checked
        let Some(peer) = peers.get(&peer_id) else { return };
        let conn_eth_version = peer.version;
        // fill the request with more hashes pending fetch that have been announced by the peer.
        // the search for more hashes is done with respect to the given budget, which determines
        // how many hashes to loop through before giving up. if no more hashes are found wrt to
        // the budget, the single hash that was taken out of the cache above is sent in a request.
        let budget_fill_request = self
            .search_breadth_budget_find_intersection_pending_hashes_and_hashes_seen_by_peer(
                &has_capacity_wrt_pending_pool_imports,
            );
        duration_metered_exec!(
            {
                self.fill_request_from_hashes_pending_fetch(
                    &mut hashes_to_request,
                    &peer.seen_transactions,
                    budget_fill_request,
                )
            },
            search_durations.fill_request
        );
        self.update_pending_fetch_cache_search_metrics(search_durations);
        trace!(target: "net::tx",
            peer_id=format!("{peer_id:#}"),
            hashes=?*hashes_to_request,
            %conn_eth_version,
            "requesting hashes that were stored pending fetch from peer"
        );
        // request the buffered missing transactions
        if let Some(failed_to_request_hashes) =
            self.request_transactions_from_peer(hashes_to_request, peer)
        {
            trace!(target: "net::tx",
                peer_id=format!("{peer_id:#}"),
                ?failed_to_request_hashes,
                %conn_eth_version,
                "failed sending request to peer's session, buffering hashes"
            );
            // the request never reached the peer's session, so the peer is still a valid fallback
            self.buffer_hashes(failed_to_request_hashes, Some(peer_id));
        }
    }
    /// Filters out hashes that have been seen before. For hashes that have already been seen, the
    /// peer is added as fallback peer.
    ///
    /// After this call, `new_announced_hashes` retains only hashes that should be requested from
    /// this peer: previously unseen hashes plus hashes that were pending fetch (the latter are
    /// removed from the pending-fetch cache here).
    ///
    /// NOTE(review): the doc above mentions registering the peer as fallback for seen hashes,
    /// but no fallback insertion is visible in this body — presumably handled by the caller;
    /// confirm.
    pub fn filter_unseen_and_pending_hashes(
        &mut self,
        new_announced_hashes: &mut ValidAnnouncementData,
        is_tx_bad_import: impl Fn(&TxHash) -> bool,
        peer_id: &PeerId,
        client_version: &str,
    ) {
        let mut previously_unseen_hashes_count = 0;
        let msg_version = new_announced_hashes.msg_version();
        // filter out inflight hashes, and register the peer as fallback for all inflight hashes
        new_announced_hashes.retain(|hash, metadata| {
            // occupied entry
            if let Some(TxFetchMetadata{ tx_encoded_length: previously_seen_size, ..}) = self.hashes_fetch_inflight_and_pending_fetch.peek_mut(hash) {
                // update size metadata if available
                if let Some((_ty, size)) = metadata {
                    if let Some(prev_size) = previously_seen_size {
                        // check if this peer is announcing a different size than a previous peer
                        if size != prev_size {
                            trace!(target: "net::tx",
                                peer_id=format!("{peer_id:#}"),
                                %hash,
                                size,
                                previously_seen_size,
                                %client_version,
                                "peer announced a different size for tx, this is especially worrying if one size is much bigger..."
                            );
                        }
                    }
                    // believe the most recent peer to announce tx
                    *previously_seen_size = Some(*size);
                }
                // hash has been seen but is not inflight
                if self.hashes_pending_fetch.remove(hash) {
                    return true
                }
                // hash is inflight, drop it from the announcement
                return false
            }
            // vacant entry
            if is_tx_bad_import(hash) {
                return false
            }
            previously_unseen_hashes_count += 1;
            // start tracking the hash; insertion can fail if the bounded map rejects the entry
            if self.hashes_fetch_inflight_and_pending_fetch.get_or_insert(*hash, ||
                TxFetchMetadata{retries: 0, fallback_peers: LruCache::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS as u32), tx_encoded_length: None}
            ).is_none() {
                trace!(target: "net::tx",
                    peer_id=format!("{peer_id:#}"),
                    %hash,
                    ?msg_version,
                    %client_version,
                    "failed to cache new announced hash from peer in schnellru::LruMap, dropping hash"
                );
                return false
            }
            true
        });
        trace!(target: "net::tx",
            peer_id=format!("{peer_id:#}"),
            previously_unseen_hashes_count=previously_unseen_hashes_count,
            msg_version=?msg_version,
            client_version=%client_version,
            "received previously unseen hashes in announcement from peer"
        );
    }
/// Requests the missing transactions from the previously unseen announced hashes of the peer.
/// Returns the requested hashes if the request concurrency limit is reached or if the request
/// fails to send over the channel to the peer's session task.
///
/// This filters all announced hashes that are already in flight, and requests the missing,
/// while marking the given peer as an alternative peer for the hashes that are already in
/// flight.
pub fn request_transactions_from_peer(
    &mut self,
    new_announced_hashes: RequestTxHashes,
    peer: &PeerMetadata<N>,
) -> Option<RequestTxHashes> {
    let peer_id: PeerId = peer.request_tx.peer_id;
    let conn_eth_version = peer.version;

    // Enforce the global cap on concurrent `GetPooledTransactions` requests.
    if self.active_peers.len() >= self.info.max_inflight_requests {
        trace!(target: "net::tx",
            peer_id=format!("{peer_id:#}"),
            hashes=?*new_announced_hashes,
            %conn_eth_version,
            max_inflight_transaction_requests=self.info.max_inflight_requests,
            "limit for concurrent `GetPooledTransactions` requests reached, dropping request for hashes to peer"
        );
        return Some(new_announced_hashes)
    }

    let Some(inflight_count) = self.active_peers.get_or_insert(peer_id, || 0) else {
        trace!(target: "net::tx",
            peer_id=format!("{peer_id:#}"),
            hashes=?*new_announced_hashes,
            conn_eth_version=%conn_eth_version,
            "failed to cache active peer in schnellru::LruMap, dropping request to peer"
        );
        return Some(new_announced_hashes)
    };

    // Enforce the per-peer cap on concurrent requests.
    if *inflight_count >= self.info.max_inflight_requests_per_peer {
        trace!(target: "net::tx",
            peer_id=format!("{peer_id:#}"),
            hashes=?*new_announced_hashes,
            %conn_eth_version,
            max_concurrent_tx_reqs_per_peer=self.info.max_inflight_requests_per_peer,
            "limit for concurrent `GetPooledTransactions` requests per peer reached"
        );
        return Some(new_announced_hashes)
    }

    #[cfg(debug_assertions)]
    {
        // Invariant check: every hash packed into a request must already have been removed
        // from the pending-fetch cache by the caller.
        for hash in &new_announced_hashes {
            if self.hashes_pending_fetch.contains(hash) {
                // Fix: the first placeholder is the single offending hash (the message is
                // singular); previously the whole request list was printed for both slots.
                tracing::debug!(target: "net::tx", "`{}` should have been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and `@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `{}`: {:?}",
                    format!("{hash:?}"),
                    format!("{:?}", new_announced_hashes),
                    new_announced_hashes.iter().map(|hash| {
                        let metadata = self.hashes_fetch_inflight_and_pending_fetch.get(hash);
                        (*hash, metadata.map(|m| (m.retries, m.tx_encoded_length)))
                    }).collect::<Vec<(TxHash, Option<(u8, Option<usize>)>)>>())
            }
        }
    }

    let (response, rx) = oneshot::channel();
    let req = PeerRequest::GetPooledTransactions {
        request: GetPooledTransactions(new_announced_hashes.iter().copied().collect()),
        response,
    };

    // try to send the request to the peer
    if let Err(err) = peer.request_tx.try_send(req) {
        // peer channel is full or closed; hand the hashes back so the caller can retry
        // them with a fallback peer
        return match err {
            TrySendError::Full(_) | TrySendError::Closed(_) => {
                self.metrics.egress_peer_channel_full.increment(1);
                Some(new_announced_hashes)
            }
        }
    }

    *inflight_count += 1;
    // stores a new request future for the request
    self.inflight_requests.push(GetPooledTxRequestFut::new(peer_id, new_announced_hashes, rx));

    None
}
/// Tries to fill request with hashes pending fetch so that the expected [`PooledTransactions`]
/// response is full enough. A mutable reference to a list of hashes to request is passed as
/// parameter. A budget is passed as parameter, this ensures that the node stops searching
/// for more hashes after the budget is depleted. Under bad network conditions, the cache of
/// hashes pending fetch may become very full for a while. As the node recovers, the hashes
/// pending fetch cache should get smaller. The budget should aim to be big enough to loop
/// through all buffered hashes in good network conditions.
///
/// The request hashes buffer is filled as if it's an eth68 request, i.e. smartly assemble
/// the request based on expected response size. For any hash missing size metadata, it is
/// guessed at [`AVERAGE_BYTE_SIZE_TX_ENCODED`].
///
/// Loops through hashes pending fetch and does:
///
/// 1. Check if a hash pending fetch is seen by peer.
/// 2. Optimistically include the hash in the request.
/// 3. Accumulate expected total response size.
/// 4. Check if acc size and hashes count is at limit, if so stop looping.
/// 5. Remove hashes to request from cache of hashes pending fetch.
pub fn fill_request_from_hashes_pending_fetch(
    &mut self,
    hashes_to_request: &mut RequestTxHashes,
    seen_hashes: &LruCache<TxHash>,
    mut budget_fill_request: Option<usize>, // check max `budget` lru pending hashes
) {
    // The buffer is expected to be seeded with one hash; bail if it is empty.
    let Some(hash) = hashes_to_request.iter().next() else { return };

    // Expected response size of the seed hash, falling back to the average tx size when no
    // size metadata was announced for it.
    let mut acc_size_response = self
        .hashes_fetch_inflight_and_pending_fetch
        .get(hash)
        .and_then(|entry| entry.tx_encoded_len())
        .unwrap_or(AVERAGE_BYTE_SIZE_TX_ENCODED);

    // if request full enough already, we're satisfied, send request for single tx
    if acc_size_response >=
        DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE_ON_FETCH_PENDING_HASHES
    {
        return
    }

    // try to fill request by checking if any other hashes pending fetch (in lru order) are
    // also seen by peer
    for hash in self.hashes_pending_fetch.iter() {
        // 1. Check if a hash pending fetch is seen by peer.
        if !seen_hashes.contains(hash) {
            continue
        };

        // 2. Optimistically include the hash in the request.
        hashes_to_request.insert(*hash);

        // 3. Accumulate expected total response size.
        let size = self
            .hashes_fetch_inflight_and_pending_fetch
            .get(hash)
            .and_then(|entry| entry.tx_encoded_len())
            .unwrap_or(AVERAGE_BYTE_SIZE_TX_ENCODED);
        acc_size_response += size;

        // 4. Check if acc size or hashes count is at limit, if so stop looping.
        // if expected response is full enough or the number of hashes in the request is
        // enough, we're satisfied
        if acc_size_response >=
            DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE_ON_FETCH_PENDING_HASHES ||
            hashes_to_request.len() >
                DEFAULT_SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST_ON_FETCH_PENDING_HASHES
        {
            break
        }

        if let Some(ref mut bud) = budget_fill_request {
            // Fix: `*bud -= 1` underflows when the caller passes `Some(0)` — a panic in
            // debug builds and a wrap-around (near-infinite budget) in release builds.
            // Saturate at zero so an exhausted budget stops the search instead.
            *bud = bud.saturating_sub(1);
            if *bud == 0 {
                break
            }
        }
    }

    // 5. Remove hashes to request from cache of hashes pending fetch.
    for hash in hashes_to_request.iter() {
        self.hashes_pending_fetch.remove(hash);
    }
}
/// Whether the [`TransactionFetcher`] still has headroom to request hashes pending fetch.
/// Yields `false` once the fetcher operates close to its full request capacity.
pub fn has_capacity_for_fetching_pending_hashes(&self) -> bool {
    self.has_capacity(self.info.max_inflight_requests)
}
/// Checks the number of inflight requests against the given tolerated maximum, returning
/// `true` while that maximum has not been exceeded.
fn has_capacity(&self, max_inflight_requests: usize) -> bool {
    max_inflight_requests >= self.inflight_requests.len()
}
/// Returns the limit to enforce when looking for any pending hash with an idle fallback peer.
///
/// Returns `Some(limit)` if [`TransactionFetcher`] and the
/// [`TransactionPool`](reth_transaction_pool::TransactionPool) are operating close to full
/// capacity. Returns `None`, unlimited, if they are not that busy.
pub fn search_breadth_budget_find_idle_fallback_peer(
&self,
has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool,
) -> Option<usize> {
let info = &self.info;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/transactions/policy.rs | crates/net/network/src/transactions/policy.rs | use crate::transactions::config::{AnnouncementFilteringPolicy, TransactionPropagationPolicy};
use std::fmt::Debug;
/// A bundle of policies that control the behavior of network components like
/// the [`TransactionsManager`](super::TransactionsManager).
///
/// This trait allows for different collections of policies to be used interchangeably.
///
/// Implementors must be `Send + Sync + 'static` (and `Debug`) so a policy bundle can be held
/// by long-running network components.
pub trait TransactionPolicies: Send + Sync + Debug + 'static {
    /// The type of the policy used for transaction propagation.
    type Propagation: TransactionPropagationPolicy;

    /// The type of the policy used for filtering transaction announcements.
    type Announcement: AnnouncementFilteringPolicy;

    /// Returns a reference to the transaction propagation policy.
    fn propagation_policy(&self) -> &Self::Propagation;

    /// Returns a mutable reference to the transaction propagation policy.
    fn propagation_policy_mut(&mut self) -> &mut Self::Propagation;

    /// Returns a reference to the announcement filtering policy.
    fn announcement_filter(&self) -> &Self::Announcement;
}
/// A container that bundles specific implementations of transaction-related policies.
///
/// This struct implements the [`TransactionPolicies`] trait, providing a complete set of
/// policies required by components like the [`TransactionsManager`](super::TransactionsManager).
/// It holds a specific [`TransactionPropagationPolicy`] and an
/// [`AnnouncementFilteringPolicy`].
#[derive(Debug, Clone, Default)]
pub struct NetworkPolicies<P, A> {
    // Policy governing how transactions are propagated to peers.
    propagation: P,
    // Policy deciding which transaction announcements are accepted.
    announcement: A,
}
impl<P, A> NetworkPolicies<P, A> {
/// Creates a new bundle of network policies.
pub const fn new(propagation: P, announcement: A) -> Self {
Self { propagation, announcement }
}
/// Returns a new `NetworkPolicies` bundle with the `TransactionPropagationPolicy` replaced.
pub fn with_propagation<NewP>(self, new_propagation: NewP) -> NetworkPolicies<NewP, A>
where
NewP: TransactionPropagationPolicy,
{
NetworkPolicies::new(new_propagation, self.announcement)
}
/// Returns a new `NetworkPolicies` bundle with the `AnnouncementFilteringPolicy` replaced.
pub fn with_announcement<NewA>(self, new_announcement: NewA) -> NetworkPolicies<P, NewA>
where
NewA: AnnouncementFilteringPolicy,
{
NetworkPolicies::new(self.propagation, new_announcement)
}
}
// A `NetworkPolicies` bundle is a complete `TransactionPolicies` implementation: each trait
// method delegates directly to the corresponding stored policy field.
impl<P, A> TransactionPolicies for NetworkPolicies<P, A>
where
    P: TransactionPropagationPolicy + Debug,
    A: AnnouncementFilteringPolicy + Debug,
{
    type Propagation = P;
    type Announcement = A;

    fn propagation_policy(&self) -> &Self::Propagation {
        &self.propagation
    }

    fn propagation_policy_mut(&mut self) -> &mut Self::Propagation {
        &mut self.propagation
    }

    fn announcement_filter(&self) -> &Self::Announcement {
        &self.announcement
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/transactions/mod.rs | crates/net/network/src/transactions/mod.rs | //! Transactions management for the p2p network.
/// Aggregation on configurable parameters for [`TransactionsManager`].
pub mod config;
/// Default and spec'd bounds.
pub mod constants;
/// Component responsible for fetching transactions from [`NewPooledTransactionHashes`].
pub mod fetcher;
/// Defines the [`TransactionPolicies`] trait for aggregating transaction-related policies.
pub mod policy;
pub use self::constants::{
tx_fetcher::DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
};
use config::{AnnouncementAcceptance, StrictEthAnnouncementFilter, TransactionPropagationKind};
pub use config::{
AnnouncementFilteringPolicy, TransactionFetcherConfig, TransactionPropagationMode,
TransactionPropagationPolicy, TransactionsManagerConfig,
};
use policy::{NetworkPolicies, TransactionPolicies};
pub(crate) use fetcher::{FetchEvent, TransactionFetcher};
use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE};
use crate::{
budget::{
DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS,
DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS,
DEFAULT_BUDGET_TRY_DRAIN_STREAM,
},
cache::LruCache,
duration_metered_exec, metered_poll_nested_stream_with_budget,
metrics::{
AnnouncedTxTypesMetrics, TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE,
},
NetworkHandle, TxTypesCounter,
};
use alloy_primitives::{TxHash, B256};
use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE;
use futures::{stream::FuturesUnordered, Future, StreamExt};
use reth_eth_wire::{
DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData,
HandleVersionedMempoolData, NetworkPrimitives, NewPooledTransactionHashes,
NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions,
RequestTxHashes, Transactions, ValidAnnouncementData,
};
use reth_ethereum_primitives::{TransactionSigned, TxType};
use reth_metrics::common::mpsc::UnboundedMeteredReceiver;
use reth_network_api::{
events::{PeerEvent, SessionInfo},
NetworkEvent, NetworkEventListenerProvider, PeerKind, PeerRequest, PeerRequestSender, Peers,
};
use reth_network_p2p::{
error::{RequestError, RequestResult},
sync::SyncStateProvider,
};
use reth_network_peers::PeerId;
use reth_network_types::ReputationChangeKind;
use reth_primitives_traits::SignedTransaction;
use reth_tokio_util::EventStream;
use reth_transaction_pool::{
error::{PoolError, PoolResult},
AddedTransactionOutcome, GetPooledTransactionLimit, PoolTransaction, PropagateKind,
PropagatedTransactions, TransactionPool, ValidPoolTransaction,
};
use std::{
collections::{hash_map::Entry, HashMap, HashSet},
pin::Pin,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::{Duration, Instant},
};
use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError};
use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};
use tracing::{debug, trace};
/// The future for importing transactions into the pool.
///
/// Resolves with the result of each transaction import.
///
/// One future drives the import of a whole batch and yields one [`PoolResult`] per
/// transaction in that batch.
pub type PoolImportFuture =
    Pin<Box<dyn Future<Output = Vec<PoolResult<AddedTransactionOutcome>>> + Send + 'static>>;
/// Api to interact with [`TransactionsManager`] task.
///
/// This can be obtained via [`TransactionsManager::handle`] and can be used to manually interact
/// with the [`TransactionsManager`] task once it is spawned.
///
/// For example [`TransactionsHandle::get_peer_transaction_hashes`] returns the transaction hashes
/// known by a specific peer.
///
/// Handles are cheaply cloneable; every clone sends commands to the same manager task.
#[derive(Debug, Clone)]
pub struct TransactionsHandle<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Command channel to the [`TransactionsManager`]
    manager_tx: mpsc::UnboundedSender<TransactionsCommand<N>>,
}
/// Implementation of the `TransactionsHandle` API for use in testnet via type
/// [`PeerHandle`](crate::test_utils::PeerHandle).
impl<N: NetworkPrimitives> TransactionsHandle<N> {
    // Best-effort send of a command to the manager task; if the manager has shut down the
    // command is silently dropped (the send result is deliberately ignored).
    fn send(&self, cmd: TransactionsCommand<N>) {
        let _ = self.manager_tx.send(cmd);
    }

    /// Fetch the [`PeerRequestSender`] for the given peer.
    ///
    /// Resolves to `Ok(None)` when the manager does not know the peer (presumably: not
    /// connected — confirm against the `GetPeerSender` command handler).
    async fn peer_handle(
        &self,
        peer_id: PeerId,
    ) -> Result<Option<PeerRequestSender<PeerRequest<N>>>, RecvError> {
        let (tx, rx) = oneshot::channel();
        self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx });
        rx.await
    }

    /// Manually propagate the transaction that belongs to the hash.
    pub fn propagate(&self, hash: TxHash) {
        self.send(TransactionsCommand::PropagateHash(hash))
    }

    /// Manually propagate the transaction hash to a specific peer.
    ///
    /// Note: this only propagates if the pool contains the transaction.
    pub fn propagate_hash_to(&self, hash: TxHash, peer: PeerId) {
        self.propagate_hashes_to(Some(hash), peer)
    }

    /// Manually propagate the transaction hashes to a specific peer.
    ///
    /// Note: this only propagates the transactions that are known to the pool.
    pub fn propagate_hashes_to(&self, hash: impl IntoIterator<Item = TxHash>, peer: PeerId) {
        let hashes = hash.into_iter().collect::<Vec<_>>();
        // Skip sending an empty command.
        if hashes.is_empty() {
            return
        }
        self.send(TransactionsCommand::PropagateHashesTo(hashes, peer))
    }

    /// Request the active peer IDs from the [`TransactionsManager`].
    pub async fn get_active_peers(&self) -> Result<HashSet<PeerId>, RecvError> {
        let (tx, rx) = oneshot::channel();
        self.send(TransactionsCommand::GetActivePeers(tx));
        rx.await
    }

    /// Manually propagate full transaction hashes to a specific peer.
    ///
    /// Do nothing if transactions are empty.
    pub fn propagate_transactions_to(&self, transactions: Vec<TxHash>, peer: PeerId) {
        if transactions.is_empty() {
            return
        }
        self.send(TransactionsCommand::PropagateTransactionsTo(transactions, peer))
    }

    /// Manually propagate the given transaction hashes to all peers.
    ///
    /// It's up to the [`TransactionsManager`] whether the transactions are sent as hashes or in
    /// full.
    pub fn propagate_transactions(&self, transactions: Vec<TxHash>) {
        if transactions.is_empty() {
            return
        }
        self.send(TransactionsCommand::PropagateTransactions(transactions))
    }

    /// Manually propagate the given transactions to all peers.
    ///
    /// It's up to the [`TransactionsManager`] whether the transactions are sent as hashes or in
    /// full.
    pub fn broadcast_transactions(
        &self,
        transactions: impl IntoIterator<Item = N::BroadcastedTransaction>,
    ) {
        // Wrap each transaction for propagation before handing it to the manager.
        let transactions =
            transactions.into_iter().map(PropagateTransaction::new).collect::<Vec<_>>();
        if transactions.is_empty() {
            return
        }
        self.send(TransactionsCommand::BroadcastTransactions(transactions))
    }

    /// Request the transaction hashes known by specific peers.
    pub async fn get_transaction_hashes(
        &self,
        peers: Vec<PeerId>,
    ) -> Result<HashMap<PeerId, HashSet<TxHash>>, RecvError> {
        // Avoid a round trip to the manager for an empty query.
        if peers.is_empty() {
            return Ok(Default::default())
        }
        let (tx, rx) = oneshot::channel();
        self.send(TransactionsCommand::GetTransactionHashes { peers, tx });
        rx.await
    }

    /// Request the transaction hashes known by a specific peer.
    pub async fn get_peer_transaction_hashes(
        &self,
        peer: PeerId,
    ) -> Result<HashSet<TxHash>, RecvError> {
        let res = self.get_transaction_hashes(vec![peer]).await?;
        // Only one peer was queried, so the map holds at most one entry.
        Ok(res.into_values().next().unwrap_or_default())
    }

    /// Requests the transactions directly from the given peer.
    ///
    /// Returns `None` if the peer is not connected.
    ///
    /// **Note**: this returns the response from the peer as received.
    pub async fn get_pooled_transactions_from(
        &self,
        peer_id: PeerId,
        hashes: Vec<B256>,
    ) -> Result<Option<Vec<N::PooledTransaction>>, RequestError> {
        let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) };

        let (tx, rx) = oneshot::channel();
        let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx };
        // A full/closed session channel is ignored here; the awaited `rx` below surfaces the
        // failure as a `RequestError` instead.
        peer.try_send(request).ok();

        rx.await?.map(|res| Some(res.0))
    }
}
/// Manages transactions on top of the p2p network.
///
/// This can be spawned to another task and is supposed to be run as background service.
/// [`TransactionsHandle`] can be used as frontend to programmatically send commands to it and
/// interact with it.
///
/// The [`TransactionsManager`] is responsible for:
/// - handling incoming eth messages for transactions.
/// - serving transaction requests.
/// - propagate transactions
///
/// This type communicates with the [`NetworkManager`](crate::NetworkManager) in both directions.
/// - receives incoming network messages.
/// - sends messages to dispatch (responses, propagate tx)
///
/// It is directly connected to the [`TransactionPool`] to retrieve requested transactions and
/// propagate new transactions over the network.
///
/// It can be configured with different policies for transaction propagation and announcement
/// filtering. See [`NetworkPolicies`] and [`TransactionPolicies`] for more details.
///
/// ## Network Transaction Processing
///
/// ### Message Types
///
/// - **`Transactions`**: Full transaction broadcasts (rejects blob transactions)
/// - **`NewPooledTransactionHashes`**: Hash announcements
///
/// ### Peer Tracking
///
/// - Maintains per-peer transaction cache (default: 10,240 entries)
/// - Prevents duplicate imports and enables efficient propagation
///
/// ### Bad Transaction Handling
///
/// Caches and rejects transactions with consensus violations (gas, signature, chain ID).
/// Penalizes peers sending invalid transactions.
///
/// ### Import Management
///
/// Limits concurrent pool imports and backs off when approaching capacity.
///
/// ### Transaction Fetching
///
/// For announced transactions: filters known → queues unknown → fetches → imports
///
/// ### Propagation Rules
///
/// Based on: origin (Local/External/Private), peer capabilities, and network state.
/// Disabled during initial sync.
///
/// ### Security
///
/// Rate limiting via reputation, bad transaction isolation, peer scoring.
#[derive(Debug)]
#[must_use = "Manager does nothing unless polled."]
pub struct TransactionsManager<
    Pool,
    N: NetworkPrimitives = EthNetworkPrimitives,
    PBundle: TransactionPolicies = NetworkPolicies<
        TransactionPropagationKind,
        StrictEthAnnouncementFilter,
    >,
> {
    /// Access to the transaction pool.
    pool: Pool,
    /// Network access.
    network: NetworkHandle<N>,
    /// Subscriptions to all network related events.
    ///
    /// From which we get all new incoming transaction related messages.
    network_events: EventStream<NetworkEvent<PeerRequest<N>>>,
    /// Transaction fetcher to handle inflight and missing transaction requests.
    transaction_fetcher: TransactionFetcher<N>,
    /// All currently pending transactions grouped by peers.
    ///
    /// This way we can track incoming transactions and prevent multiple pool imports for the same
    /// transaction
    transactions_by_peers: HashMap<TxHash, HashSet<PeerId>>,
    /// Transactions that are currently imported into the `Pool`.
    ///
    /// The import process includes:
    /// - validation of the transactions, e.g. transaction is well formed: valid tx type, fees are
    ///   valid, or for 4844 transaction the blobs are valid. See also
    ///   [`EthTransactionValidator`](reth_transaction_pool::validate::EthTransactionValidator)
    /// - if the transaction is valid, it is added into the pool.
    ///
    /// Once the new transaction reaches the __pending__ state it will be emitted by the pool via
    /// [`TransactionPool::pending_transactions_listener`] and arrive at the `pending_transactions`
    /// receiver.
    pool_imports: FuturesUnordered<PoolImportFuture>,
    /// Stats on pending pool imports that help the node self-monitor.
    pending_pool_imports_info: PendingPoolImportsInfo,
    /// Bad imports: hashes of transactions that failed import with a bad-transaction error,
    /// cached so they are not fetched or imported again.
    bad_imports: LruCache<TxHash>,
    /// All the connected peers.
    peers: HashMap<PeerId, PeerMetadata<N>>,
    /// Send half for the command channel.
    ///
    /// This is kept so that a new [`TransactionsHandle`] can be created at any time.
    command_tx: mpsc::UnboundedSender<TransactionsCommand<N>>,
    /// Incoming commands from [`TransactionsHandle`].
    ///
    /// This will only receive commands if a user manually sends a command to the manager through
    /// the [`TransactionsHandle`] to interact with this type directly.
    command_rx: UnboundedReceiverStream<TransactionsCommand<N>>,
    /// A stream that yields new __pending__ transactions.
    ///
    /// A transaction is considered __pending__ if it is executable on the current state of the
    /// chain. In other words, this only yields transactions that satisfy all consensus
    /// requirements, these include:
    /// - no nonce gaps
    /// - all dynamic fee requirements are (currently) met
    /// - account has enough balance to cover the transaction's gas
    pending_transactions: ReceiverStream<TxHash>,
    /// Incoming events from the [`NetworkManager`](crate::NetworkManager).
    transaction_events: UnboundedMeteredReceiver<NetworkTransactionEvent<N>>,
    /// How the `TransactionsManager` is configured.
    config: TransactionsManagerConfig,
    /// Policies controlling transaction propagation and announcement filtering.
    policies: PBundle,
    /// `TransactionsManager` metrics
    metrics: TransactionsManagerMetrics,
    /// `AnnouncedTxTypes` metrics
    announced_tx_types_metrics: AnnouncedTxTypesMetrics,
}
impl<Pool: TransactionPool, N: NetworkPrimitives>
    TransactionsManager<
        Pool,
        N,
        NetworkPolicies<TransactionPropagationKind, StrictEthAnnouncementFilter>,
    >
{
    /// Sets up a new instance.
    ///
    /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance.
    pub fn new(
        network: NetworkHandle<N>,
        pool: Pool,
        from_network: mpsc::UnboundedReceiver<NetworkTransactionEvent<N>>,
        transactions_manager_config: TransactionsManagerConfig,
    ) -> Self {
        // Delegate to the policy-aware constructor, using the default policy bundle.
        let policies = NetworkPolicies::default();
        Self::with_policy(network, pool, from_network, transactions_manager_config, policies)
    }
}
impl<Pool: TransactionPool, N: NetworkPrimitives, PBundle: TransactionPolicies>
    TransactionsManager<Pool, N, PBundle>
{
    /// Sets up a new instance with given the settings.
    ///
    /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance.
    pub fn with_policy(
        network: NetworkHandle<N>,
        pool: Pool,
        from_network: mpsc::UnboundedReceiver<NetworkTransactionEvent<N>>,
        transactions_manager_config: TransactionsManagerConfig,
        policies: PBundle,
    ) -> Self {
        let network_events = network.event_listener();

        let (command_tx, command_rx) = mpsc::unbounded_channel();

        let transaction_fetcher = TransactionFetcher::with_transaction_fetcher_config(
            &transactions_manager_config.transaction_fetcher_config,
        );

        // install a listener for new __pending__ transactions that are allowed to be propagated
        // over the network
        let pending = pool.pending_transactions_listener();
        let pending_pool_imports_info = PendingPoolImportsInfo::default();
        let metrics = TransactionsManagerMetrics::default();
        metrics
            .capacity_pending_pool_imports
            .increment(pending_pool_imports_info.max_pending_pool_imports as u64);

        Self {
            pool,
            network,
            network_events,
            transaction_fetcher,
            transactions_by_peers: Default::default(),
            pool_imports: Default::default(),
            // NOTE(review): a fresh `PendingPoolImportsInfo` is constructed here instead of
            // reusing `pending_pool_imports_info` above; the capacity metric assumes both use
            // the same max — confirm `Default` matches `DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS`.
            pending_pool_imports_info: PendingPoolImportsInfo::new(
                DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS,
            ),
            bad_imports: LruCache::new(DEFAULT_MAX_COUNT_BAD_IMPORTS),
            peers: Default::default(),
            command_tx,
            command_rx: UnboundedReceiverStream::new(command_rx),
            pending_transactions: ReceiverStream::new(pending),
            transaction_events: UnboundedMeteredReceiver::new(
                from_network,
                NETWORK_POOL_TRANSACTIONS_SCOPE,
            ),
            config: transactions_manager_config,
            policies,
            metrics,
            announced_tx_types_metrics: AnnouncedTxTypesMetrics::default(),
        }
    }

    /// Returns a new handle that can send commands to this type.
    pub fn handle(&self) -> TransactionsHandle<N> {
        TransactionsHandle { manager_tx: self.command_tx.clone() }
    }

    /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns
    /// `false` if [`TransactionsManager`] is operating close to full capacity.
    ///
    /// Both the pool-import backlog and the fetcher must have headroom.
    fn has_capacity_for_fetching_pending_hashes(&self) -> bool {
        self.pending_pool_imports_info
            .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) &&
            self.transaction_fetcher.has_capacity_for_fetching_pending_hashes()
    }

    /// Reports a peer for sending bad transactions and bumps the corresponding metric.
    fn report_peer_bad_transactions(&self, peer_id: PeerId) {
        self.report_peer(peer_id, ReputationChangeKind::BadTransactions);
        self.metrics.reported_bad_transactions.increment(1);
    }

    /// Applies a reputation change of the given kind to the peer.
    fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) {
        trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change");
        self.network.reputation_change(peer_id, kind);
    }

    /// Penalizes a peer for (re)announcing a transaction it already sent us.
    fn report_already_seen(&self, peer_id: PeerId) {
        trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction");
        self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction);
    }

    /// Clear the transaction
    fn on_good_import(&mut self, hash: TxHash) {
        self.transactions_by_peers.remove(&hash);
    }

    /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid
    /// fetching or importing it again.
    ///
    /// Errors that count as bad transactions are:
    ///
    /// - intrinsic gas too low
    /// - exceeds gas limit
    /// - gas uint overflow
    /// - exceeds max init code size
    /// - oversized data
    /// - signer account has bytecode
    /// - chain id mismatch
    /// - old legacy chain id
    /// - tx type not supported
    ///
    /// (and additionally for blobs txns...)
    ///
    /// - no blobs
    /// - too many blobs
    /// - invalid kzg proof
    /// - kzg error
    /// - not blob transaction (tx type mismatch)
    /// - wrong versioned kzg commitment hash
    fn on_bad_import(&mut self, err: PoolError) {
        let peers = self.transactions_by_peers.remove(&err.hash);

        // if we're _currently_ syncing, we ignore a bad transaction
        if !err.is_bad_transaction() || self.network.is_syncing() {
            return
        }
        // otherwise we penalize the peer that sent the bad transaction, with the assumption that
        // the peer should have known that this transaction is bad (e.g. violating consensus rules)
        if let Some(peers) = peers {
            for peer_id in peers {
                self.report_peer_bad_transactions(peer_id);
            }
        }
        self.metrics.bad_imports.increment(1);
        self.bad_imports.insert(err.hash);
    }

    /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`].
    fn on_fetch_hashes_pending_fetch(&mut self) {
        // try drain transaction hashes pending fetch
        let info = &self.pending_pool_imports_info;
        let max_pending_pool_imports = info.max_pending_pool_imports;
        // The closure lets the fetcher probe capacity at a fraction (1/divisor) of the max.
        let has_capacity_wrt_pending_pool_imports =
            |divisor| info.has_capacity(max_pending_pool_imports / divisor);

        self.transaction_fetcher
            .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports);
    }

    /// Maps a failed peer request to the appropriate reputation change, if any.
    fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) {
        let kind = match req_err {
            RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol,
            RequestError::Timeout => ReputationChangeKind::Timeout,
            RequestError::ChannelClosed | RequestError::ConnectionDropped => {
                // peer is already disconnected
                return
            }
            RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id),
        };
        self.report_peer(peer_id, kind);
    }

    /// Records the duration of the whole poll pass and of each nested drain loop.
    #[inline]
    fn update_poll_metrics(&self, start: Instant, poll_durations: TxManagerPollDurations) {
        let metrics = &self.metrics;

        let TxManagerPollDurations {
            acc_network_events,
            acc_pending_imports,
            acc_tx_events,
            acc_imported_txns,
            acc_fetch_events,
            acc_pending_fetch,
            acc_cmds,
        } = poll_durations;

        // update metrics for whole poll function
        metrics.duration_poll_tx_manager.set(start.elapsed().as_secs_f64());
        // update metrics for nested expressions
        metrics.acc_duration_poll_network_events.set(acc_network_events.as_secs_f64());
        metrics.acc_duration_poll_pending_pool_imports.set(acc_pending_imports.as_secs_f64());
        metrics.acc_duration_poll_transaction_events.set(acc_tx_events.as_secs_f64());
        metrics.acc_duration_poll_imported_transactions.set(acc_imported_txns.as_secs_f64());
        metrics.acc_duration_poll_fetch_events.set(acc_fetch_events.as_secs_f64());
        metrics.acc_duration_fetch_pending_hashes.set(acc_pending_fetch.as_secs_f64());
        metrics.acc_duration_poll_commands.set(acc_cmds.as_secs_f64());
    }
}
impl<Pool: TransactionPool, N: NetworkPrimitives, PBundle: TransactionPolicies>
TransactionsManager<Pool, N, PBundle>
{
/// Processes a batch import results.
fn on_batch_import_result(&mut self, batch_results: Vec<PoolResult<AddedTransactionOutcome>>) {
    // Route every per-transaction outcome to its bookkeeping path.
    for res in batch_results {
        match res {
            Ok(AddedTransactionOutcome { hash, .. }) => self.on_good_import(hash),
            Err(err) => self.on_bad_import(err),
        }
    }
}
/// Request handler for an incoming `NewPooledTransactionHashes`
fn on_new_pooled_transaction_hashes(
&mut self,
peer_id: PeerId,
msg: NewPooledTransactionHashes,
) {
// If the node is initially syncing, ignore transactions
if self.network.is_initially_syncing() {
return
}
if self.network.tx_gossip_disabled() {
return
}
// get handle to peer's session, if the session is still active
let Some(peer) = self.peers.get_mut(&peer_id) else {
trace!(
peer_id = format!("{peer_id:#}"),
?msg,
"discarding announcement from inactive peer"
);
return
};
let client = peer.client_version.clone();
// keep track of the transactions the peer knows
let mut count_txns_already_seen_by_peer = 0;
for tx in msg.iter_hashes().copied() {
if !peer.seen_transactions.insert(tx) {
count_txns_already_seen_by_peer += 1;
}
}
if count_txns_already_seen_by_peer > 0 {
// this may occur if transactions are sent or announced to a peer, at the same time as
// the peer sends/announces those hashes to us. this is because, marking
// txns as seen by a peer is done optimistically upon sending them to the
// peer.
self.metrics.messages_with_hashes_already_seen_by_peer.increment(1);
self.metrics
.occurrences_hash_already_seen_by_peer
.increment(count_txns_already_seen_by_peer);
trace!(target: "net::tx",
%count_txns_already_seen_by_peer,
peer_id=format!("{peer_id:#}"),
?client,
"Peer sent hashes that have already been marked as seen by peer"
);
self.report_already_seen(peer_id);
}
// 1. filter out spam
if msg.is_empty() {
self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement);
return;
}
let original_len = msg.len();
let mut partially_valid_msg = msg.dedup();
if partially_valid_msg.len() != original_len {
self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement);
}
// 2. filter out transactions pending import to pool
partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash));
// 3. filter out known hashes
//
// known txns have already been successfully fetched or received over gossip.
//
// most hashes will be filtered out here since this the mempool protocol is a gossip
// protocol, healthy peers will send many of the same hashes.
//
let hashes_count_pre_pool_filter = partially_valid_msg.len();
self.pool.retain_unknown(&mut partially_valid_msg);
if hashes_count_pre_pool_filter > partially_valid_msg.len() {
let already_known_hashes_count =
hashes_count_pre_pool_filter - partially_valid_msg.len();
self.metrics
.occurrences_hashes_already_in_pool
.increment(already_known_hashes_count as u64);
}
if partially_valid_msg.is_empty() {
// nothing to request
return
}
// 4. filter out invalid entries (spam)
//
// validates messages with respect to the given network, e.g. allowed tx types
//
let mut should_report_peer = false;
let mut tx_types_counter = TxTypesCounter::default();
let is_eth68_message = partially_valid_msg
.msg_version()
.expect("partially valid announcement should have a version")
.is_eth68();
partially_valid_msg.retain(|tx_hash, metadata_ref_mut| {
let (ty_byte, size_val) = match *metadata_ref_mut {
Some((ty, size)) => {
if !is_eth68_message {
should_report_peer = true;
}
(ty, size)
}
None => {
if is_eth68_message {
should_report_peer = true;
return false;
}
(0u8, 0)
}
};
if is_eth68_message {
if let Some((actual_ty_byte, _)) = *metadata_ref_mut {
if let Ok(parsed_tx_type) = TxType::try_from(actual_ty_byte) {
tx_types_counter.increase_by_tx_type(parsed_tx_type);
}
}
}
let decision = self
.policies
.announcement_filter()
.decide_on_announcement(ty_byte, tx_hash, size_val);
match decision {
AnnouncementAcceptance::Accept => true,
AnnouncementAcceptance::Ignore => false,
AnnouncementAcceptance::Reject { penalize_peer } => {
if penalize_peer {
should_report_peer = true;
}
false
}
}
});
if is_eth68_message {
self.announced_tx_types_metrics.update_eth68_announcement_metrics(tx_types_counter);
}
if should_report_peer {
self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement);
}
let mut valid_announcement_data =
ValidAnnouncementData::from_partially_valid_data(partially_valid_msg);
if valid_announcement_data.is_empty() {
// no valid announcement data
return
}
// 5. filter out already seen unknown hashes
//
// seen hashes are already in the tx fetcher, pending fetch.
//
// for any seen hashes add the peer as fallback. unseen hashes are loaded into the tx
// fetcher, hence they should be valid at this point.
let bad_imports = &self.bad_imports;
self.transaction_fetcher.filter_unseen_and_pending_hashes(
&mut valid_announcement_data,
|hash| bad_imports.contains(hash),
&peer_id,
&client,
);
if valid_announcement_data.is_empty() {
// nothing to request
return
}
trace!(target: "net::tx::propagation",
peer_id=format!("{peer_id:#}"),
hashes_len=valid_announcement_data.len(),
hashes=?valid_announcement_data.keys().collect::<Vec<_>>(),
msg_version=%valid_announcement_data.msg_version(),
client_version=%client,
"received previously unseen and pending hashes in announcement from peer"
);
// only send request for hashes to idle peer, otherwise buffer hashes storing peer as
// fallback
if !self.transaction_fetcher.is_idle(&peer_id) {
// load message version before announcement data is destructed in packing
let msg_version = valid_announcement_data.msg_version();
let (hashes, _version) = valid_announcement_data.into_request_hashes();
trace!(target: "net::tx",
peer_id=format!("{peer_id:#}"),
hashes=?*hashes,
%msg_version,
%client,
"buffering hashes announced by busy peer"
);
self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id));
return
}
let mut hashes_to_request =
RequestTxHashes::with_capacity(valid_announcement_data.len() / 4);
let surplus_hashes =
self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data);
if !surplus_hashes.is_empty() {
trace!(target: "net::tx",
peer_id=format!("{peer_id:#}"),
surplus_hashes=?*surplus_hashes,
%client,
"some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes"
);
self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id));
}
trace!(target: "net::tx",
peer_id=format!("{peer_id:#}"),
hashes=?*hashes_to_request,
%client,
"sending hashes in `GetPooledTransactions` request to peer's session"
);
// request the missing transactions
//
// get handle to peer's session again, at this point we know it exists
let Some(peer) = self.peers.get_mut(&peer_id) else { return };
if let Some(failed_to_request_hashes) =
self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer)
{
let conn_eth_version = peer.version;
trace!(target: "net::tx",
peer_id=format!("{peer_id:#}"),
failed_to_request_hashes=?*failed_to_request_hashes,
%conn_eth_version,
%client,
"sending `GetPooledTransactions` request to peer's session failed, buffering hashes"
);
self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id));
}
}
}
impl<Pool, N, PBundle> TransactionsManager<Pool, N, PBundle>
where
Pool: TransactionPool + Unpin + 'static,
N: NetworkPrimitives<
BroadcastedTransaction: SignedTransaction,
PooledTransaction: SignedTransaction,
> + Unpin,
PBundle: TransactionPolicies,
Pool::Transaction:
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/transactions/constants.rs | crates/net/network/src/transactions/constants.rs | /* ==================== BROADCAST ==================== */
/// Soft limit for the number of hashes in a
/// [`NewPooledTransactionHashes`](reth_eth_wire::NewPooledTransactionHashes) broadcast message.
///
/// Spec'd at 4096 hashes.
///
/// <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#newpooledtransactionhashes-0x08>
pub const SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE: usize = 4096;
/// Default soft limit for the byte size of a [`Transactions`](reth_eth_wire::Transactions)
/// broadcast message.
///
/// Default is 128 KiB.
pub const DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE: usize = 128 * 1024;
/* ================ REQUEST-RESPONSE ================ */
/// Recommended soft limit for the number of hashes in a
/// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request.
///
/// Spec'd at 256 hashes (8 KiB).
///
/// <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09>
pub const SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST: usize = 256;
/// Soft limit for the byte size of a [`PooledTransactions`](reth_eth_wire::PooledTransactions)
/// response on assembling a [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions)
/// request.
///
/// Spec'd at 2 MiB.
///
/// <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#protocol-messages>.
pub const SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE: usize = 2 * 1024 * 1024;
/// Constants used by [`TransactionsManager`](super::TransactionsManager).
pub mod tx_manager {
use super::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE;
/// Default limit for number of transactions to keep track of for a single peer.
///
/// Default is 320 transaction hashes.
pub const DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER: u32 = 10 * 1024 / 32;
/// Default maximum pending pool imports to tolerate.
///
/// Default is equivalent to the number of hashes in one full announcement, which is spec'd at
/// 4096 hashes, so 4096 pending pool imports.
pub const DEFAULT_MAX_COUNT_PENDING_POOL_IMPORTS: usize =
SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE;
/// Default limit for number of bad imports to keep track of.
///
/// Default is 100 KiB, i.e. 3 200 transaction hashes.
pub const DEFAULT_MAX_COUNT_BAD_IMPORTS: u32 = 100 * 1024 / 32;
}
/// Constants used by [`TransactionFetcher`](super::TransactionFetcher).
pub mod tx_fetcher {
use reth_network_types::peers::config::{
DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND,
};
use super::{
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST,
SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE,
};
/* ============== SCALARS OF MESSAGES ============== */
/// Default soft limit for the byte size of a
/// [`PooledTransactions`](reth_eth_wire::PooledTransactions) response on assembling a
/// [`GetPooledTransactions`](reth_eth_wire::PooledTransactions) request. This defaults to less
/// than the [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], at 2 MiB, used when
/// assembling a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response.
///
/// Default is 128 KiB.
pub const DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ: usize = 128 * 1024;
/* ==================== RETRIES ==================== */
/// Default maximum request retires per [`TxHash`](alloy_primitives::TxHash). Note, this is
/// reset should the [`TxHash`](alloy_primitives::TxHash) re-appear in an announcement after it
/// has been evicted from the hashes pending fetch cache, i.e. the counter is restarted. If
/// this happens, it is likely a very popular transaction, that should and can indeed be
/// fetched hence this behaviour is favourable.
///
/// Default is 2 retries.
pub const DEFAULT_MAX_RETRIES: u8 = 2;
/// Default number of alternative peers to keep track of for each transaction pending fetch. At
/// most [`DEFAULT_MAX_RETRIES`], which defaults to 2 peers, can ever be needed per peer.
///
/// Default is the sum of [`DEFAULT_MAX_RETRIES`] an
/// [`DEFAULT_MARGINAL_COUNT_FALLBACK_PEERS`], which defaults to 1 peer, so 3 peers.
pub const DEFAULT_MAX_COUNT_FALLBACK_PEERS: u8 =
DEFAULT_MAX_RETRIES + DEFAULT_MARGINAL_COUNT_FALLBACK_PEERS;
/// Default marginal on fallback peers. This is the case, since a transaction is only requested
/// once from each individual peer.
///
/// Default is 1 peer.
pub const DEFAULT_MARGINAL_COUNT_FALLBACK_PEERS: u8 = 1;
/* ==================== CONCURRENCY ==================== */
/// Default maximum concurrent [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions)
/// requests.
///
/// Default is the product of [`DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER`], which
/// defaults to 1 request, and the sum of [`DEFAULT_MAX_COUNT_PEERS_INBOUND`] and
/// [`DEFAULT_MAX_COUNT_PEERS_OUTBOUND`], which default to 30 and 100 peers respectively, so
/// 130 requests.
pub const DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS: u32 =
DEFAULT_MAX_COUNT_PEERS_INBOUND + DEFAULT_MAX_COUNT_PEERS_OUTBOUND;
/// Default maximum number of concurrent
/// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions)s to allow per peer. This
/// number reflects concurrent requests for different hashes.
///
/// Default is 1 request.
pub const DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER: u8 = 1;
/* =============== HASHES PENDING FETCH ================ */
/// Default limit for number of transactions waiting for an idle peer to be fetched from.
///
/// Default is 100 times the [`SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST`],
/// which defaults to 256 hashes, so 25 600 hashes.
pub const DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH: u32 =
100 * SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST as u32;
/// Default max size for cache of inflight and pending transactions fetch.
///
/// Default is [`DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH`] +
/// [`DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES`], which is 25600 hashes and
/// 65 requests, so it is 25665 hashes.
pub const DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH: u32 =
DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH +
DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES as u32;
/// Default maximum number of hashes pending fetch to tolerate at any time.
///
/// Default is half of [`DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH`], which defaults to 25 600
/// hashes, so 12 800 hashes.
pub const DEFAULT_MAX_COUNT_PENDING_FETCH: usize =
DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH as usize / 2;
/* ====== LIMITED CAPACITY ON FETCH PENDING HASHES ====== */
/// Default budget for finding an idle fallback peer for any hash pending fetch, when said
/// search is budget constrained.
///
/// Default is a sixth of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes
/// (the ideal max number of hashes pending fetch), divided by
/// [`DEFAULT_MAX_COUNT_FALLBACK_PEERS`], which defaults to 3 peers (the depth of the search),
/// so a search breadth of 711 lru hashes in the pending hashes cache.
pub const DEFAULT_BUDGET_FIND_IDLE_FALLBACK_PEER: usize =
DEFAULT_MAX_COUNT_PENDING_FETCH / 6 / DEFAULT_MAX_COUNT_FALLBACK_PEERS as usize;
/// Default budget for finding hashes in the intersection of transactions announced by a peer
/// and in the cache of hashes pending fetch, when said search is budget constrained.
///
/// Default is an eight of [`DEFAULT_MAX_COUNT_PENDING_FETCH`], which defaults to 12 800 hashes
/// (the ideal max number of hashes pending fetch), so a search breadth of 1 600 lru hashes in
/// the pending hashes cache.
pub const DEFAULT_BUDGET_FIND_INTERSECTION_ANNOUNCED_BY_PEER_AND_PENDING_FETCH: usize =
DEFAULT_MAX_COUNT_PENDING_FETCH / 8;
/* ====== SCALARS FOR USE ON FETCH PENDING HASHES ====== */
/// Default soft limit for the number of hashes in a
/// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request, when it is filled
/// from hashes pending fetch.
///
/// Default is half of the [`SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST`]
/// which by spec is 256 hashes, so 128 hashes.
pub const DEFAULT_SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST_ON_FETCH_PENDING_HASHES:
usize = SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST / 2;
/// Default soft limit for a [`PooledTransactions`](reth_eth_wire::PooledTransactions) response
/// when it's used as expected response in calibrating the filling of a
/// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request, when the request
/// is filled from hashes pending fetch.
///
/// Default is half of
/// [`DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ`],
/// which defaults to 128 KiB, so 64 KiB.
pub const DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE_ON_FETCH_PENDING_HASHES:
usize =
DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ /
2;
/// Default max inflight request when fetching pending hashes.
///
/// Default is half of [`DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS`], which defaults to 130
/// requests, so 65 requests.
pub const DEFAULT_MAX_COUNT_INFLIGHT_REQUESTS_ON_FETCH_PENDING_HASHES: usize =
DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize / 2;
/// Default divisor of the max inflight request when calculating search breadth of the search
/// for any idle peer to which to send a request filled with hashes pending fetch. The max
/// inflight requests is configured in
/// [`TransactionFetcherInfo`](crate::transactions::fetcher::TransactionFetcherInfo).
///
/// Default is 3 requests.
pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_IDLE_PEER: usize = 3;
/// Default divisor of the max inflight request when calculating search breadth of the search
/// for the intersection of hashes announced by a peer and hashes pending fetch. The max
/// inflight requests is configured in
/// [`TransactionFetcherInfo`](crate::transactions::fetcher::TransactionFetcherInfo).
///
/// Default is 3 requests.
pub const DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION: usize = 3;
// Default divisor to the max pending pool imports when calculating search breadth of the
/// search for any idle peer to which to send a request filled with hashes pending fetch.
/// The max pending pool imports is configured in
/// [`PendingPoolImportsInfo`](crate::transactions::PendingPoolImportsInfo).
///
/// Default is 4 requests.
pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_IDLE_PEER: usize = 4;
/// Default divisor to the max pending pool imports when calculating search breadth of the
/// search for any idle peer to which to send a request filled with hashes pending fetch.
/// The max pending pool imports is configured in
/// [`PendingPoolImportsInfo`](crate::transactions::PendingPoolImportsInfo).
///
/// Default is 4 requests.
pub const DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION: usize = 4;
/* ================== ROUGH MEASURES ================== */
/// Average byte size of an encoded transaction.
///
/// Default is [`SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE`], which defaults to 2 MiB,
/// divided by [`SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE`], which
/// is spec'd at 4096 hashes, so 521 bytes.
pub const AVERAGE_BYTE_SIZE_TX_ENCODED: usize =
SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE /
SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE;
/// Median observed size in bytes of a small encoded legacy transaction.
///
/// Default is 120 bytes.
pub const MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED: usize = 120;
/// Marginal on the number of hashes to preallocate memory for in a
/// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) request, when packed
/// according to the [`Eth68`](reth_eth_wire::EthVersion::Eth68) protocol version. To make
/// sure enough memory is preallocated in most cases, it's sensible to use a margin. This,
/// since the capacity is calculated based on median value
/// [`MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED`]. There may otherwise be a noteworthy number of
/// cases where just 1 or 2 bytes too little memory is preallocated.
///
/// Default is 8 hashes.
pub const DEFAULT_MARGINAL_COUNT_HASHES_GET_POOLED_TRANSACTIONS_REQUEST: usize = 8;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/fetch/client.rs | crates/net/network/src/fetch/client.rs | //! A client implementation that can interact with the network and download data.
use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse};
use alloy_primitives::B256;
use futures::{future, future::Either};
use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives};
use reth_network_api::test_utils::PeersHandle;
use reth_network_p2p::{
bodies::client::{BodiesClient, BodiesFut},
download::DownloadClient,
error::{PeerRequestResult, RequestError},
headers::client::{HeadersClient, HeadersRequest},
priority::Priority,
BlockClient,
};
use reth_network_peers::PeerId;
use reth_network_types::ReputationChangeKind;
use std::{
ops::RangeInclusive,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use tokio::sync::{mpsc::UnboundedSender, oneshot};
#[cfg_attr(doc, aquamarine::aquamarine)]
/// Front-end API for fetching data from the network.
///
/// Following diagram illustrates how a request, See [`HeadersClient::get_headers`] and
/// [`BodiesClient::get_block_bodies`] is handled internally.
///
/// include_mmd!("docs/mermaid/fetch-client.mmd")
#[derive(Debug, Clone)]
pub struct FetchClient<N: NetworkPrimitives = EthNetworkPrimitives> {
/// Sender half of the request channel.
pub(crate) request_tx: UnboundedSender<DownloadRequest<N>>,
/// The handle to the peers
pub(crate) peers_handle: PeersHandle,
/// Number of active peer sessions the node's currently handling.
pub(crate) num_active_peers: Arc<AtomicUsize>,
}
impl<N: NetworkPrimitives> DownloadClient for FetchClient<N> {
fn report_bad_message(&self, peer_id: PeerId) {
self.peers_handle.reputation_change(peer_id, ReputationChangeKind::BadMessage);
}
fn num_connected_peers(&self) -> usize {
self.num_active_peers.load(Ordering::Relaxed)
}
}
// The `Output` future of the [HeadersClient] impl of [FetchClient] that either returns a response
// or an error.
type HeadersClientFuture<T> = Either<FlattenedResponse<T>, future::Ready<T>>;
impl<N: NetworkPrimitives> HeadersClient for FetchClient<N> {
type Header = N::BlockHeader;
type Output = HeadersClientFuture<PeerRequestResult<Vec<N::BlockHeader>>>;
/// Sends a `GetBlockHeaders` request to an available peer.
fn get_headers_with_priority(
&self,
request: HeadersRequest,
priority: Priority,
) -> Self::Output {
let (response, rx) = oneshot::channel();
if self
.request_tx
.send(DownloadRequest::GetBlockHeaders { request, response, priority })
.is_ok()
{
Either::Left(FlattenedResponse::from(rx))
} else {
Either::Right(future::err(RequestError::ChannelClosed))
}
}
}
impl<N: NetworkPrimitives> BodiesClient for FetchClient<N> {
type Body = N::BlockBody;
type Output = BodiesFut<N::BlockBody>;
/// Sends a `GetBlockBodies` request to an available peer.
fn get_block_bodies_with_priority_and_range_hint(
&self,
request: Vec<B256>,
priority: Priority,
range_hint: Option<RangeInclusive<u64>>,
) -> Self::Output {
let (response, rx) = oneshot::channel();
if self
.request_tx
.send(DownloadRequest::GetBlockBodies { request, response, priority, range_hint })
.is_ok()
{
Box::pin(FlattenedResponse::from(rx))
} else {
Box::pin(future::err(RequestError::ChannelClosed))
}
}
}
impl<N: NetworkPrimitives> BlockClient for FetchClient<N> {
type Block = N::Block;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/fetch/mod.rs | crates/net/network/src/fetch/mod.rs | //! Fetch data from the network.
mod client;
pub use client::FetchClient;
use crate::{message::BlockRequest, session::BlockRangeInfo};
use alloy_primitives::B256;
use futures::StreamExt;
use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives};
use reth_network_api::test_utils::PeersHandle;
use reth_network_p2p::{
error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult},
headers::client::HeadersRequest,
priority::Priority,
};
use reth_network_peers::PeerId;
use reth_network_types::ReputationChangeKind;
use std::{
collections::{HashMap, VecDeque},
ops::RangeInclusive,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
};
use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot};
use tokio_stream::wrappers::UnboundedReceiverStream;
type InflightHeadersRequest<H> = Request<HeadersRequest, PeerRequestResult<Vec<H>>>;
type InflightBodiesRequest<B> = Request<Vec<B256>, PeerRequestResult<Vec<B>>>;
/// Manages data fetching operations.
///
/// This type is hooked into the staged sync pipeline and delegates download request to available
/// peers and sends the response once ready.
///
/// This type maintains a list of connected peers that are available for requests.
#[derive(Debug)]
pub struct StateFetcher<N: NetworkPrimitives = EthNetworkPrimitives> {
/// Currently active [`GetBlockHeaders`] requests
inflight_headers_requests: HashMap<PeerId, InflightHeadersRequest<N::BlockHeader>>,
/// Currently active [`GetBlockBodies`] requests
inflight_bodies_requests: HashMap<PeerId, InflightBodiesRequest<N::BlockBody>>,
/// The list of _available_ peers for requests.
peers: HashMap<PeerId, Peer>,
/// The handle to the peers manager
peers_handle: PeersHandle,
/// Number of active peer sessions the node's currently handling.
num_active_peers: Arc<AtomicUsize>,
/// Requests queued for processing
queued_requests: VecDeque<DownloadRequest<N>>,
/// Receiver for new incoming download requests
download_requests_rx: UnboundedReceiverStream<DownloadRequest<N>>,
/// Sender for download requests, used to detach a [`FetchClient`]
download_requests_tx: UnboundedSender<DownloadRequest<N>>,
}
// === impl StateSyncer ===
impl<N: NetworkPrimitives> StateFetcher<N> {
pub(crate) fn new(peers_handle: PeersHandle, num_active_peers: Arc<AtomicUsize>) -> Self {
let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel();
Self {
inflight_headers_requests: Default::default(),
inflight_bodies_requests: Default::default(),
peers: Default::default(),
peers_handle,
num_active_peers,
queued_requests: Default::default(),
download_requests_rx: UnboundedReceiverStream::new(download_requests_rx),
download_requests_tx,
}
}
/// Invoked when connected to a new peer.
pub(crate) fn new_active_peer(
&mut self,
peer_id: PeerId,
best_hash: B256,
best_number: u64,
timeout: Arc<AtomicU64>,
range_info: Option<BlockRangeInfo>,
) {
self.peers.insert(
peer_id,
Peer {
state: PeerState::Idle,
best_hash,
best_number,
timeout,
last_response_likely_bad: false,
range_info,
},
);
}
/// Removes the peer from the peer list, after which it is no longer available for future
/// requests.
///
/// Invoked when an active session was closed.
///
/// This cancels also inflight request and sends an error to the receiver.
pub(crate) fn on_session_closed(&mut self, peer: &PeerId) {
self.peers.remove(peer);
if let Some(req) = self.inflight_headers_requests.remove(peer) {
let _ = req.response.send(Err(RequestError::ConnectionDropped));
}
if let Some(req) = self.inflight_bodies_requests.remove(peer) {
let _ = req.response.send(Err(RequestError::ConnectionDropped));
}
}
/// Updates the block information for the peer.
///
/// Returns `true` if this a newer block
pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: B256, number: u64) -> bool {
if let Some(peer) = self.peers.get_mut(peer_id) {
if number > peer.best_number {
peer.best_hash = hash;
peer.best_number = number;
return true
}
}
false
}
/// Invoked when an active session is about to be disconnected.
pub(crate) fn on_pending_disconnect(&mut self, peer_id: &PeerId) {
if let Some(peer) = self.peers.get_mut(peer_id) {
peer.state = PeerState::Closing;
}
}
/// Returns the _next_ idle peer that's ready to accept a request,
/// prioritizing those with the lowest timeout/latency and those that recently responded with
/// adequate data.
fn next_best_peer(&self) -> Option<PeerId> {
let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle());
let mut best_peer = idle.next()?;
for maybe_better in idle {
// replace best peer if our current best peer sent us a bad response last time
if best_peer.1.last_response_likely_bad && !maybe_better.1.last_response_likely_bad {
best_peer = maybe_better;
continue
}
// replace best peer if this peer has better rtt
if maybe_better.1.timeout() < best_peer.1.timeout() &&
!maybe_better.1.last_response_likely_bad
{
best_peer = maybe_better;
}
}
Some(*best_peer.0)
}
/// Returns the next action to return
fn poll_action(&mut self) -> PollAction {
// we only check and not pop here since we don't know yet whether a peer is available.
if self.queued_requests.is_empty() {
return PollAction::NoRequests
}
let Some(peer_id) = self.next_best_peer() else { return PollAction::NoPeersAvailable };
let request = self.queued_requests.pop_front().expect("not empty");
let request = self.prepare_block_request(peer_id, request);
PollAction::Ready(FetchAction::BlockRequest { peer_id, request })
}
/// Advance the state the syncer
pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<FetchAction> {
// drain buffered actions first
loop {
let no_peers_available = match self.poll_action() {
PollAction::Ready(action) => return Poll::Ready(action),
PollAction::NoRequests => false,
PollAction::NoPeersAvailable => true,
};
loop {
// poll incoming requests
match self.download_requests_rx.poll_next_unpin(cx) {
Poll::Ready(Some(request)) => match request.get_priority() {
Priority::High => {
// find the first normal request and queue before, add this request to
// the back of the high-priority queue
let pos = self
.queued_requests
.iter()
.position(|req| req.is_normal_priority())
.unwrap_or(0);
self.queued_requests.insert(pos, request);
}
Priority::Normal => {
self.queued_requests.push_back(request);
}
},
Poll::Ready(None) => {
unreachable!("channel can't close")
}
Poll::Pending => break,
}
}
if self.queued_requests.is_empty() || no_peers_available {
return Poll::Pending
}
}
}
/// Handles a new request to a peer.
///
/// Caution: this assumes the peer exists and is idle
fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest<N>) -> BlockRequest {
// update the peer's state
if let Some(peer) = self.peers.get_mut(&peer_id) {
peer.state = req.peer_state();
}
match req {
DownloadRequest::GetBlockHeaders { request, response, .. } => {
let inflight = Request { request: request.clone(), response };
self.inflight_headers_requests.insert(peer_id, inflight);
let HeadersRequest { start, limit, direction } = request;
BlockRequest::GetBlockHeaders(GetBlockHeaders {
start_block: start,
limit,
skip: 0,
direction,
})
}
DownloadRequest::GetBlockBodies { request, response, .. } => {
let inflight = Request { request: request.clone(), response };
self.inflight_bodies_requests.insert(peer_id, inflight);
BlockRequest::GetBlockBodies(GetBlockBodies(request))
}
}
}
/// Returns a new followup request for the peer.
///
/// Caution: this expects that the peer is _not_ closed.
fn followup_request(&mut self, peer_id: PeerId) -> Option<BlockResponseOutcome> {
let req = self.queued_requests.pop_front()?;
let req = self.prepare_block_request(peer_id, req);
Some(BlockResponseOutcome::Request(peer_id, req))
}
/// Called on a `GetBlockHeaders` response from a peer.
///
/// This delegates the response and returns a [`BlockResponseOutcome`] to either queue in a
/// direct followup request or get the peer reported if the response was a
/// [`EthResponseValidator::reputation_change_err`]
pub(crate) fn on_block_headers_response(
&mut self,
peer_id: PeerId,
res: RequestResult<Vec<N::BlockHeader>>,
) -> Option<BlockResponseOutcome> {
let is_error = res.is_err();
let maybe_reputation_change = res.reputation_change_err();
let resp = self.inflight_headers_requests.remove(&peer_id);
let is_likely_bad_response =
resp.as_ref().is_some_and(|r| res.is_likely_bad_headers_response(&r.request));
if let Some(resp) = resp {
// delegate the response
let _ = resp.response.send(res.map(|h| (peer_id, h).into()));
}
if let Some(peer) = self.peers.get_mut(&peer_id) {
// update the peer's response state
peer.last_response_likely_bad = is_likely_bad_response;
// If the peer is still ready to accept new requests, we try to send a followup
// request immediately.
if peer.state.on_request_finished() && !is_error && !is_likely_bad_response {
return self.followup_request(peer_id)
}
}
// if the response was an `Err` worth reporting the peer for then we return a `BadResponse`
// outcome
maybe_reputation_change
.map(|reputation_change| BlockResponseOutcome::BadResponse(peer_id, reputation_change))
}
/// Called on a `GetBlockBodies` response from a peer
pub(crate) fn on_block_bodies_response(
&mut self,
peer_id: PeerId,
res: RequestResult<Vec<N::BlockBody>>,
) -> Option<BlockResponseOutcome> {
let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty());
if let Some(resp) = self.inflight_bodies_requests.remove(&peer_id) {
let _ = resp.response.send(res.map(|b| (peer_id, b).into()));
}
if let Some(peer) = self.peers.get_mut(&peer_id) {
// update the peer's response state
peer.last_response_likely_bad = is_likely_bad_response;
if peer.state.on_request_finished() && !is_likely_bad_response {
return self.followup_request(peer_id)
}
}
None
}
/// Returns a new [`FetchClient`] that can send requests to this type.
pub(crate) fn client(&self) -> FetchClient<N> {
FetchClient {
request_tx: self.download_requests_tx.clone(),
peers_handle: self.peers_handle.clone(),
num_active_peers: Arc::clone(&self.num_active_peers),
}
}
}
/// The outcome of [`StateFetcher::poll_action`]
enum PollAction {
Ready(FetchAction),
NoRequests,
NoPeersAvailable,
}
/// Represents a connected peer
#[derive(Debug)]
struct Peer {
    /// The state this peer currently resides in.
    state: PeerState,
    /// Best known hash that the peer has
    best_hash: B256,
    /// Tracks the best number of the peer.
    best_number: u64,
    /// Tracks the current timeout value we use for the peer.
    timeout: Arc<AtomicU64>,
    /// Tracks whether the peer has recently responded with a likely bad response.
    ///
    /// This is used to de-rank the peer if there are other peers available.
    /// This exists because empty responses may not be penalized (e.g. when blocks near the tip are
    /// downloaded), but we still want to avoid requesting from the same peer again if it has the
    /// lowest timeout.
    last_response_likely_bad: bool,
    /// Tracks the range info for the peer.
    #[allow(dead_code)]
    range_info: Option<BlockRangeInfo>,
}

impl Peer {
    /// Returns the peer's current timeout value from the shared atomic counter.
    fn timeout(&self) -> u64 {
        self.timeout.load(Ordering::Relaxed)
    }
}
/// Tracks the state of an individual peer
#[derive(Debug)]
enum PeerState {
    /// Peer is currently not handling requests and is available.
    Idle,
    /// Peer is handling a `GetBlockHeaders` request.
    GetBlockHeaders,
    /// Peer is handling a `GetBlockBodies` request.
    GetBlockBodies,
    /// Peer session is about to close
    Closing,
}

// === impl PeerState ===

impl PeerState {
    /// Returns true if the peer is currently idle.
    const fn is_idle(&self) -> bool {
        match self {
            Self::Idle => true,
            _ => false,
        }
    }

    /// Resets the state on a received response.
    ///
    /// If the state was already marked as `Closing` do nothing.
    ///
    /// Returns `true` if the peer is ready for another request.
    const fn on_request_finished(&mut self) -> bool {
        match self {
            // a closing session accepts no further requests
            Self::Closing => false,
            _ => {
                *self = Self::Idle;
                true
            }
        }
    }
}
/// A request that waits for a response from the network, so it can send it back through the
/// response channel.
#[derive(Debug)]
struct Request<Req, Resp> {
    /// The issued request object
    // TODO: this can be attached to the response in error case
    request: Req,
    /// Sender half used to deliver the response back to the requester.
    response: oneshot::Sender<Resp>,
}
/// Requests that can be sent to the Syncer from a [`FetchClient`]
#[derive(Debug)]
pub(crate) enum DownloadRequest<N: NetworkPrimitives> {
    /// Download the requested headers and send response through channel
    GetBlockHeaders {
        request: HeadersRequest,
        response: oneshot::Sender<PeerRequestResult<Vec<N::BlockHeader>>>,
        priority: Priority,
    },
    /// Download the requested block bodies and send response through channel
    GetBlockBodies {
        request: Vec<B256>,
        response: oneshot::Sender<PeerRequestResult<Vec<N::BlockBody>>>,
        priority: Priority,
        /// Optional block range hint for the requested bodies (currently unused).
        #[allow(dead_code)]
        range_hint: Option<RangeInclusive<u64>>,
    },
}
// === impl DownloadRequest ===

impl<N: NetworkPrimitives> DownloadRequest<N> {
    /// Returns the corresponding state for a peer that handles the request.
    const fn peer_state(&self) -> PeerState {
        match self {
            Self::GetBlockHeaders { .. } => PeerState::GetBlockHeaders,
            Self::GetBlockBodies { .. } => PeerState::GetBlockBodies,
        }
    }

    /// Returns the requested priority of this request
    const fn get_priority(&self) -> &Priority {
        match self {
            Self::GetBlockHeaders { priority, .. } | Self::GetBlockBodies { priority, .. } => {
                priority
            }
        }
    }

    /// Returns `true` if this request is normal priority.
    const fn is_normal_priority(&self) -> bool {
        self.get_priority().is_normal()
    }
}
/// An action the syncer can emit.
///
/// Currently the only action is dispatching a block request to a peer.
pub(crate) enum FetchAction {
    /// Dispatch an eth request to the given peer.
    BlockRequest {
        /// The targeted recipient for the request
        peer_id: PeerId,
        /// The request to send
        request: BlockRequest,
    },
}
/// Outcome of a processed response.
///
/// Returned after processing a response.
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum BlockResponseOutcome {
    /// Continue with another request to the peer.
    Request(PeerId, BlockRequest),
    /// A bad response was received; apply the given reputation change to the peer.
    BadResponse(PeerId, ReputationChangeKind),
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{peers::PeersManager, PeersConfig};
    use alloy_consensus::Header;
    use alloy_primitives::B512;
    use std::future::poll_fn;

    #[tokio::test(flavor = "multi_thread")]
    async fn test_poll_fetcher() {
        let manager = PeersManager::new(PeersConfig::default());
        let mut fetcher =
            StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default());

        poll_fn(move |cx| {
            assert!(fetcher.poll(cx).is_pending());
            let (tx, _rx) = oneshot::channel();
            fetcher.queued_requests.push_back(DownloadRequest::GetBlockBodies {
                request: vec![],
                response: tx,
                priority: Priority::default(),
                range_hint: None,
            });
            // still pending: no peers are connected, so the queued request cannot be dispatched
            assert!(fetcher.poll(cx).is_pending());

            Poll::Ready(())
        })
        .await;
    }

    #[tokio::test]
    async fn test_peer_rotation() {
        let manager = PeersManager::new(PeersConfig::default());
        let mut fetcher =
            StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default());
        // Add a few random peers
        let peer1 = B512::random();
        let peer2 = B512::random();
        fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1)), None);
        fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1)), None);

        let first_peer = fetcher.next_best_peer().unwrap();
        assert!(first_peer == peer1 || first_peer == peer2);
        // Pending disconnect for first_peer
        fetcher.on_pending_disconnect(&first_peer);
        // first_peer now isn't idle, so we should get other peer
        let second_peer = fetcher.next_best_peer().unwrap();
        // Fix: this assertion previously re-checked `first_peer` (a copy-paste of the check
        // above), leaving `second_peer` unvalidated.
        assert!(second_peer == peer1 || second_peer == peer2);
        assert_ne!(first_peer, second_peer);
        // without idle peers, returns None
        fetcher.on_pending_disconnect(&second_peer);
        assert_eq!(fetcher.next_best_peer(), None);
    }

    #[tokio::test]
    async fn test_peer_prioritization() {
        let manager = PeersManager::new(PeersConfig::default());
        let mut fetcher =
            StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default());
        // Add a few random peers
        let peer1 = B512::random();
        let peer2 = B512::random();
        let peer3 = B512::random();

        let peer2_timeout = Arc::new(AtomicU64::new(300));

        fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(30)), None);
        fetcher.new_active_peer(peer2, B256::random(), 2, Arc::clone(&peer2_timeout), None);
        fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50)), None);

        // Must always get peer1 (lowest timeout)
        assert_eq!(fetcher.next_best_peer(), Some(peer1));
        assert_eq!(fetcher.next_best_peer(), Some(peer1));

        // peer2's timeout changes below peer1's
        peer2_timeout.store(10, Ordering::Relaxed);

        // Then we get peer 2 always (now lowest)
        assert_eq!(fetcher.next_best_peer(), Some(peer2));
        assert_eq!(fetcher.next_best_peer(), Some(peer2));
    }

    #[tokio::test]
    async fn test_on_block_headers_response() {
        let manager = PeersManager::new(PeersConfig::default());
        let mut fetcher =
            StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default());
        let peer_id = B512::random();

        assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None);

        // only a timeout is reported as a bad response; the other errors are not peer faults
        assert_eq!(
            fetcher.on_block_headers_response(peer_id, Err(RequestError::Timeout)),
            Some(BlockResponseOutcome::BadResponse(peer_id, ReputationChangeKind::Timeout))
        );
        assert_eq!(
            fetcher.on_block_headers_response(peer_id, Err(RequestError::BadResponse)),
            None
        );
        assert_eq!(
            fetcher.on_block_headers_response(peer_id, Err(RequestError::ChannelClosed)),
            None
        );
        assert_eq!(
            fetcher.on_block_headers_response(peer_id, Err(RequestError::ConnectionDropped)),
            None
        );
        assert_eq!(
            fetcher.on_block_headers_response(peer_id, Err(RequestError::UnsupportedCapability)),
            None
        );
    }

    #[tokio::test]
    async fn test_header_response_outcome() {
        let manager = PeersManager::new(PeersConfig::default());
        let mut fetcher =
            StateFetcher::<EthNetworkPrimitives>::new(manager.handle(), Default::default());
        let peer_id = B512::random();

        let request_pair = || {
            let (tx, _rx) = oneshot::channel();
            let req = Request {
                request: HeadersRequest {
                    start: 0u64.into(),
                    limit: 1,
                    direction: Default::default(),
                },
                response: tx,
            };
            let header = Header { number: 0, ..Default::default() };
            (req, header)
        };

        fetcher.new_active_peer(
            peer_id,
            Default::default(),
            Default::default(),
            Default::default(),
            None,
        );

        let (req, header) = request_pair();
        fetcher.inflight_headers_requests.insert(peer_id, req);

        let outcome = fetcher.on_block_headers_response(peer_id, Ok(vec![header]));

        assert!(outcome.is_none());
        assert!(fetcher.peers[&peer_id].state.is_idle());

        let outcome =
            fetcher.on_block_headers_response(peer_id, Err(RequestError::Timeout)).unwrap();

        assert!(EthResponseValidator::reputation_change_err(&Err::<Vec<Header>, _>(
            RequestError::Timeout
        ))
        .is_some());

        match outcome {
            BlockResponseOutcome::BadResponse(peer, _) => {
                assert_eq!(peer, peer_id)
            }
            BlockResponseOutcome::Request(_, _) => {
                unreachable!()
            }
        };

        assert!(fetcher.peers[&peer_id].state.is_idle());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/test_utils/transactions.rs | crates/net/network/src/test_utils/transactions.rs | //! Test helper impls for transactions
#![allow(dead_code)]
use crate::{
cache::LruCache,
transactions::{
constants::{
tx_fetcher::DEFAULT_MAX_COUNT_FALLBACK_PEERS,
tx_manager::DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER,
},
fetcher::{TransactionFetcher, TxFetchMetadata},
PeerMetadata, TransactionsManager,
},
NetworkConfigBuilder, NetworkManager,
};
use alloy_primitives::TxHash;
use reth_eth_wire::EthVersion;
use reth_eth_wire_types::EthNetworkPrimitives;
use reth_network_api::{PeerKind, PeerRequest, PeerRequestSender};
use reth_network_peers::PeerId;
use reth_storage_api::noop::NoopProvider;
use reth_transaction_pool::test_utils::{testing_pool, TestPool};
use secp256k1::SecretKey;
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::trace;
/// A new tx manager for testing.
///
/// Builds a [`NetworkManager`] with discovery disabled and an OS-assigned listener port,
/// attaches a test pool, and returns the (not yet spawned) transactions manager together
/// with the network it is wired to.
pub async fn new_tx_manager(
) -> (TransactionsManager<TestPool, EthNetworkPrimitives>, NetworkManager<EthNetworkPrimitives>) {
    let secret_key = SecretKey::new(&mut rand_08::thread_rng());
    let client = NoopProvider::default();

    let config = NetworkConfigBuilder::new(secret_key)
        // let OS choose port
        .listener_port(0)
        .disable_discovery()
        .build(client);

    let pool = testing_pool();
    // clone the config before it is consumed by `NetworkManager::new`
    let transactions_manager_config = config.transactions_manager_config.clone();
    let (_network_handle, network, transactions, _) = NetworkManager::new(config)
        .await
        .unwrap()
        .into_builder()
        .transactions(pool.clone(), transactions_manager_config)
        .split_with_handle();

    (transactions, network)
}
/// Directly buffer hash into tx fetcher for testing.
///
/// Registers `hash` in the fetcher's inflight/pending-fetch cache with the given retry
/// counter and optional encoded length, records `peer_id` as a fallback peer for it, and
/// finally marks the hash as pending fetch.
pub fn buffer_hash_to_tx_fetcher(
    tx_fetcher: &mut TransactionFetcher,
    hash: TxHash,
    peer_id: PeerId,
    retries: u8,
    tx_encoded_length: Option<usize>,
) {
    match tx_fetcher.hashes_fetch_inflight_and_pending_fetch.get_or_insert(hash, || {
        TxFetchMetadata::new(
            retries,
            LruCache::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS as u32),
            tx_encoded_length,
        )
    }) {
        Some(metadata) => {
            // remember this peer as an alternative source for the hash
            metadata.fallback_peers_mut().insert(peer_id);
        }
        None => {
            trace!(target: "net::tx",
                peer_id=format!("{peer_id:#}"),
                %hash,
                "failed to insert hash from peer in schnellru::LruMap, dropping hash"
            )
        }
    }

    tx_fetcher.hashes_pending_fetch.insert(hash);
}
/// Mock a new session, returns (peer, channel-to-send-get-pooled-tx-response-on).
///
/// The returned [`PeerMetadata`] is created with an empty client version, the default
/// seen-transactions limit and [`PeerKind::Trusted`].
pub fn new_mock_session(
    peer_id: PeerId,
    version: EthVersion,
) -> (PeerMetadata<EthNetworkPrimitives>, mpsc::Receiver<PeerRequest>) {
    // bounded channel of capacity 1 standing in for the session's request channel
    let (to_mock_session_tx, to_mock_session_rx) = mpsc::channel(1);

    (
        PeerMetadata::new(
            PeerRequestSender::new(peer_id, to_mock_session_tx),
            version,
            Arc::from(""),
            DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER,
            PeerKind::Trusted,
        ),
        to_mock_session_rx,
    )
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/test_utils/mod.rs | crates/net/network/src/test_utils/mod.rs | //! Common helpers for network testing.
mod init;
mod testnet;
pub mod transactions;
pub use init::{
enr_to_peer_id, unused_port, unused_tcp_addr, unused_tcp_and_udp_port, unused_tcp_udp,
unused_udp_addr, unused_udp_port,
};
pub use testnet::{NetworkEventStream, Peer, PeerConfig, PeerHandle, Testnet, TestnetHandle};
pub use transactions::{buffer_hash_to_tx_fetcher, new_mock_session, new_tx_manager};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/test_utils/init.rs | crates/net/network/src/test_utils/init.rs | use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey};
use reth_network_peers::PeerId;
use std::net::SocketAddr;
/// Obtains a `PeerId` from an ENR. In this case, the `PeerId` represents the public key contained
/// in the ENR.
pub fn enr_to_peer_id(enr: Enr<SigningKey>) -> PeerId {
    // In the following tests, methods which accept a public key expect it to contain the public
    // key in its 64-byte encoded (uncompressed) form.
    enr.public_key().encode_uncompressed().into()
}
// copied from ethers-rs
/// A bit of hack to find an unused TCP port.
///
/// Does not guarantee that the given port is unused after the function exits, just that it was
/// unused before the function started (i.e., it does not reserve a port).
pub fn unused_port() -> u16 {
    // delegates to `unused_tcp_addr`, which binds to port 0 so the OS picks a free port
    unused_tcp_addr().port()
}
/// Finds an unused tcp address
///
/// Binds a short-lived listener to port 0 so the OS assigns a currently free port, then
/// reports the resulting local address. The port is not reserved once the listener drops.
pub fn unused_tcp_addr() -> SocketAddr {
    std::net::TcpListener::bind("127.0.0.1:0")
        .expect("Failed to create TCP listener to find unused port")
        .local_addr()
        .expect("Failed to read TCP listener local_addr to find unused port")
}
/// Finds an unused udp port
///
/// Like [`unused_port`], the port is only known to be free at call time; it is not reserved.
pub fn unused_udp_port() -> u16 {
    unused_udp_addr().port()
}
/// Finds an unused udp address
///
/// Binds a throwaway UDP socket to port 0 so the OS assigns a currently free port, then
/// reports the resulting local address. The port is not reserved once the socket drops.
pub fn unused_udp_addr() -> SocketAddr {
    std::net::UdpSocket::bind("127.0.0.1:0")
        .expect("Failed to create UDP listener to find unused port")
        .local_addr()
        .expect("Failed to read UDP listener local_addr to find unused port")
}
/// Finds a single port that is unused for both TCP and UDP.
///
/// Repeatedly samples a free TCP port and keeps the first one that can also be bound as UDP.
pub fn unused_tcp_and_udp_port() -> u16 {
    loop {
        let candidate = unused_port();
        if std::net::UdpSocket::bind(("127.0.0.1", candidate)).is_ok() {
            return candidate
        }
    }
}
/// Creates two unused `SocketAddrs`, intended for use as the p2p (TCP) and discovery ports (UDP)
/// for new reth instances.
///
/// The two addresses are found independently, so their port numbers need not match.
pub fn unused_tcp_udp() -> (SocketAddr, SocketAddr) {
    (unused_tcp_addr(), unused_udp_addr())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/test_utils/testnet.rs | crates/net/network/src/test_utils/testnet.rs | //! A network implementation for testing purposes.
use crate::{
builder::ETH_REQUEST_CHANNEL_CAPACITY,
error::NetworkError,
eth_requests::EthRequestHandler,
protocol::IntoRlpxSubProtocol,
transactions::{
config::{StrictEthAnnouncementFilter, TransactionPropagationKind},
policy::NetworkPolicies,
TransactionsHandle, TransactionsManager, TransactionsManagerConfig,
},
NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager,
};
use futures::{FutureExt, StreamExt};
use pin_project::pin_project;
use reth_chainspec::{ChainSpecProvider, EthereumHardforks, Hardforks};
use reth_eth_wire::{
protocol::Protocol, DisconnectReason, EthNetworkPrimitives, HelloMessageWithProtocols,
};
use reth_ethereum_primitives::{PooledTransactionVariant, TransactionSigned};
use reth_network_api::{
events::{PeerEvent, SessionInfo},
test_utils::{PeersHandle, PeersHandleProvider},
NetworkEvent, NetworkEventListenerProvider, NetworkInfo, Peers,
};
use reth_network_peers::PeerId;
use reth_storage_api::{
noop::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory,
};
use reth_tasks::TokioTaskExecutor;
use reth_tokio_util::EventStream;
use reth_transaction_pool::{
blobstore::InMemoryBlobStore,
test_utils::{TestPool, TestPoolBuilder},
EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor,
};
use secp256k1::SecretKey;
use std::{
fmt,
future::Future,
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
pin::Pin,
task::{Context, Poll},
};
use tokio::{
sync::{
mpsc::{channel, unbounded_channel},
oneshot,
},
task::JoinHandle,
};
/// A test network consisting of multiple peers.
pub struct Testnet<C, Pool> {
    /// All running peers in the network.
    ///
    /// Peers are only driven while the [`Testnet`] future itself is polled (or spawned via
    /// [`Testnet::spawn`]).
    peers: Vec<Peer<C, Pool>>,
}
// === impl Testnet ===

impl<C> Testnet<C, TestPool>
where
    C: BlockReader + HeaderProvider + Clone + 'static + ChainSpecProvider<ChainSpec: Hardforks>,
{
    /// Same as [`Self::try_create_with`] but panics on error
    pub async fn create_with(num_peers: usize, provider: C) -> Self {
        Self::try_create_with(num_peers, provider).await.unwrap()
    }

    /// Creates a new [`Testnet`] with the given number of peers and the provider.
    pub async fn try_create_with(num_peers: usize, provider: C) -> Result<Self, NetworkError> {
        let mut this = Self { peers: Vec::with_capacity(num_peers) };
        for _ in 0..num_peers {
            let config = PeerConfig::new(provider.clone());
            this.add_peer_with_config(config).await?;
        }
        Ok(this)
    }

    /// Extend the list of peers with new peers that are configured with each of the given
    /// [`PeerConfig`]s.
    pub async fn extend_peer_with_config(
        &mut self,
        configs: impl IntoIterator<Item = PeerConfig<C>>,
    ) -> Result<(), NetworkError> {
        // launch all peers concurrently, then collect them in order
        let peers = configs.into_iter().map(|c| c.launch()).collect::<Vec<_>>();
        let peers = futures::future::join_all(peers).await;
        for peer in peers {
            self.peers.push(peer?);
        }
        Ok(())
    }
}
impl<C, Pool> Testnet<C, Pool>
where
    C: BlockReader + HeaderProvider + Clone + 'static,
    Pool: TransactionPool,
{
    /// Return a mutable slice of all peers.
    pub fn peers_mut(&mut self) -> &mut [Peer<C, Pool>] {
        &mut self.peers
    }

    /// Return a slice of all peers.
    pub fn peers(&self) -> &[Peer<C, Pool>] {
        &self.peers
    }

    /// Remove a peer from the [`Testnet`] and return it.
    ///
    /// # Panics
    /// If the index is out of bounds.
    pub fn remove_peer(&mut self, index: usize) -> Peer<C, Pool> {
        self.peers.remove(index)
    }

    /// Return a mutable iterator over all peers.
    pub fn peers_iter_mut(&mut self) -> impl Iterator<Item = &mut Peer<C, Pool>> + '_ {
        self.peers.iter_mut()
    }

    /// Return an iterator over all peers.
    pub fn peers_iter(&self) -> impl Iterator<Item = &Peer<C, Pool>> + '_ {
        self.peers.iter()
    }

    /// Add a peer to the [`Testnet`] with the given [`PeerConfig`].
    ///
    /// The new peer has no request handler and no transactions manager installed.
    pub async fn add_peer_with_config(
        &mut self,
        config: PeerConfig<C>,
    ) -> Result<(), NetworkError> {
        let PeerConfig { config, client, secret_key } = config;

        let network = NetworkManager::new(config).await?;
        let peer = Peer {
            network,
            client,
            secret_key,
            request_handler: None,
            transactions_manager: None,
            pool: None,
        };
        self.peers.push(peer);
        Ok(())
    }

    /// Returns all handles to the networks
    pub fn handles(&self) -> impl Iterator<Item = NetworkHandle<EthNetworkPrimitives>> + '_ {
        self.peers.iter().map(|p| p.handle())
    }

    /// Maps the pool of each peer with the given closure
    pub fn map_pool<F, P>(self, f: F) -> Testnet<C, P>
    where
        F: Fn(Peer<C, Pool>) -> Peer<C, P>,
        P: TransactionPool,
    {
        Testnet { peers: self.peers.into_iter().map(f).collect() }
    }

    /// Apply a closure on each peer
    pub fn for_each<F>(&self, f: F)
    where
        F: Fn(&Peer<C, Pool>),
    {
        self.peers.iter().for_each(f)
    }

    /// Apply a closure on each peer
    pub fn for_each_mut<F>(&mut self, f: F)
    where
        F: FnMut(&mut Peer<C, Pool>),
    {
        self.peers.iter_mut().for_each(f)
    }
}
impl<C, Pool> Testnet<C, Pool>
where
    C: ChainSpecProvider<ChainSpec: EthereumHardforks>
        + StateProviderFactory
        + BlockReaderIdExt
        + HeaderProvider
        + Clone
        + 'static,
    Pool: TransactionPool,
{
    /// Installs an eth pool on each peer
    ///
    /// Each peer gets its own in-memory blob store and validation task executor.
    pub fn with_eth_pool(self) -> Testnet<C, EthTransactionPool<C, InMemoryBlobStore>> {
        self.map_pool(|peer| {
            let blob_store = InMemoryBlobStore::default();
            let pool = TransactionValidationTaskExecutor::eth(
                peer.client.clone(),
                blob_store.clone(),
                TokioTaskExecutor::default(),
            );
            peer.map_transactions_manager(EthTransactionPool::eth_pool(
                pool,
                blob_store,
                Default::default(),
            ))
        })
    }

    /// Installs an eth pool on each peer with custom transaction manager config
    pub fn with_eth_pool_config(
        self,
        tx_manager_config: TransactionsManagerConfig,
    ) -> Testnet<C, EthTransactionPool<C, InMemoryBlobStore>> {
        // uses the default propagation policy
        self.with_eth_pool_config_and_policy(tx_manager_config, Default::default())
    }

    /// Installs an eth pool on each peer with custom transaction manager config and policy.
    pub fn with_eth_pool_config_and_policy(
        self,
        tx_manager_config: TransactionsManagerConfig,
        policy: TransactionPropagationKind,
    ) -> Testnet<C, EthTransactionPool<C, InMemoryBlobStore>> {
        self.map_pool(|peer| {
            let blob_store = InMemoryBlobStore::default();
            let pool = TransactionValidationTaskExecutor::eth(
                peer.client.clone(),
                blob_store.clone(),
                TokioTaskExecutor::default(),
            );
            peer.map_transactions_manager_with(
                EthTransactionPool::eth_pool(pool, blob_store, Default::default()),
                tx_manager_config.clone(),
                policy,
            )
        })
    }
}
impl<C, Pool> Testnet<C, Pool>
where
    C: BlockReader<
            Block = reth_ethereum_primitives::Block,
            Receipt = reth_ethereum_primitives::Receipt,
            Header = alloy_consensus::Header,
        > + HeaderProvider
        + Clone
        + Unpin
        + 'static,
    Pool: TransactionPool<
            Transaction: PoolTransaction<
                Consensus = TransactionSigned,
                Pooled = PooledTransactionVariant,
            >,
        > + Unpin
        + 'static,
{
    /// Spawns the testnet to a separate task
    pub fn spawn(self) -> TestnetHandle<C, Pool> {
        // channel used by `TestnetHandle::terminate` to stop the task and recover the testnet
        let (tx, rx) = oneshot::channel::<oneshot::Sender<Self>>();
        let peers = self.peers.iter().map(|peer| peer.peer_handle()).collect::<Vec<_>>();
        let mut net = self;
        let handle = tokio::task::spawn(async move {
            let mut tx = None;
            tokio::select! {
                // drive the testnet until a terminate request arrives
                _ = &mut net => {}
                inc = rx => {
                    tx = inc.ok();
                }
            }
            // hand the testnet back to whoever requested termination
            if let Some(tx) = tx {
                let _ = tx.send(net);
            }
        });
        TestnetHandle { _handle: handle, peers, terminate: tx }
    }
}
impl Testnet<NoopProvider, TestPool> {
    /// Same as [`Self::try_create`] but panics on error
    pub async fn create(num_peers: usize) -> Self {
        Self::try_create(num_peers).await.unwrap()
    }

    /// Creates a new [`Testnet`] with the given number of peers
    ///
    /// Each peer uses the default [`PeerConfig`] (noop provider, random secret key).
    pub async fn try_create(num_peers: usize) -> Result<Self, NetworkError> {
        let mut this = Self::default();

        this.extend_peer_with_config((0..num_peers).map(|_| Default::default())).await?;
        Ok(this)
    }

    /// Add a peer to the [`Testnet`]
    pub async fn add_peer(&mut self) -> Result<(), NetworkError> {
        self.add_peer_with_config(Default::default()).await
    }
}
impl<C, Pool> Default for Testnet<C, Pool> {
    /// Creates an empty testnet with no peers.
    fn default() -> Self {
        let peers = Vec::new();
        Self { peers }
    }
}
impl<C, Pool> fmt::Debug for Testnet<C, Pool> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `debug_struct` renders the braces itself, so pass only the type name.
        // The previous literal "Testnet {{}}" produced output like `Testnet {{}} { .. }`.
        f.debug_struct("Testnet").finish_non_exhaustive()
    }
}
impl<C, Pool> Future for Testnet<C, Pool>
where
    C: BlockReader<
            Block = reth_ethereum_primitives::Block,
            Receipt = reth_ethereum_primitives::Receipt,
            Header = alloy_consensus::Header,
        > + HeaderProvider
        + Unpin
        + 'static,
    Pool: TransactionPool<
            Transaction: PoolTransaction<
                Consensus = TransactionSigned,
                Pooled = PooledTransactionVariant,
            >,
        > + Unpin
        + 'static,
{
    type Output = ();

    /// Drives every peer once per poll.
    ///
    /// This future never resolves: it always returns [`Poll::Pending`], so it must be dropped
    /// or raced (e.g. via `select!`) to stop the network.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        for peer in &mut this.peers {
            let _ = peer.poll_unpin(cx);
        }
        Poll::Pending
    }
}
/// A handle to a [`Testnet`] that can be shared.
#[derive(Debug)]
pub struct TestnetHandle<C, Pool> {
    /// Handle to the spawned task driving the testnet future.
    _handle: JoinHandle<()>,
    /// Handles to the individual peers of the testnet.
    peers: Vec<PeerHandle<Pool>>,
    /// Channel used by [`Self::terminate`] to stop the task and get the [`Testnet`] back.
    terminate: oneshot::Sender<oneshot::Sender<Testnet<C, Pool>>>,
}
// === impl TestnetHandle ===

impl<C, Pool> TestnetHandle<C, Pool> {
    /// Terminates the task and returns the [`Testnet`] back.
    pub async fn terminate(self) -> Testnet<C, Pool> {
        // send a return channel to the spawned task and await the testnet on it
        let (tx, rx) = oneshot::channel();
        self.terminate.send(tx).unwrap();
        rx.await.unwrap()
    }

    /// Returns the [`PeerHandle`]s of this [`Testnet`].
    pub fn peers(&self) -> &[PeerHandle<Pool>] {
        &self.peers
    }

    /// Connects all peers with each other.
    ///
    /// This establishes sessions concurrently between all peers.
    ///
    /// Returns once all sessions are established.
    pub async fn connect_peers(&self) {
        // nothing to connect with fewer than two peers
        if self.peers.len() < 2 {
            return
        }

        // add an event stream for _each_ peer
        let streams =
            self.peers.iter().map(|handle| NetworkEventStream::new(handle.event_listener()));

        // add all peers to each other (each unordered pair exactly once)
        for (idx, handle) in self.peers.iter().enumerate().take(self.peers.len() - 1) {
            for idx in (idx + 1)..self.peers.len() {
                let neighbour = &self.peers[idx];
                handle.network.add_peer(*neighbour.peer_id(), neighbour.local_addr());
            }
        }

        // await all sessions to be established
        let num_sessions_per_peer = self.peers.len() - 1;
        let fut = streams.into_iter().map(|mut stream| async move {
            stream.take_session_established(num_sessions_per_peer).await
        });

        futures::future::join_all(fut).await;
    }
}
/// A peer in the [`Testnet`].
#[pin_project]
#[derive(Debug)]
pub struct Peer<C, Pool = TestPool> {
    /// The network this peer runs.
    #[pin]
    network: NetworkManager<EthNetworkPrimitives>,
    /// Optional handler for incoming eth requests.
    #[pin]
    request_handler: Option<EthRequestHandler<C, EthNetworkPrimitives>>,
    /// Optional transactions manager connected to the network.
    #[pin]
    transactions_manager: Option<
        TransactionsManager<
            Pool,
            EthNetworkPrimitives,
            NetworkPolicies<TransactionPropagationKind, StrictEthAnnouncementFilter>,
        >,
    >,
    /// The transaction pool, set together with the transactions manager.
    pool: Option<Pool>,
    /// The client/provider this peer serves data from.
    client: C,
    /// The peer's network secret key.
    secret_key: SecretKey,
}
// === impl Peer ===

impl<C, Pool> Peer<C, Pool>
where
    C: BlockReader + HeaderProvider + Clone + 'static,
    Pool: TransactionPool,
{
    /// Returns the number of connected peers.
    pub fn num_peers(&self) -> usize {
        self.network.num_connected_peers()
    }

    /// Adds an additional protocol handler to the peer.
    pub fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
        self.network.add_rlpx_sub_protocol(protocol);
    }

    /// Returns a handle to the peer's network.
    pub fn peer_handle(&self) -> PeerHandle<Pool> {
        PeerHandle {
            network: self.network.handle().clone(),
            pool: self.pool.clone(),
            transactions: self.transactions_manager.as_ref().map(|mgr| mgr.handle()),
        }
    }

    /// The address that listens for incoming connections.
    pub const fn local_addr(&self) -> SocketAddr {
        self.network.local_addr()
    }

    /// The [`PeerId`] of this peer.
    pub fn peer_id(&self) -> PeerId {
        *self.network.peer_id()
    }

    /// Returns mutable access to the network.
    pub const fn network_mut(&mut self) -> &mut NetworkManager<EthNetworkPrimitives> {
        &mut self.network
    }

    /// Returns the [`NetworkHandle`] of this peer.
    pub fn handle(&self) -> NetworkHandle<EthNetworkPrimitives> {
        self.network.handle().clone()
    }

    /// Returns the transaction pool of this peer, if one was installed.
    pub const fn pool(&self) -> Option<&Pool> {
        self.pool.as_ref()
    }

    /// Set a new request handler that's connected to the peer's network
    pub fn install_request_handler(&mut self) {
        let (tx, rx) = channel(ETH_REQUEST_CHANNEL_CAPACITY);
        self.network.set_eth_request_handler(tx);
        let peers = self.network.peers_handle();
        let request_handler = EthRequestHandler::new(self.client.clone(), peers, rx);
        self.request_handler = Some(request_handler);
    }

    /// Set a new transactions manager that's connected to the peer's network
    pub fn install_transactions_manager(&mut self, pool: Pool) {
        let (tx, rx) = unbounded_channel();
        self.network.set_transactions(tx);
        let transactions_manager = TransactionsManager::new(
            self.handle(),
            pool.clone(),
            rx,
            TransactionsManagerConfig::default(),
        );
        self.transactions_manager = Some(transactions_manager);
        self.pool = Some(pool);
    }

    /// Set a new transactions manager that's connected to the peer's network
    ///
    /// Consuming variant of [`Self::install_transactions_manager`] that allows changing the
    /// pool type of the peer.
    pub fn map_transactions_manager<P>(self, pool: P) -> Peer<C, P>
    where
        P: TransactionPool,
    {
        let Self { mut network, request_handler, client, secret_key, .. } = self;
        let (tx, rx) = unbounded_channel();
        network.set_transactions(tx);
        let transactions_manager = TransactionsManager::new(
            network.handle().clone(),
            pool.clone(),
            rx,
            TransactionsManagerConfig::default(),
        );

        Peer {
            network,
            request_handler,
            transactions_manager: Some(transactions_manager),
            pool: Some(pool),
            client,
            secret_key,
        }
    }

    /// Map transactions manager with custom config
    pub fn map_transactions_manager_with_config<P>(
        self,
        pool: P,
        config: TransactionsManagerConfig,
    ) -> Peer<C, P>
    where
        P: TransactionPool,
    {
        // uses the default propagation policy
        self.map_transactions_manager_with(pool, config, Default::default())
    }

    /// Map transactions manager with custom config and the given policy.
    pub fn map_transactions_manager_with<P>(
        self,
        pool: P,
        config: TransactionsManagerConfig,
        policy: TransactionPropagationKind,
    ) -> Peer<C, P>
    where
        P: TransactionPool,
    {
        let Self { mut network, request_handler, client, secret_key, .. } = self;
        let (tx, rx) = unbounded_channel();
        network.set_transactions(tx);
        let announcement_policy = StrictEthAnnouncementFilter::default();
        let policies = NetworkPolicies::new(policy, announcement_policy);
        let transactions_manager = TransactionsManager::with_policy(
            network.handle().clone(),
            pool.clone(),
            rx,
            config,
            policies,
        );

        Peer {
            network,
            request_handler,
            transactions_manager: Some(transactions_manager),
            pool: Some(pool),
            client,
            secret_key,
        }
    }
}
impl<C> Peer<C>
where
    C: BlockReader + HeaderProvider + Clone + 'static,
{
    /// Installs a new [`TestPool`]
    ///
    /// Convenience wrapper around [`Self::install_transactions_manager`].
    pub fn install_test_pool(&mut self) {
        self.install_transactions_manager(TestPoolBuilder::default().into())
    }
}
impl<C, Pool> Future for Peer<C, Pool>
where
    C: BlockReader<
            Block = reth_ethereum_primitives::Block,
            Receipt = reth_ethereum_primitives::Receipt,
            Header = alloy_consensus::Header,
        > + HeaderProvider
        + Unpin
        + 'static,
    Pool: TransactionPool<
            Transaction: PoolTransaction<
                Consensus = TransactionSigned,
                Pooled = PooledTransactionVariant,
            >,
        > + Unpin
        + 'static,
{
    type Output = ();

    /// Polls the optional request handler and transactions manager, then the network itself.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();

        if let Some(request) = this.request_handler.as_pin_mut() {
            let _ = request.poll(cx);
        }
        if let Some(tx_manager) = this.transactions_manager.as_pin_mut() {
            let _ = tx_manager.poll(cx);
        }

        this.network.poll(cx)
    }
}
/// A helper config for setting up the reth networking stack.
#[derive(Debug)]
pub struct PeerConfig<C = NoopProvider> {
    /// The network configuration used to launch the peer.
    config: NetworkConfig<C>,
    /// The client/provider the peer serves data from.
    client: C,
    /// The peer's network secret key.
    secret_key: SecretKey,
}
/// A handle to a peer in the [`Testnet`].
#[derive(Debug)]
pub struct PeerHandle<Pool> {
    /// Handle to the peer's network.
    network: NetworkHandle<EthNetworkPrimitives>,
    /// Handle to the peer's transactions manager, if one was installed.
    transactions: Option<TransactionsHandle<EthNetworkPrimitives>>,
    /// The peer's transaction pool, if one was installed.
    pool: Option<Pool>,
}
// === impl PeerHandle ===

impl<Pool> PeerHandle<Pool> {
    /// Returns the [`PeerId`] used in the network.
    pub fn peer_id(&self) -> &PeerId {
        self.network.peer_id()
    }

    /// Returns the [`PeersHandle`] from the network.
    pub fn peer_handle(&self) -> &PeersHandle {
        self.network.peers_handle()
    }

    /// Returns the local socket as configured for the network.
    pub fn local_addr(&self) -> SocketAddr {
        self.network.local_addr()
    }

    /// Creates a new [`NetworkEvent`] listener channel.
    pub fn event_listener(&self) -> EventStream<NetworkEvent> {
        self.network.event_listener()
    }

    /// Returns the [`TransactionsHandle`] of this peer, if a transactions manager was installed.
    pub const fn transactions(&self) -> Option<&TransactionsHandle> {
        self.transactions.as_ref()
    }

    /// Returns the transaction pool of this peer, if one was installed.
    pub const fn pool(&self) -> Option<&Pool> {
        self.pool.as_ref()
    }

    /// Returns the [`NetworkHandle`] of this peer.
    pub const fn network(&self) -> &NetworkHandle<EthNetworkPrimitives> {
        &self.network
    }
}
// === impl PeerConfig ===

impl<C> PeerConfig<C>
where
    C: BlockReader + HeaderProvider + Clone + 'static,
{
    /// Launches the network and returns the [Peer] that manages it
    pub async fn launch(self) -> Result<Peer<C>, NetworkError> {
        let Self { config, client, secret_key } = self;
        let network = NetworkManager::new(config).await?;
        let peer = Peer {
            network,
            client,
            secret_key,
            request_handler: None,
            transactions_manager: None,
            pool: None,
        };
        Ok(peer)
    }

    /// Initialize the network with a random secret key, allowing the devp2p and discovery to bind
    /// to any available IP and port.
    pub fn new(client: C) -> Self
    where
        C: ChainSpecProvider<ChainSpec: Hardforks>,
    {
        let secret_key = SecretKey::new(&mut rand_08::thread_rng());
        let config = Self::network_config_builder(secret_key).build(client.clone());
        Self { config, client, secret_key }
    }

    /// Initialize the network with a given secret key, allowing devp2p and discovery to bind any
    /// available IP and port.
    pub fn with_secret_key(client: C, secret_key: SecretKey) -> Self
    where
        C: ChainSpecProvider<ChainSpec: Hardforks>,
    {
        let config = Self::network_config_builder(secret_key).build(client.clone());
        Self { config, client, secret_key }
    }

    /// Initialize the network with a given capabilities.
    pub fn with_protocols(client: C, protocols: impl IntoIterator<Item = Protocol>) -> Self
    where
        C: ChainSpecProvider<ChainSpec: Hardforks>,
    {
        let secret_key = SecretKey::new(&mut rand_08::thread_rng());
        let builder = Self::network_config_builder(secret_key);

        let hello_message =
            HelloMessageWithProtocols::builder(builder.get_peer_id()).protocols(protocols).build();
        let config = builder.hello_message(hello_message).build(client.clone());

        Self { config, client, secret_key }
    }

    /// Returns a base network config builder with OS-assigned listener/discovery addresses and
    /// DNS/discv4 discovery disabled.
    fn network_config_builder(secret_key: SecretKey) -> NetworkConfigBuilder {
        NetworkConfigBuilder::new(secret_key)
            .listener_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)))
            .discovery_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)))
            .disable_dns_discovery()
            .disable_discv4_discovery()
    }
}
impl Default for PeerConfig {
fn default() -> Self {
Self::new(NoopProvider::default())
}
}
/// A helper type to await network events
///
/// This makes it easier to await established connections
#[derive(Debug)]
pub struct NetworkEventStream {
inner: EventStream<NetworkEvent>,
}
// === impl NetworkEventStream ===
impl NetworkEventStream {
/// Create a new [`NetworkEventStream`] from the given network event receiver stream.
pub const fn new(inner: EventStream<NetworkEvent>) -> Self {
Self { inner }
}
/// Awaits the next event for a session to be closed
pub async fn next_session_closed(&mut self) -> Option<(PeerId, Option<DisconnectReason>)> {
while let Some(ev) = self.inner.next().await {
if let NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) = ev {
return Some((peer_id, reason))
}
}
None
}
/// Awaits the next event for an established session
pub async fn next_session_established(&mut self) -> Option<PeerId> {
while let Some(ev) = self.inner.next().await {
match ev {
NetworkEvent::ActivePeerSession { info, .. } |
NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => {
return Some(info.peer_id)
}
_ => {}
}
}
None
}
/// Awaits the next `num` events for an established session
pub async fn take_session_established(&mut self, mut num: usize) -> Vec<PeerId> {
if num == 0 {
return Vec::new();
}
let mut peers = Vec::with_capacity(num);
while let Some(ev) = self.inner.next().await {
if let NetworkEvent::ActivePeerSession { info: SessionInfo { peer_id, .. }, .. } = ev {
peers.push(peer_id);
num -= 1;
if num == 0 {
return peers;
}
}
}
peers
}
/// Ensures that the first two events are a [`NetworkEvent::Peer`] and
/// [`PeerEvent::PeerAdded`][`NetworkEvent::ActivePeerSession`], returning the [`PeerId`] of the
/// established session.
pub async fn peer_added_and_established(&mut self) -> Option<PeerId> {
let peer_id = match self.inner.next().await {
Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => peer_id,
_ => return None,
};
match self.inner.next().await {
Some(NetworkEvent::ActivePeerSession {
info: SessionInfo { peer_id: peer_id2, .. },
..
}) => {
debug_assert_eq!(
peer_id, peer_id2,
"PeerAdded peer_id {peer_id} does not match SessionEstablished peer_id {peer_id2}"
);
Some(peer_id)
}
_ => None,
}
}
/// Awaits the next event for a peer added.
pub async fn peer_added(&mut self) -> Option<PeerId> {
let peer_id = match self.inner.next().await {
Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => peer_id,
_ => return None,
};
Some(peer_id)
}
/// Awaits the next event for a peer removed.
pub async fn peer_removed(&mut self) -> Option<PeerId> {
let peer_id = match self.inner.next().await {
Some(NetworkEvent::Peer(PeerEvent::PeerRemoved(peer_id))) => peer_id,
_ => return None,
};
Some(peer_id)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/transaction_hash_fetching.rs | crates/net/network/tests/it/transaction_hash_fetching.rs | use alloy_primitives::U256;
use reth_network::{
test_utils::Testnet,
transactions::{TransactionPropagationMode::Max, TransactionsManagerConfig},
};
use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
use reth_tracing::init_test_tracing;
use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool};
use tokio::time::Duration;
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn transaction_hash_fetching() {
init_test_tracing();
let mut config = TransactionsManagerConfig { propagation_mode: Max(0), ..Default::default() };
config.transaction_fetcher_config.max_inflight_requests = 1;
let provider = MockEthProvider::default();
let num_peers = 10;
let net = Testnet::create_with(num_peers, provider.clone()).await;
// install request handlers
let net = net.with_eth_pool_config(config);
let handle = net.spawn();
// connect all the peers first
handle.connect_peers().await;
let listening_peer = &handle.peers()[num_peers - 1];
let mut listening_peer_tx_listener =
listening_peer.pool().unwrap().pending_transactions_listener();
let num_tx_per_peer = 10;
// Generate transactions for peers
for i in 1..num_peers {
let peer = &handle.peers()[i];
let peer_pool = peer.pool().unwrap();
for _ in 0..num_tx_per_peer {
let mut tx_gen = TransactionGenerator::new(rand::rng());
let tx = tx_gen.gen_eip1559_pooled();
let sender = tx.sender();
provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000)));
peer_pool.add_external_transaction(tx).await.unwrap();
}
}
// Total expected transactions
let total_expected_tx = num_tx_per_peer * (num_peers - 1);
let mut received_tx = 0;
loop {
tokio::select! {
Some(_) = listening_peer_tx_listener.recv() => {
received_tx += 1;
if received_tx >= total_expected_tx {
break;
}
}
_ = tokio::time::sleep(Duration::from_secs(10)) => {
panic!("Timed out waiting for transactions. Received {received_tx}/{total_expected_tx}");
}
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/session.rs | crates/net/network/tests/it/session.rs | //! Session tests
use futures::StreamExt;
use reth_eth_wire::EthVersion;
use reth_network::{
test_utils::{NetworkEventStream, PeerConfig, Testnet},
NetworkEvent, NetworkEventListenerProvider,
};
use reth_network_api::{
events::{PeerEvent, SessionInfo},
NetworkInfo, Peers,
};
use reth_storage_api::noop::NoopProvider;
#[tokio::test(flavor = "multi_thread")]
async fn test_session_established_with_highest_version() {
reth_tracing::init_test_tracing();
let net = Testnet::create(2).await;
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
assert_eq!(status.version, EthVersion::LATEST);
}
ev => {
panic!("unexpected event {ev:?}")
}
}
}
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_session_established_with_different_capability() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(1).await;
let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth66.into()));
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
assert_eq!(status.version, EthVersion::Eth66);
}
ev => {
panic!("unexpected event: {ev:?}")
}
}
}
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_capability_version_mismatch() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth66.into()));
net.add_peer_with_config(p0).await.unwrap();
let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth67.into()));
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let events = handle0.event_listener();
let mut event_stream = NetworkEventStream::new(events);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let added_peer_id = event_stream.peer_added().await.unwrap();
assert_eq!(added_peer_id, *handle1.peer_id());
// peer with mismatched capability version should fail to connect and be removed.
let removed_peer_id = event_stream.peer_removed().await.unwrap();
assert_eq!(removed_peer_id, *handle1.peer_id());
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_peers_can_connect() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Create two peers that only support ETH69
let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p0).await.unwrap();
let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
// Both peers support only ETH69, so they should connect with ETH69
assert_eq!(status.version, EthVersion::Eth69);
}
ev => {
panic!("unexpected event: {ev:?}")
}
}
}
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_peers_negotiate_highest_version_eth69() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Create one peer with multiple ETH versions including ETH69
let p0 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![
EthVersion::Eth69.into(),
EthVersion::Eth68.into(),
EthVersion::Eth67.into(),
EthVersion::Eth66.into(),
],
);
net.add_peer_with_config(p0).await.unwrap();
// Create another peer with multiple ETH versions including ETH69
let p1 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth69.into(), EthVersion::Eth68.into(), EthVersion::Eth67.into()],
);
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
// Both peers support ETH69, so they should negotiate to the highest version: ETH69
assert_eq!(status.version, EthVersion::Eth69);
}
ev => {
panic!("unexpected event: {ev:?}")
}
}
}
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_vs_eth68_incompatible() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Create one peer that only supports ETH69
let p0 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p0).await.unwrap();
// Create another peer that only supports ETH68
let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth68.into()));
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let events = handle0.event_listener();
let mut event_stream = NetworkEventStream::new(events);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let added_peer_id = event_stream.peer_added().await.unwrap();
assert_eq!(added_peer_id, *handle1.peer_id());
// Peers with no shared ETH version should fail to connect and be removed.
let removed_peer_id = event_stream.peer_removed().await.unwrap();
assert_eq!(removed_peer_id, *handle1.peer_id());
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_mixed_version_negotiation() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Create one peer that supports ETH69 + ETH68
let p0 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth69.into(), EthVersion::Eth68.into()],
);
net.add_peer_with_config(p0).await.unwrap();
// Create another peer that only supports ETH68
let p1 = PeerConfig::with_protocols(NoopProvider::default(), Some(EthVersion::Eth68.into()));
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
// Should negotiate to ETH68 (highest common version)
assert_eq!(status.version, EthVersion::Eth68);
}
ev => {
panic!("unexpected event: {ev:?}")
}
}
}
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_multiple_peers_different_eth_versions() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Create a peer that supports all versions (ETH66-ETH69)
let p0 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![
EthVersion::Eth69.into(),
EthVersion::Eth68.into(),
EthVersion::Eth67.into(),
EthVersion::Eth66.into(),
],
);
net.add_peer_with_config(p0).await.unwrap();
// Create a peer that only supports newer versions (ETH68-ETH69)
let p1 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth69.into(), EthVersion::Eth68.into()],
);
net.add_peer_with_config(p1).await.unwrap();
// Create a peer that only supports older versions (ETH66-ETH67)
let p2 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth67.into(), EthVersion::Eth66.into()],
);
net.add_peer_with_config(p2).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap(); // All versions peer
let handle1 = handles.next().unwrap(); // Newer versions peer
let handle2 = handles.next().unwrap(); // Older versions peer
drop(handles);
let handle = net.spawn();
let events = handle0.event_listener();
let mut event_stream = NetworkEventStream::new(events);
// Connect peer0 (all versions) to peer1 (newer versions) - should negotiate ETH69
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let added_peer_id = event_stream.peer_added().await.unwrap();
assert_eq!(added_peer_id, *handle1.peer_id());
let established_peer_id = event_stream.next_session_established().await.unwrap();
assert_eq!(established_peer_id, *handle1.peer_id());
// Connect peer0 (all versions) to peer2 (older versions) - should negotiate ETH67
handle0.add_peer(*handle2.peer_id(), handle2.local_addr());
let added_peer_id = event_stream.peer_added().await.unwrap();
assert_eq!(added_peer_id, *handle2.peer_id());
let established_peer_id = event_stream.next_session_established().await.unwrap();
assert_eq!(established_peer_id, *handle2.peer_id());
// Both connections should be established successfully
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_capability_negotiation_fallback() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Create a peer that prefers ETH69 but supports fallback to ETH67
let p0 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth69.into(), EthVersion::Eth67.into()],
);
net.add_peer_with_config(p0).await.unwrap();
// Create a peer that skips ETH68 and only supports ETH67/ETH66
let p1 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth67.into(), EthVersion::Eth66.into()],
);
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
// Should fallback to ETH67 (skipping ETH68 which neither supports)
assert_eq!(status.version, EthVersion::Eth67);
}
ev => {
panic!("unexpected event: {ev:?}")
}
}
}
handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_overlapping_version_sets_negotiation() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create(0).await;
// Peer 0: supports ETH69, ETH67, ETH66 (skips ETH68)
let p0 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth69.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()],
);
net.add_peer_with_config(p0).await.unwrap();
// Peer 1: supports ETH68, ETH67, ETH66 (skips ETH69)
let p1 = PeerConfig::with_protocols(
NoopProvider::default(),
vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()],
);
net.add_peer_with_config(p1).await.unwrap();
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let mut events = handle0.event_listener().take(2);
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
while let Some(event) = events.next().await {
match event {
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert_eq!(handle1.peer_id(), &peer_id);
}
NetworkEvent::ActivePeerSession { info, .. } => {
let SessionInfo { peer_id, status, .. } = info;
assert_eq!(handle1.peer_id(), &peer_id);
// Should negotiate to ETH67 (highest common version between ETH69,67,66 and
// ETH68,67,66)
assert_eq!(status.version, EthVersion::Eth67);
}
ev => {
panic!("unexpected event: {ev:?}")
}
}
}
handle.terminate().await;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/multiplex.rs | crates/net/network/tests/it/multiplex.rs | #![allow(unreachable_pub)]
//! Testing gossiping of transactions.
use std::{
net::SocketAddr,
pin::Pin,
task::{ready, Context, Poll},
};
use alloy_primitives::bytes::BytesMut;
use futures::{Stream, StreamExt};
use reth_eth_wire::{
capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol,
};
use reth_network::{
protocol::{ConnectionHandler, OnNotSupported, ProtocolHandler},
test_utils::{NetworkEventStream, Testnet},
NetworkConfigBuilder, NetworkEventListenerProvider, NetworkManager,
};
use reth_network_api::{Direction, NetworkInfo, PeerId, Peers};
use reth_provider::{noop::NoopProvider, test_utils::MockEthProvider};
use secp256k1::SecretKey;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::UnboundedReceiverStream;
use crate::multiplex::proto::{PingPongProtoMessage, PingPongProtoMessageKind};
/// A simple Rlpx subprotocol that sends pings and pongs
mod proto {
use super::*;
use alloy_primitives::bytes::{Buf, BufMut};
use reth_eth_wire::Capability;
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PingPongProtoMessageId {
Ping = 0x00,
Pong = 0x01,
PingMessage = 0x02,
PongMessage = 0x03,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum PingPongProtoMessageKind {
Ping,
Pong,
PingMessage(String),
PongMessage(String),
}
/// A protocol message, containing a message ID and payload.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PingPongProtoMessage {
pub message_type: PingPongProtoMessageId,
pub message: PingPongProtoMessageKind,
}
impl PingPongProtoMessage {
/// Returns the capability for the `ping` protocol.
pub const fn capability() -> Capability {
Capability::new_static("ping", 1)
}
/// Returns the protocol for the `test` protocol.
pub const fn protocol() -> Protocol {
Protocol::new(Self::capability(), 4)
}
/// Creates a ping message
pub const fn ping() -> Self {
Self {
message_type: PingPongProtoMessageId::Ping,
message: PingPongProtoMessageKind::Ping,
}
}
/// Creates a pong message
pub const fn pong() -> Self {
Self {
message_type: PingPongProtoMessageId::Pong,
message: PingPongProtoMessageKind::Pong,
}
}
/// Creates a ping message
pub fn ping_message(msg: impl Into<String>) -> Self {
Self {
message_type: PingPongProtoMessageId::PingMessage,
message: PingPongProtoMessageKind::PingMessage(msg.into()),
}
}
/// Creates a ping message
pub fn pong_message(msg: impl Into<String>) -> Self {
Self {
message_type: PingPongProtoMessageId::PongMessage,
message: PingPongProtoMessageKind::PongMessage(msg.into()),
}
}
/// Creates a new `TestProtoMessage` with the given message ID and payload.
pub fn encoded(&self) -> BytesMut {
let mut buf = BytesMut::new();
buf.put_u8(self.message_type as u8);
match &self.message {
PingPongProtoMessageKind::Ping | PingPongProtoMessageKind::Pong => {}
PingPongProtoMessageKind::PingMessage(msg) |
PingPongProtoMessageKind::PongMessage(msg) => {
buf.put(msg.as_bytes());
}
}
buf
}
/// Decodes a `TestProtoMessage` from the given message buffer.
pub fn decode_message(buf: &mut &[u8]) -> Option<Self> {
if buf.is_empty() {
return None
}
let id = buf[0];
buf.advance(1);
let message_type = match id {
0x00 => PingPongProtoMessageId::Ping,
0x01 => PingPongProtoMessageId::Pong,
0x02 => PingPongProtoMessageId::PingMessage,
0x03 => PingPongProtoMessageId::PongMessage,
_ => return None,
};
let message = match message_type {
PingPongProtoMessageId::Ping => PingPongProtoMessageKind::Ping,
PingPongProtoMessageId::Pong => PingPongProtoMessageKind::Pong,
PingPongProtoMessageId::PingMessage => PingPongProtoMessageKind::PingMessage(
String::from_utf8_lossy(&buf[..]).into_owned(),
),
PingPongProtoMessageId::PongMessage => PingPongProtoMessageKind::PongMessage(
String::from_utf8_lossy(&buf[..]).into_owned(),
),
};
Some(Self { message_type, message })
}
}
}
#[derive(Debug)]
struct PingPongProtoHandler {
state: ProtocolState,
}
impl ProtocolHandler for PingPongProtoHandler {
type ConnectionHandler = PingPongConnectionHandler;
fn on_incoming(&self, _socket_addr: SocketAddr) -> Option<Self::ConnectionHandler> {
Some(PingPongConnectionHandler { state: self.state.clone() })
}
fn on_outgoing(
&self,
_socket_addr: SocketAddr,
_peer_id: PeerId,
) -> Option<Self::ConnectionHandler> {
Some(PingPongConnectionHandler { state: self.state.clone() })
}
}
#[derive(Clone, Debug)]
struct ProtocolState {
events: mpsc::UnboundedSender<ProtocolEvent>,
}
#[derive(Debug)]
enum ProtocolEvent {
Established {
#[expect(dead_code)]
direction: Direction,
peer_id: PeerId,
to_connection: mpsc::UnboundedSender<Command>,
},
}
enum Command {
/// Send a ping message to the peer.
PingMessage {
msg: String,
/// The response will be sent to this channel.
response: oneshot::Sender<String>,
},
}
struct PingPongConnectionHandler {
state: ProtocolState,
}
impl ConnectionHandler for PingPongConnectionHandler {
type Connection = PingPongProtoConnection;
fn protocol(&self) -> Protocol {
PingPongProtoMessage::protocol()
}
fn on_unsupported_by_peer(
self,
_supported: &SharedCapabilities,
_direction: Direction,
_peer_id: PeerId,
) -> OnNotSupported {
OnNotSupported::Disconnect
}
fn into_connection(
self,
direction: Direction,
_peer_id: PeerId,
conn: ProtocolConnection,
) -> Self::Connection {
let (tx, rx) = mpsc::unbounded_channel();
self.state
.events
.send(ProtocolEvent::Established { direction, peer_id: _peer_id, to_connection: tx })
.ok();
PingPongProtoConnection {
conn,
initial_ping: direction.is_outgoing().then(PingPongProtoMessage::ping),
commands: UnboundedReceiverStream::new(rx),
pending_pong: None,
}
}
}
struct PingPongProtoConnection {
conn: ProtocolConnection,
initial_ping: Option<PingPongProtoMessage>,
commands: UnboundedReceiverStream<Command>,
pending_pong: Option<oneshot::Sender<String>>,
}
impl Stream for PingPongProtoConnection {
type Item = BytesMut;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
if let Some(initial_ping) = this.initial_ping.take() {
return Poll::Ready(Some(initial_ping.encoded()))
}
loop {
if let Poll::Ready(Some(cmd)) = this.commands.poll_next_unpin(cx) {
return match cmd {
Command::PingMessage { msg, response } => {
this.pending_pong = Some(response);
Poll::Ready(Some(PingPongProtoMessage::ping_message(msg).encoded()))
}
}
}
let Some(msg) = ready!(this.conn.poll_next_unpin(cx)) else { return Poll::Ready(None) };
let Some(msg) = PingPongProtoMessage::decode_message(&mut &msg[..]) else {
return Poll::Ready(None)
};
match msg.message {
PingPongProtoMessageKind::Ping => {
return Poll::Ready(Some(PingPongProtoMessage::pong().encoded()))
}
PingPongProtoMessageKind::Pong => {}
PingPongProtoMessageKind::PingMessage(msg) => {
return Poll::Ready(Some(PingPongProtoMessage::pong_message(msg).encoded()))
}
PingPongProtoMessageKind::PongMessage(msg) => {
if let Some(sender) = this.pending_pong.take() {
sender.send(msg).ok();
}
continue
}
}
return Poll::Pending
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_non_multiplex_peer() {
reth_tracing::init_test_tracing();
let net = Testnet::create(1).await;
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.build(NoopProvider::default());
let mut network = NetworkManager::new(config).await.unwrap();
let (tx, _) = mpsc::unbounded_channel();
network.add_rlpx_sub_protocol(PingPongProtoHandler { state: ProtocolState { events: tx } });
let handle = network.handle().clone();
tokio::task::spawn(network);
// create networkeventstream to get the next session event easily.
let events = handle.event_listener();
let mut event_stream = NetworkEventStream::new(events);
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
handle.add_peer(*handle0.peer_id(), handle0.local_addr());
let added_peer_id = event_stream.peer_added().await.unwrap();
assert_eq!(added_peer_id, *handle0.peer_id());
// peer with mismatched capability version should fail to connect and be removed.
let removed_peer_id = event_stream.peer_removed().await.unwrap();
assert_eq!(removed_peer_id, *handle0.peer_id());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_proto_multiplex() {
reth_tracing::init_test_tracing();
let provider = MockEthProvider::default();
let mut net = Testnet::create_with(2, provider.clone()).await;
let (tx, mut from_peer0) = mpsc::unbounded_channel();
net.peers_mut()[0]
.add_rlpx_sub_protocol(PingPongProtoHandler { state: ProtocolState { events: tx } });
let (tx, mut from_peer1) = mpsc::unbounded_channel();
net.peers_mut()[1]
.add_rlpx_sub_protocol(PingPongProtoHandler { state: ProtocolState { events: tx } });
let handle = net.spawn();
// connect all the peers
handle.connect_peers().await;
let peer0_to_peer1 = from_peer0.recv().await.unwrap();
let peer0_conn = match peer0_to_peer1 {
ProtocolEvent::Established { direction: _, peer_id, to_connection } => {
assert_eq!(peer_id, *handle.peers()[1].peer_id());
to_connection
}
};
let peer1_to_peer0 = from_peer1.recv().await.unwrap();
let peer1_conn = match peer1_to_peer0 {
ProtocolEvent::Established { direction: _, peer_id, to_connection } => {
assert_eq!(peer_id, *handle.peers()[0].peer_id());
to_connection
}
};
let (tx, rx) = oneshot::channel();
// send a ping message from peer0 to peer1
peer0_conn.send(Command::PingMessage { msg: "hello!".to_string(), response: tx }).unwrap();
let response = rx.await.unwrap();
assert_eq!(response, "hello!");
let (tx, rx) = oneshot::channel();
// send a ping message from peer1 to peer0
peer1_conn
.send(Command::PingMessage { msg: "hello from peer1!".to_string(), response: tx })
.unwrap();
let response = rx.await.unwrap();
assert_eq!(response, "hello from peer1!");
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/connect.rs | crates/net/network/tests/it/connect.rs | //! Connection tests
use alloy_primitives::map::HashSet;
use futures::StreamExt;
use reth_chainspec::{MAINNET, SEPOLIA};
use reth_discv4::Discv4Config;
use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, HeadersDirection};
use reth_network::{
test_utils::{NetworkEventStream, PeerConfig, Testnet},
BlockDownloaderProvider, NetworkConfigBuilder, NetworkEvent, NetworkEventListenerProvider,
NetworkManager, PeersConfig,
};
use reth_network_api::{
events::{PeerEvent, SessionInfo},
NetworkInfo, Peers, PeersInfo,
};
use reth_network_p2p::{
headers::client::{HeadersClient, HeadersRequest},
sync::{NetworkSyncUpdater, SyncState},
};
use reth_network_peers::{mainnet_nodes, NodeRecord, TrustedPeer};
use reth_network_types::peers::config::PeerBackoffDurations;
use reth_storage_api::noop::NoopProvider;
use reth_tracing::init_test_tracing;
use reth_transaction_pool::test_utils::testing_pool;
use secp256k1::SecretKey;
use std::time::Duration;
use tokio::task;
use url::Host;
#[tokio::test(flavor = "multi_thread")]
async fn test_establish_connections() {
reth_tracing::init_test_tracing();
for _ in 0..3 {
let net = Testnet::create(3).await;
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
let handle2 = handles.next().unwrap();
drop(handles);
let handle = net.spawn();
let listener0 = handle0.event_listener();
let mut listener1 = handle1.event_listener();
let mut listener2 = handle2.event_listener();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
handle0.add_peer(*handle2.peer_id(), handle2.local_addr());
let mut expected_connections = HashSet::from([*handle1.peer_id(), *handle2.peer_id()]);
let mut expected_peers = expected_connections.clone();
// wait for all initiator connections
let mut established = listener0.take(4);
while let Some(ev) = established.next().await {
match ev {
NetworkEvent::Peer(PeerEvent::SessionClosed { .. } | PeerEvent::PeerRemoved(_)) => {
panic!("unexpected event")
}
NetworkEvent::ActivePeerSession { info, .. } |
NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => {
let SessionInfo { peer_id, .. } = info;
assert!(expected_connections.remove(&peer_id));
}
NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => {
assert!(expected_peers.remove(&peer_id))
}
}
}
assert!(expected_connections.is_empty());
assert!(expected_peers.is_empty());
// also await the established session on both target
futures::future::join(listener1.next(), listener2.next()).await;
let net = handle.terminate().await;
assert_eq!(net.peers()[0].num_peers(), 2);
assert_eq!(net.peers()[1].num_peers(), 1);
assert_eq!(net.peers()[2].num_peers(), 1);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_already_connected() {
reth_tracing::init_test_tracing();
let mut net = Testnet::default();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let client = NoopProvider::default();
let p1 = PeerConfig::default();
// initialize two peers with the same identifier
let p2 = PeerConfig::with_secret_key(client.clone(), secret_key);
let p3 = PeerConfig::with_secret_key(client, secret_key);
net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap();
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
let handle2 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
let mut listener0 = NetworkEventStream::new(handle0.event_listener());
let mut listener2 = NetworkEventStream::new(handle2.event_listener());
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let peer = listener0.next_session_established().await.unwrap();
assert_eq!(peer, *handle1.peer_id());
handle2.add_peer(*handle0.peer_id(), handle0.local_addr());
let peer = listener2.next_session_established().await.unwrap();
assert_eq!(peer, *handle0.peer_id());
let (peer, reason) = listener2.next_session_closed().await.unwrap();
assert_eq!(peer, *handle0.peer_id());
let reason = reason.unwrap();
assert_eq!(reason, DisconnectReason::AlreadyConnected);
assert_eq!(handle0.num_connected_peers(), 1);
assert_eq!(handle1.num_connected_peers(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_get_peer() {
reth_tracing::init_test_tracing();
let mut net = Testnet::default();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let secret_key_1 = SecretKey::new(&mut rand_08::thread_rng());
let client = NoopProvider::default();
let p1 = PeerConfig::default();
let p2 = PeerConfig::with_secret_key(client.clone(), secret_key);
let p3 = PeerConfig::with_secret_key(client, secret_key_1);
net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap();
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
let handle2 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
let mut listener0 = NetworkEventStream::new(handle0.event_listener());
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let _ = listener0.next_session_established().await.unwrap();
handle0.add_peer(*handle2.peer_id(), handle2.local_addr());
let _ = listener0.next_session_established().await.unwrap();
let peers = handle0.get_all_peers().await.unwrap();
assert_eq!(handle0.num_connected_peers(), peers.len());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_get_peer_by_id() {
reth_tracing::init_test_tracing();
let mut net = Testnet::default();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let secret_key_1 = SecretKey::new(&mut rand_08::thread_rng());
let client = NoopProvider::default();
let p1 = PeerConfig::default();
let p2 = PeerConfig::with_secret_key(client.clone(), secret_key);
let p3 = PeerConfig::with_secret_key(client, secret_key_1);
net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap();
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
let handle2 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
let mut listener0 = NetworkEventStream::new(handle0.event_listener());
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let _ = listener0.next_session_established().await.unwrap();
let peer = handle0.get_peer_by_id(*handle1.peer_id()).await.unwrap();
assert!(peer.is_some());
let peer = handle0.get_peer_by_id(*handle2.peer_id()).await.unwrap();
assert!(peer.is_none());
}
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn test_connect_with_boot_nodes() {
reth_tracing::init_test_tracing();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let mut discv4 = Discv4Config::builder();
discv4.add_boot_nodes(mainnet_nodes());
let config =
NetworkConfigBuilder::eth(secret_key).discovery(discv4).build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
let handle = network.handle().clone();
let mut events = handle.event_listener();
tokio::task::spawn(network);
while let Some(ev) = events.next().await {
dbg!(ev);
}
}
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn test_connect_with_builder() {
reth_tracing::init_test_tracing();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let mut discv4 = Discv4Config::builder();
discv4.add_boot_nodes(mainnet_nodes());
let client = NoopProvider::default();
let config = NetworkConfigBuilder::eth(secret_key).discovery(discv4).build(client.clone());
let (handle, network, _, requests) = NetworkManager::new(config)
.await
.unwrap()
.into_builder()
.request_handler(client)
.split_with_handle();
let mut events = handle.event_listener();
tokio::task::spawn(async move {
tokio::join!(network, requests);
});
let h = handle.clone();
task::spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(5)).await;
dbg!(h.num_connected_peers());
}
});
while let Some(ev) = events.next().await {
dbg!(ev);
}
}
// expects a `ENODE="enode://"` env var that holds the record
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn test_connect_to_trusted_peer() {
reth_tracing::init_test_tracing();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let discv4 = Discv4Config::builder();
let client = NoopProvider::default();
let config = NetworkConfigBuilder::eth(secret_key).discovery(discv4).build(client.clone());
let transactions_manager_config = config.transactions_manager_config.clone();
let (handle, network, transactions, requests) = NetworkManager::new(config)
.await
.unwrap()
.into_builder()
.request_handler(client)
.transactions(testing_pool(), transactions_manager_config)
.split_with_handle();
let mut events = handle.event_listener();
tokio::task::spawn(async move {
tokio::join!(network, requests, transactions);
});
let node: NodeRecord = std::env::var("ENODE").unwrap().parse().unwrap();
handle.add_trusted_peer(node.id, node.tcp_addr());
let h = handle.clone();
h.update_sync_state(SyncState::Syncing);
task::spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(5)).await;
dbg!(h.num_connected_peers());
}
});
let fetcher = handle.fetch_client().await.unwrap();
let headers = fetcher
.get_headers(HeadersRequest {
start: 73174u64.into(),
limit: 10,
direction: HeadersDirection::Falling,
})
.await;
dbg!(&headers);
while let Some(ev) = events.next().await {
dbg!(ev);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_shutdown() {
reth_tracing::init_test_tracing();
let net = Testnet::create(3).await;
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
let handle2 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
let mut listener0 = NetworkEventStream::new(handle0.event_listener());
let mut listener1 = NetworkEventStream::new(handle1.event_listener());
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
handle0.add_peer(*handle2.peer_id(), handle2.local_addr());
handle1.add_peer(*handle2.peer_id(), handle2.local_addr());
let mut expected_connections = HashSet::from([*handle1.peer_id(), *handle2.peer_id()]);
// Before shutting down, we have two connected peers
let peer1 = listener0.next_session_established().await.unwrap();
let peer2 = listener0.next_session_established().await.unwrap();
assert_eq!(handle0.num_connected_peers(), 2);
assert!(expected_connections.contains(&peer1));
assert!(expected_connections.contains(&peer2));
handle0.shutdown().await.unwrap();
// All sessions get disconnected
let (peer1, _reason) = listener0.next_session_closed().await.unwrap();
let (peer2, _reason) = listener0.next_session_closed().await.unwrap();
assert_eq!(handle0.num_connected_peers(), 0);
assert!(expected_connections.remove(&peer1));
assert!(expected_connections.remove(&peer2));
// Connected peers receive a shutdown signal
let (_peer, reason) = listener1.next_session_closed().await.unwrap();
assert_eq!(reason, Some(DisconnectReason::ClientQuitting));
// New connections ignored
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
assert_eq!(handle0.num_connected_peers(), 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_trusted_peer_only() {
init_test_tracing();
let net = Testnet::create(2).await;
let mut handles = net.handles();
// handle0 is used to test that:
// * outgoing connections to untrusted peers are not allowed
// * outgoing connections to trusted peers are allowed and succeed
let handle0 = handles.next().unwrap();
// handle1 is used to test that:
// * incoming connections from untrusted peers are not allowed
// * incoming connections from trusted peers are allowed and succeed
let handle1 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let peers_config = PeersConfig::default()
.with_backoff_durations(PeerBackoffDurations::test())
.with_ban_duration(Duration::from_millis(200))
.with_trusted_nodes_only(true);
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.peer_config(peers_config)
.build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
let handle = network.handle().clone();
tokio::task::spawn(network);
// create networkeventstream to get the next session event easily.
let events = handle.event_listener();
let mut event_stream = NetworkEventStream::new(events);
// only connect to trusted peers.
// connect to an untrusted peer should fail.
handle.add_peer(*handle0.peer_id(), handle0.local_addr());
// wait 1 second, the number of connection is still 0.
tokio::time::sleep(Duration::from_secs(1)).await;
assert_eq!(handle.num_connected_peers(), 0);
// add to trusted peer.
handle.add_trusted_peer(*handle0.peer_id(), handle0.local_addr());
let outgoing_peer_id = event_stream.next_session_established().await.unwrap();
assert_eq!(outgoing_peer_id, *handle0.peer_id());
assert_eq!(handle.num_connected_peers(), 1);
// only receive connections from trusted peers.
handle1.add_peer(*handle.peer_id(), handle.local_addr());
// wait 1 second, the number of connections is still 1, because peer1 is untrusted.
tokio::time::sleep(Duration::from_secs(1)).await;
assert_eq!(handle.num_connected_peers(), 1);
handle.add_trusted_peer(*handle1.peer_id(), handle1.local_addr());
// wait for the next session established event to check the handle1 incoming connection
let outgoing_peer_id1 = event_stream.next_session_established().await.unwrap();
assert_eq!(outgoing_peer_id1, *handle1.peer_id());
tokio::time::sleep(Duration::from_secs(1)).await;
assert_eq!(handle.num_connected_peers(), 2);
// check that handle0 and handle1 both have peers.
assert_eq!(handle0.num_connected_peers(), 1);
assert_eq!(handle1.num_connected_peers(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_network_state_change() {
let net = Testnet::create(1).await;
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let peers_config =
PeersConfig::default().with_refill_slots_interval(Duration::from_millis(500));
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.peer_config(peers_config)
.build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
let handle = network.handle().clone();
tokio::task::spawn(network);
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
// Set network state to Hibernate.
handle.set_network_hibernate();
handle.add_peer(*handle0.peer_id(), handle0.local_addr());
// wait 2 seconds, the number of connections is still 0, because network is Hibernate.
tokio::time::sleep(Duration::from_secs(2)).await;
assert_eq!(handle.num_connected_peers(), 0);
// Set network state to Active.
handle.set_network_active();
// wait 2 seconds, the number of connections should be 1, because network is Active and outbound
// slot should be filled.
tokio::time::sleep(Duration::from_secs(2)).await;
assert_eq!(handle.num_connected_peers(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_exceed_outgoing_connections() {
let net = Testnet::create(2).await;
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let peers_config = PeersConfig::default().with_max_outbound(1);
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.peer_config(peers_config)
.build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
let handle = network.handle().clone();
tokio::task::spawn(network);
// create networkeventstream to get the next session event easily.
let events = handle.event_listener();
let mut event_stream = NetworkEventStream::new(events);
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
handle.add_peer(*handle0.peer_id(), handle0.local_addr());
let outgoing_peer_id = event_stream.next_session_established().await.unwrap();
assert_eq!(outgoing_peer_id, *handle0.peer_id());
handle.add_peer(*handle1.peer_id(), handle1.local_addr());
// wait 2 seconds, the number of connections is still 1, indicating that the max outbound is in
// effect.
tokio::time::sleep(Duration::from_secs(2)).await;
assert_eq!(handle.num_connected_peers(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_disconnect_incoming_when_exceeded_incoming_connections() {
let net = Testnet::create(1).await;
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let peers_config = PeersConfig::default().with_max_inbound(0);
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.peer_config(peers_config)
.build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
let other_peer_handle = net.handles().next().unwrap();
let handle = network.handle().clone();
other_peer_handle.add_peer(*handle.peer_id(), handle.local_addr());
tokio::task::spawn(network);
let net_handle = net.spawn();
tokio::time::sleep(Duration::from_secs(1)).await;
assert_eq!(handle.num_connected_peers(), 0);
net_handle.terminate().await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_always_accept_incoming_connections_from_trusted_peers() {
reth_tracing::init_test_tracing();
let peer1 = new_random_peer(10, vec![]).await;
let peer2 = new_random_peer(0, vec![]).await;
// setup the peer with max_inbound = 1, and add other_peer_3 as trust nodes
let trusted_peer2 = TrustedPeer {
host: Host::Ipv4(peer2.local_addr().ip().to_string().parse().unwrap()),
tcp_port: peer2.local_addr().port(),
udp_port: peer2.local_addr().port(),
id: *peer2.peer_id(),
};
let peer = new_random_peer(0, vec![trusted_peer2.clone()]).await;
let handle = peer.handle().clone();
let peer1_handle = peer1.handle().clone();
let peer2_handle = peer2.handle().clone();
tokio::task::spawn(peer);
tokio::task::spawn(peer1);
tokio::task::spawn(peer2);
let mut events = NetworkEventStream::new(handle.event_listener());
let mut events_peer1 = NetworkEventStream::new(peer1_handle.event_listener());
// incoming connection should fail because exceeding max_inbound
peer1_handle.add_peer(*handle.peer_id(), handle.local_addr());
let (peer_id, reason) = events_peer1.next_session_closed().await.unwrap();
assert_eq!(peer_id, *handle.peer_id());
assert_eq!(reason, Some(DisconnectReason::TooManyPeers));
let peer_id = events.next_session_established().await.unwrap();
assert_eq!(peer_id, *peer1_handle.peer_id());
// outbound connection from `peer2` should succeed
peer2_handle.add_peer(*handle.peer_id(), handle.local_addr());
let peer_id = events.next_session_established().await.unwrap();
assert_eq!(peer_id, *peer2_handle.peer_id());
assert_eq!(handle.num_connected_peers(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_rejected_by_already_connect() {
reth_tracing::init_test_tracing();
let other_peer1 = new_random_peer(10, vec![]).await;
let other_peer2 = new_random_peer(10, vec![]).await;
// setup the peer with max_inbound = 2
let peer = new_random_peer(2, vec![]).await;
let handle = peer.handle().clone();
let other_peer_handle1 = other_peer1.handle().clone();
let other_peer_handle2 = other_peer2.handle().clone();
tokio::task::spawn(peer);
tokio::task::spawn(other_peer1);
tokio::task::spawn(other_peer2);
let mut events = NetworkEventStream::new(handle.event_listener());
// incoming connection should succeed
other_peer_handle1.add_peer(*handle.peer_id(), handle.local_addr());
let peer_id = events.next_session_established().await.unwrap();
assert_eq!(peer_id, *other_peer_handle1.peer_id());
assert_eq!(handle.num_connected_peers(), 1);
// incoming connection from the same peer should be rejected by already connected
// and num_inbount should still be 1
other_peer_handle1.add_peer(*handle.peer_id(), handle.local_addr());
tokio::time::sleep(Duration::from_secs(1)).await;
// incoming connection from other_peer2 should succeed
other_peer_handle2.add_peer(*handle.peer_id(), handle.local_addr());
let peer_id = events.next_session_established().await.unwrap();
assert_eq!(peer_id, *other_peer_handle2.peer_id());
// wait 2 seconds and check that other_peer2 is not rejected by TooManyPeers
tokio::time::sleep(Duration::from_secs(2)).await;
assert_eq!(handle.num_connected_peers(), 2);
}
async fn new_random_peer(
max_in_bound: usize,
trusted_nodes: Vec<TrustedPeer>,
) -> NetworkManager<EthNetworkPrimitives> {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let peers_config =
PeersConfig::default().with_max_inbound(max_in_bound).with_trusted_nodes(trusted_nodes);
let config = NetworkConfigBuilder::new(secret_key)
.listener_port(0)
.disable_discovery()
.peer_config(peers_config)
.build_with_noop_provider(MAINNET.clone());
NetworkManager::new(config).await.unwrap()
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_many() {
reth_tracing::init_test_tracing();
let net = Testnet::create_with(5, NoopProvider::default()).await;
// install request handlers
let net = net.with_eth_pool();
let handle = net.spawn();
// connect all the peers
handle.connect_peers().await;
// check that all the peers are connected
for peer in handle.peers() {
assert_eq!(peer.network().num_connected_peers(), 4);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_disconnect_then_connect() {
reth_tracing::init_test_tracing();
let net = Testnet::create(2).await;
net.for_each(|peer| assert_eq!(0, peer.num_peers()));
let mut handles = net.handles();
let handle0 = handles.next().unwrap();
let handle1 = handles.next().unwrap();
drop(handles);
let _handle = net.spawn();
let mut listener0 = NetworkEventStream::new(handle0.event_listener());
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let peer = listener0.next_session_established().await.unwrap();
assert_eq!(peer, *handle1.peer_id());
handle0.disconnect_peer(*handle1.peer_id());
let (peer, _) = listener0.next_session_closed().await.unwrap();
assert_eq!(peer, *handle1.peer_id());
handle0.connect_peer(*handle1.peer_id(), handle1.local_addr());
let peer = listener0.next_session_established().await.unwrap();
assert_eq!(peer, *handle1.peer_id());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_peer_in_different_network_should_fail() {
reth_tracing::init_test_tracing();
// peer in mainnet.
let peer = new_random_peer(10, vec![]).await;
let peer_handle = peer.handle().clone();
tokio::task::spawn(peer);
// peer in sepolia.
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
// If the remote disconnect first, then we would not get a fatal protocol error. So set
// max_backoff_count to 0 to speed up the removal of the peer.
let peers_config = PeersConfig::default().with_max_backoff_count(0);
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.peer_config(peers_config)
.build_with_noop_provider(SEPOLIA.clone());
let network = NetworkManager::new(config).await.unwrap();
let handle = network.handle().clone();
tokio::task::spawn(network);
// create networkeventstream to get the next session event easily.
let events = handle.event_listener();
let mut event_stream = NetworkEventStream::new(events);
handle.add_peer(*peer_handle.peer_id(), peer_handle.local_addr());
let added_peer_id = event_stream.peer_added().await.unwrap();
assert_eq!(added_peer_id, *peer_handle.peer_id());
let removed_peer_id = event_stream.peer_removed().await.unwrap();
assert_eq!(removed_peer_id, *peer_handle.peer_id());
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/big_pooled_txs_req.rs | crates/net/network/tests/it/big_pooled_txs_req.rs | use alloy_primitives::{Signature, B256};
use reth_eth_wire::{GetPooledTransactions, PooledTransactions};
use reth_ethereum_primitives::TransactionSigned;
use reth_network::{
test_utils::{NetworkEventStream, Testnet},
NetworkEventListenerProvider, PeerRequest,
};
use reth_network_api::{NetworkInfo, Peers};
use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState};
use reth_primitives_traits::SignedTransaction;
use reth_provider::test_utils::MockEthProvider;
use reth_transaction_pool::{
test_utils::{testing_pool, MockTransaction},
TransactionPool,
};
use tokio::sync::oneshot;
// peer0: `GetPooledTransactions` requester
// peer1: `GetPooledTransactions` responder
#[tokio::test(flavor = "multi_thread")]
async fn test_large_tx_req() {
reth_tracing::init_test_tracing();
// create 2000 fake txs
let txs: Vec<MockTransaction> = (0..2000)
.map(|_| {
// replace rng txhash with real txhash
let mut tx = MockTransaction::eip1559();
let ts =
TransactionSigned::new_unhashed(tx.clone().into(), Signature::test_signature());
tx.set_hash(ts.recalculate_hash());
tx
})
.collect();
let txs_hashes: Vec<B256> = txs.iter().map(|tx| *tx.get_hash()).collect();
// setup testnet
let mut net = Testnet::create_with(2, MockEthProvider::default()).await;
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
// insert generated txs into responding peer's pool
let pool1 = testing_pool();
pool1.add_external_transactions(txs).await;
// install transactions managers
net.peers_mut()[0].install_transactions_manager(testing_pool());
net.peers_mut()[1].install_transactions_manager(pool1);
// connect peers together and check for connection existence
let handle0 = net.peers()[0].handle();
let handle1 = net.peers()[1].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let _handle = net.spawn();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
// stop syncing
handle0.update_sync_state(SyncState::Idle);
handle1.update_sync_state(SyncState::Idle);
assert!(!handle0.is_syncing() && !handle1.is_syncing());
// make `GetPooledTransactions` request
let (send, receive) = oneshot::channel();
handle0.send_request(
*handle1.peer_id(),
PeerRequest::GetPooledTransactions {
request: GetPooledTransactions(txs_hashes.clone()),
response: send,
},
);
// check all txs have been received
match receive.await.unwrap() {
Ok(PooledTransactions(txs)) => {
for tx in txs {
assert!(txs_hashes.contains(tx.hash()));
}
}
Err(e) => {
panic!("error: {e:?}");
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/startup.rs | crates/net/network/tests/it/startup.rs | use std::{
io,
net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4},
};
use reth_chainspec::MAINNET;
use reth_discv4::{Discv4Config, NatResolver, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT};
use reth_network::{
error::{NetworkError, ServiceKind},
Discovery, NetworkConfigBuilder, NetworkManager,
};
use reth_network_api::{NetworkInfo, PeersInfo};
use reth_storage_api::noop::NoopProvider;
use secp256k1::SecretKey;
use tokio::net::TcpListener;
fn is_addr_in_use_kind(err: &NetworkError, kind: ServiceKind) -> bool {
match err {
NetworkError::AddressAlreadyInUse { kind: k, error } => {
*k == kind && error.kind() == io::ErrorKind::AddrInUse
}
NetworkError::Discv5Error(reth_discv5::Error::Discv5Error(discv5::Error::Io(err))) => {
err.kind() == io::ErrorKind::AddrInUse
}
_ => false,
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_is_default_syncing() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.disable_discovery()
.listener_port(0)
.build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
assert!(!network.handle().is_syncing());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_listener_addr_in_use() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.disable_discovery()
.listener_port(0)
.build(NoopProvider::default());
let network = NetworkManager::new(config).await.unwrap();
let listener_port = network.local_addr().port();
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(listener_port)
.disable_discovery()
.build(NoopProvider::default());
let addr = config.listener_addr;
let result = NetworkManager::new(config).await;
let err = result.err().unwrap();
assert!(is_addr_in_use_kind(&err, ServiceKind::Listener(addr)), "{err:?}");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_discovery_addr_in_use() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let disc_config = Discv4Config::default();
let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0));
let any_port_listener = TcpListener::bind(addr).await.unwrap();
let port = any_port_listener.local_addr().unwrap().port();
let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port));
let _discovery =
Discovery::new(addr, addr, secret_key, Some(disc_config), None, None).await.unwrap();
let disc_config = Discv4Config::default();
let result = Discovery::new(addr, addr, secret_key, Some(disc_config), None, None).await;
assert!(is_addr_in_use_kind(&result.err().unwrap(), ServiceKind::Discovery(addr)));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_discv5_and_discv4_same_socket_fails() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(DEFAULT_DISCOVERY_PORT)
.discovery_v5(
reth_discv5::Config::builder((DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT).into())
.discv5_config(
discv5::ConfigBuilder::new(discv5::ListenConfig::from_ip(
DEFAULT_DISCOVERY_ADDR,
DEFAULT_DISCOVERY_PORT,
))
.build(),
),
)
.disable_dns_discovery()
.build(NoopProvider::default());
let addr = config.listener_addr;
let result = NetworkManager::new(config).await;
let err = result.err().unwrap();
assert!(is_addr_in_use_kind(&err, ServiceKind::Listener(addr)), "{err:?}")
}
#[tokio::test(flavor = "multi_thread")]
async fn test_discv5_and_rlpx_same_socket_ok_without_discv4() {
let test_port: u16 = TcpListener::bind("127.0.0.1:0") // 0 means OS assigns a free port
.await
.expect("Failed to bind to a port")
.local_addr()
.unwrap()
.port();
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(test_port)
.disable_discv4_discovery()
.discovery_v5(
reth_discv5::Config::builder((DEFAULT_DISCOVERY_ADDR, test_port).into()).discv5_config(
discv5::ConfigBuilder::new(discv5::ListenConfig::from_ip(
DEFAULT_DISCOVERY_ADDR,
test_port,
))
.build(),
),
)
.disable_dns_discovery()
.build(NoopProvider::default());
let _network = NetworkManager::new(config).await.expect("should build");
}
// <https://github.com/paradigmxyz/reth/issues/8851>
#[tokio::test(flavor = "multi_thread")]
async fn test_tcp_port_node_record_no_discovery() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.disable_discovery()
.build_with_noop_provider(MAINNET.clone());
let network = NetworkManager::new(config).await.unwrap();
let local_addr = network.local_addr();
// ensure we retrieved the port the OS chose
assert_ne!(local_addr.port(), 0);
let record = network.handle().local_node_record();
assert_eq!(record.tcp_port, local_addr.port());
}
// <https://github.com/paradigmxyz/reth/issues/8851>
#[tokio::test(flavor = "multi_thread")]
async fn test_tcp_port_node_record_discovery() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.listener_port(0)
.discovery_port(0)
.disable_dns_discovery()
.build_with_noop_provider(MAINNET.clone());
let network = NetworkManager::new(config).await.unwrap();
let local_addr = network.local_addr();
// ensure we retrieved the port the OS chose
assert_ne!(local_addr.port(), 0);
let record = network.handle().local_node_record();
assert_eq!(record.tcp_port, local_addr.port());
assert_ne!(record.udp_port, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_node_record_address_with_nat() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap())))
.disable_discv4_discovery()
.disable_dns_discovery()
.listener_port(0)
.build_with_noop_provider(MAINNET.clone());
let network = NetworkManager::new(config).await.unwrap();
let record = network.handle().local_node_record();
assert_eq!(record.address, IpAddr::V4(Ipv4Addr::new(10, 1, 1, 1)));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_node_record_address_with_nat_disable_discovery() {
let secret_key = SecretKey::new(&mut rand_08::thread_rng());
let config = NetworkConfigBuilder::eth(secret_key)
.add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap())))
.disable_discovery()
.disable_nat()
.listener_port(0)
.build_with_noop_provider(MAINNET.clone());
let network = NetworkManager::new(config).await.unwrap();
let record = network.handle().local_node_record();
assert_eq!(record.address, IpAddr::V4(std::net::Ipv4Addr::LOCALHOST));
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/txgossip.rs | crates/net/network/tests/it/txgossip.rs | //! Testing gossiping of transactions.
use alloy_consensus::TxLegacy;
use alloy_primitives::{Signature, U256};
use futures::StreamExt;
use reth_ethereum_primitives::TransactionSigned;
use reth_network::{
test_utils::{NetworkEventStream, Testnet},
transactions::config::TransactionPropagationKind,
NetworkEvent, NetworkEventListenerProvider, Peers,
};
use reth_network_api::{events::PeerEvent, PeerKind, PeersInfo};
use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
use reth_transaction_pool::{
test_utils::TransactionGenerator, AddedTransactionOutcome, PoolTransaction, TransactionPool,
};
use std::sync::Arc;
use tokio::join;
#[tokio::test(flavor = "multi_thread")]
async fn test_tx_gossip() {
reth_tracing::init_test_tracing();
let provider = MockEthProvider::default();
let net = Testnet::create_with(2, provider.clone()).await;
// install request handlers
let net = net.with_eth_pool();
let handle = net.spawn();
// connect all the peers
handle.connect_peers().await;
let peer0 = &handle.peers()[0];
let peer1 = &handle.peers()[1];
let peer0_pool = peer0.pool().unwrap();
let mut peer0_tx_listener = peer0.pool().unwrap().pending_transactions_listener();
let mut peer1_tx_listener = peer1.pool().unwrap().pending_transactions_listener();
let mut tx_gen = TransactionGenerator::new(rand::rng());
let tx = tx_gen.gen_eip1559_pooled();
// ensure the sender has balance
let sender = tx.sender();
provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000)));
// insert pending tx in peer0's pool
let AddedTransactionOutcome { hash, .. } =
peer0_pool.add_external_transaction(tx).await.unwrap();
let inserted = peer0_tx_listener.recv().await.unwrap();
assert_eq!(inserted, hash);
// ensure tx is gossiped to peer1
let received = peer1_tx_listener.recv().await.unwrap();
assert_eq!(received, hash);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_tx_propagation_policy_trusted_only() {
reth_tracing::init_test_tracing();
let provider = MockEthProvider::default();
let policy = TransactionPropagationKind::Trusted;
let net = Testnet::create_with(2, provider.clone()).await;
let net = net.with_eth_pool_config_and_policy(Default::default(), policy);
let handle = net.spawn();
// connect all the peers
handle.connect_peers().await;
let peer_0_handle = &handle.peers()[0];
let peer_1_handle = &handle.peers()[1];
let mut peer0_tx_listener = peer_0_handle.pool().unwrap().pending_transactions_listener();
let mut peer1_tx_listener = peer_1_handle.pool().unwrap().pending_transactions_listener();
let mut tx_gen = TransactionGenerator::new(rand::rng());
let tx = tx_gen.gen_eip1559_pooled();
// ensure the sender has balance
let sender = tx.sender();
provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000)));
// insert the tx in peer0's pool
let outcome_0 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap();
let inserted = peer0_tx_listener.recv().await.unwrap();
assert_eq!(inserted, outcome_0.hash);
// ensure tx is not gossiped to peer1
peer1_tx_listener.try_recv().expect_err("Empty");
let mut event_stream_0 = NetworkEventStream::new(peer_0_handle.network().event_listener());
let mut event_stream_1 = NetworkEventStream::new(peer_1_handle.network().event_listener());
// disconnect peer1 from peer0
peer_0_handle.network().remove_peer(*peer_1_handle.peer_id(), PeerKind::Static);
join!(event_stream_0.next_session_closed(), event_stream_1.next_session_closed());
// re register peer1 as trusted
peer_0_handle.network().add_trusted_peer(*peer_1_handle.peer_id(), peer_1_handle.local_addr());
join!(event_stream_0.next_session_established(), event_stream_1.next_session_established());
let mut tx_gen = TransactionGenerator::new(rand::rng());
let tx = tx_gen.gen_eip1559_pooled();
// ensure the sender has balance
let sender = tx.sender();
provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000)));
// insert pending tx in peer0's pool
let outcome_1 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap();
let inserted = peer0_tx_listener.recv().await.unwrap();
assert_eq!(inserted, outcome_1.hash);
// ensure peer1 now receives the pending txs from peer0
let mut buff = Vec::with_capacity(2);
buff.push(peer1_tx_listener.recv().await.unwrap());
buff.push(peer1_tx_listener.recv().await.unwrap());
assert!(buff.contains(&outcome_1.hash));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_4844_tx_gossip_penalization() {
reth_tracing::init_test_tracing();
let provider = MockEthProvider::default();
let net = Testnet::create_with(2, provider.clone()).await;
// install request handlers
let net = net.with_eth_pool();
let handle = net.spawn();
let peer0 = &handle.peers()[0];
let peer1 = &handle.peers()[1];
// connect all the peers
handle.connect_peers().await;
let mut peer1_tx_listener = peer1.pool().unwrap().pending_transactions_listener();
let mut tx_gen = TransactionGenerator::new(rand::rng());
// peer 0 will be penalized for sending txs[0] over gossip
let txs = vec![tx_gen.gen_eip4844_pooled(), tx_gen.gen_eip1559_pooled()];
for tx in &txs {
let sender = tx.sender();
provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000)));
}
let signed_txs: Vec<Arc<TransactionSigned>> =
txs.iter().map(|tx| Arc::new(tx.transaction().clone().into_inner())).collect();
let network_handle = peer0.network();
let peer0_reputation_before =
peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation();
// sends txs directly to peer1
network_handle.send_transactions(*peer1.peer_id(), signed_txs);
let received = peer1_tx_listener.recv().await.unwrap();
let peer0_reputation_after =
peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation();
assert_ne!(peer0_reputation_before, peer0_reputation_after);
assert_eq!(received, *txs[1].transaction().tx_hash());
// this will return an [`Empty`] error because blob txs are disallowed to be broadcasted
assert!(peer1_tx_listener.try_recv().is_err());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_sending_invalid_transactions() {
reth_tracing::init_test_tracing();
let provider = MockEthProvider::default();
let net = Testnet::create_with(2, provider.clone()).await;
// install request handlers
let net = net.with_eth_pool();
let handle = net.spawn();
let peer0 = &handle.peers()[0];
let peer1 = &handle.peers()[1];
// connect all the peers
handle.connect_peers().await;
assert_eq!(peer0.network().num_connected_peers(), 1);
let mut peer1_events = peer1.network().event_listener();
let mut tx_listener = peer1.pool().unwrap().new_transactions_listener();
for idx in 0..10 {
// send invalid txs to peer1
let tx = TxLegacy {
chain_id: None,
nonce: idx,
gas_price: 0,
gas_limit: 0,
to: Default::default(),
value: Default::default(),
input: Default::default(),
};
let tx = TransactionSigned::new_unhashed(tx.into(), Signature::test_signature());
peer0.network().send_transactions(*peer1.peer_id(), vec![Arc::new(tx)]);
}
// await disconnect for bad tx spam
if let Some(ev) = peer1_events.next().await {
match ev {
NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => {
assert_eq!(peer_id, *peer0.peer_id());
}
NetworkEvent::ActivePeerSession { .. } |
NetworkEvent::Peer(PeerEvent::SessionEstablished { .. }) => {
panic!("unexpected SessionEstablished event")
}
NetworkEvent::Peer(PeerEvent::PeerAdded(_)) => {
panic!("unexpected PeerAdded event")
}
NetworkEvent::Peer(PeerEvent::PeerRemoved(_)) => {
panic!("unexpected PeerRemoved event")
}
}
}
// ensure txs never made it to the pool
assert!(tx_listener.try_recv().is_err());
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/main.rs | crates/net/network/tests/it/main.rs | #![allow(missing_docs)]
mod big_pooled_txs_req;
mod connect;
mod multiplex;
mod requests;
mod session;
mod startup;
mod transaction_hash_fetching;
mod txgossip;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/tests/it/requests.rs | crates/net/network/tests/it/requests.rs | #![allow(unreachable_pub)]
//! Tests for eth related requests
use alloy_consensus::Header;
use rand::Rng;
use reth_eth_wire::{EthVersion, HeadersDirection};
use reth_ethereum_primitives::Block;
use reth_network::{
test_utils::{NetworkEventStream, PeerConfig, Testnet},
BlockDownloaderProvider, NetworkEventListenerProvider,
};
use reth_network_api::{NetworkInfo, Peers};
use reth_network_p2p::{
bodies::client::BodiesClient,
headers::client::{HeadersClient, HeadersRequest},
};
use reth_provider::test_utils::MockEthProvider;
use reth_transaction_pool::test_utils::{TestPool, TransactionGenerator};
use std::sync::Arc;
use tokio::sync::oneshot;
#[tokio::test(flavor = "multi_thread")]
async fn test_get_body() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut tx_gen = TransactionGenerator::new(rand::rng());
let mut net = Testnet::create_with(2, mock_provider.clone()).await;
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
// request some blocks
for _ in 0..100 {
// Set a new random block to the mock storage and request it via the network
let block_hash = rng.random();
let mut block: Block = Block::default();
block.body.transactions.push(tx_gen.gen_eip4844());
mock_provider.add_block(block_hash, block.clone());
let res = fetch0.get_block_bodies(vec![block_hash]).await;
assert!(res.is_ok(), "{res:?}");
let blocks = res.unwrap().1;
assert_eq!(blocks.len(), 1);
assert_eq!(blocks[0], block.body);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_get_body_range() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut tx_gen = TransactionGenerator::new(rand::rng());
let mut net = Testnet::create_with(2, mock_provider.clone()).await;
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
let mut all_blocks = Vec::new();
let mut block_hashes = Vec::new();
// add some blocks
for _ in 0..100 {
let block_hash = rng.random();
let mut block: Block = Block::default();
block.body.transactions.push(tx_gen.gen_eip4844());
mock_provider.add_block(block_hash, block.clone());
all_blocks.push(block);
block_hashes.push(block_hash);
}
// ensure we can fetch the correct bodies
for idx in 0..100 {
let count = std::cmp::min(100 - idx, 10); // Limit to 10 bodies per request
let hashes_to_fetch = &block_hashes[idx..idx + count];
let res = fetch0.get_block_bodies(hashes_to_fetch.to_vec()).await;
assert!(res.is_ok(), "{res:?}");
let bodies = res.unwrap().1;
assert_eq!(bodies.len(), count);
for i in 0..bodies.len() {
assert_eq!(bodies[i], all_blocks[idx + i].body);
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_get_header() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut net = Testnet::create_with(2, mock_provider.clone()).await;
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
let start: u64 = rng.random();
let mut hash = rng.random();
// request some headers
for idx in 0..100 {
// Set a new random header to the mock storage and request it via the network
let header = Header { number: start + idx, parent_hash: hash, ..Default::default() };
hash = rng.random();
mock_provider.add_header(hash, header.clone());
let req =
HeadersRequest { start: hash.into(), limit: 1, direction: HeadersDirection::Falling };
let res = fetch0.get_headers(req).await;
assert!(res.is_ok(), "{res:?}");
let headers = res.unwrap().1;
assert_eq!(headers.len(), 1);
assert_eq!(headers[0], header);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_get_header_range() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut net = Testnet::create_with(2, mock_provider.clone()).await;
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
let start: u64 = rng.random();
let mut hash = rng.random();
let mut all_headers = Vec::new();
// add some headers
for idx in 0..100 {
// Set a new random header to the mock storage and request it via the network
let header = Header { number: start + idx, parent_hash: hash, ..Default::default() };
hash = rng.random();
mock_provider.add_header(hash, header.clone());
all_headers.push(header.seal(hash));
}
// ensure we can fetch the correct headers
for idx in 0..100 {
let count = 100 - idx;
let header = &all_headers[idx];
let req = HeadersRequest {
start: header.hash().into(),
limit: count as u64,
direction: HeadersDirection::Rising,
};
let res = fetch0.get_headers(req).await;
assert!(res.is_ok(), "{res:?}");
let headers = res.unwrap().1;
assert_eq!(headers.len(), count);
assert_eq!(headers[0].number, start + idx as u64);
for i in 0..headers.len() {
assert_eq!(&headers[i], all_headers[idx + i].inner());
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_get_header_range_falling() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut net = Testnet::create_with(2, mock_provider.clone()).await;
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
let start: u64 = rng.random();
let mut hash = rng.random();
let mut all_headers = Vec::new();
// add some headers
for idx in 0..100 {
// Set a new random header to the mock storage
let header = Header { number: start + idx, parent_hash: hash, ..Default::default() };
hash = rng.random();
mock_provider.add_header(hash, header.clone());
all_headers.push(header.seal(hash));
}
// ensure we can fetch the correct headers in falling direction
// start from the last header and work backwards
for idx in (0..100).rev() {
let count = std::cmp::min(idx + 1, 100); // Can't fetch more than idx+1 headers when going backwards
let header = &all_headers[idx];
let req = HeadersRequest {
start: header.hash().into(),
limit: count as u64,
direction: HeadersDirection::Falling,
};
let res = fetch0.get_headers(req).await;
assert!(res.is_ok(), "{res:?}");
let headers = res.unwrap().1;
assert_eq!(headers.len(), count);
assert_eq!(headers[0].number, start + idx as u64);
// When fetching in Falling direction, headers come in reverse order
for i in 0..headers.len() {
assert_eq!(&headers[i], all_headers[idx - i].inner());
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth68_get_receipts() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default();
// Create peers with ETH68 protocol explicitly
let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth68.into()));
net.add_peer_with_config(p0).await.unwrap();
let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth68.into()));
net.add_peer_with_config(p1).await.unwrap();
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
// Create test receipts and add them to the mock provider
for block_num in 1..=10 {
let block_hash = rng.random();
let header = Header { number: block_num, ..Default::default() };
// Create some test receipts
let receipts = vec![
reth_ethereum_primitives::Receipt {
cumulative_gas_used: 21000,
success: true,
..Default::default()
},
reth_ethereum_primitives::Receipt {
cumulative_gas_used: 42000,
success: false,
..Default::default()
},
];
mock_provider.add_header(block_hash, header.clone());
mock_provider.add_receipts(header.number, receipts);
// Test receipt request via low-level peer request
let (tx, rx) = oneshot::channel();
handle0.send_request(
*handle1.peer_id(),
reth_network::PeerRequest::GetReceipts {
request: reth_eth_wire::GetReceipts(vec![block_hash]),
response: tx,
},
);
let result = rx.await.unwrap();
let receipts_response = result.unwrap();
assert_eq!(receipts_response.0.len(), 1);
assert_eq!(receipts_response.0[0].len(), 2);
// Eth68 receipts should have bloom filters - verify the structure
assert_eq!(receipts_response.0[0][0].receipt.cumulative_gas_used, 21000);
assert_eq!(receipts_response.0[0][1].receipt.cumulative_gas_used, 42000);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_get_headers() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default();
// Create peers with ETH69 protocol
let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p0).await.unwrap();
let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p1).await.unwrap();
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
let start: u64 = rng.random();
let mut hash = rng.random();
// request some headers via eth69 connection
for idx in 0..50 {
let header = Header { number: start + idx, parent_hash: hash, ..Default::default() };
hash = rng.random();
mock_provider.add_header(hash, header.clone());
let req =
HeadersRequest { start: hash.into(), limit: 1, direction: HeadersDirection::Falling };
let res = fetch0.get_headers(req).await;
assert!(res.is_ok(), "{res:?}");
let headers = res.unwrap().1;
assert_eq!(headers.len(), 1);
assert_eq!(headers[0], header);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_get_bodies() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut tx_gen = TransactionGenerator::new(rand::rng());
let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default();
// Create peers with ETH69 protocol
let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p0).await.unwrap();
let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p1).await.unwrap();
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
let fetch0 = handle0.fetch_client().await.unwrap();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
// request some blocks via eth69 connection
for _ in 0..50 {
let block_hash = rng.random();
let mut block: Block = Block::default();
block.body.transactions.push(tx_gen.gen_eip4844());
mock_provider.add_block(block_hash, block.clone());
let res = fetch0.get_block_bodies(vec![block_hash]).await;
assert!(res.is_ok(), "{res:?}");
let blocks = res.unwrap().1;
assert_eq!(blocks.len(), 1);
assert_eq!(blocks[0], block.body);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_eth69_get_receipts() {
reth_tracing::init_test_tracing();
let mut rng = rand::rng();
let mock_provider = Arc::new(MockEthProvider::default());
let mut net: Testnet<Arc<MockEthProvider>, TestPool> = Testnet::default();
// Create peers with ETH69 protocol
let p0 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p0).await.unwrap();
let p1 = PeerConfig::with_protocols(mock_provider.clone(), Some(EthVersion::Eth69.into()));
net.add_peer_with_config(p1).await.unwrap();
// install request handlers
net.for_each_mut(|peer| peer.install_request_handler());
let handle0 = net.peers()[0].handle();
let mut events0 = NetworkEventStream::new(handle0.event_listener());
let handle1 = net.peers()[1].handle();
let _handle = net.spawn();
handle0.add_peer(*handle1.peer_id(), handle1.local_addr());
// Wait for the session to be established
let connected = events0.next_session_established().await.unwrap();
assert_eq!(connected, *handle1.peer_id());
// Create test receipts and add them to the mock provider
for block_num in 1..=10 {
let block_hash = rng.random();
let header = Header { number: block_num, ..Default::default() };
// Create some test receipts
let receipts = vec![
reth_ethereum_primitives::Receipt {
cumulative_gas_used: 21000,
success: true,
..Default::default()
},
reth_ethereum_primitives::Receipt {
cumulative_gas_used: 42000,
success: false,
..Default::default()
},
];
mock_provider.add_header(block_hash, header.clone());
mock_provider.add_receipts(header.number, receipts);
let (tx, rx) = oneshot::channel();
handle0.send_request(
*handle1.peer_id(),
reth_network::PeerRequest::GetReceipts69 {
request: reth_eth_wire::GetReceipts(vec![block_hash]),
response: tx,
},
);
let result = rx.await.unwrap();
let receipts_response = match result {
Ok(resp) => resp,
Err(e) => panic!("Failed to get receipts response: {e:?}"),
};
assert_eq!(receipts_response.0.len(), 1);
assert_eq!(receipts_response.0[0].len(), 2);
// ETH69 receipts do not include bloom filters - verify the structure
assert_eq!(receipts_response.0[0][0].cumulative_gas_used, 21000);
assert_eq!(receipts_response.0[0][1].cumulative_gas_used, 42000);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/benches/tx_manager_hash_fetching.rs | crates/net/network/benches/tx_manager_hash_fetching.rs | #![allow(missing_docs)]
use alloy_primitives::{B256, U256};
use criterion::{measurement::WallTime, *};
use rand::SeedableRng;
use reth_eth_wire::EthVersion;
use reth_eth_wire_types::EthNetworkPrimitives;
use reth_network::{
test_utils::{
transactions::{buffer_hash_to_tx_fetcher, new_mock_session},
Testnet,
},
transactions::{
fetcher::TransactionFetcher, TransactionFetcherConfig, TransactionPropagationMode::Max,
TransactionsManagerConfig,
},
};
use reth_network_peers::PeerId;
use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool};
use std::collections::HashMap;
use tokio::runtime::Runtime as TokioRuntime;
criterion_group!(
name = tx_fetch_benches;
config = Criterion::default();
targets = tx_fetch_bench, fetch_pending_hashes,
);
pub fn benchmark_fetch_pending_hashes(group: &mut BenchmarkGroup<'_, WallTime>, peers_num: usize) {
let mut tx_fetcher = TransactionFetcher::<EthNetworkPrimitives>::default();
let mut peers = HashMap::default();
for _i in 0..peers_num {
// NOTE: the worst case, each tx in the cache belongs to a different peer.
let peer = PeerId::random();
let hash = B256::random();
let (mut peer_data, _) = new_mock_session(peer, EthVersion::Eth66);
peer_data.seen_transactions_mut().insert(hash);
peers.insert(peer, peer_data);
buffer_hash_to_tx_fetcher(&mut tx_fetcher, hash, peer, 0, None);
}
let group_id = format!("fetch pending hashes, peers num: {peers_num}");
group.bench_function(group_id, |b| {
b.iter(|| {
tx_fetcher.on_fetch_pending_hashes(&peers, |_| true);
});
});
}
pub fn fetch_pending_hashes(c: &mut Criterion) {
let mut group = c.benchmark_group("Fetch Pending Hashes");
for peers in [5, 10, 20, 100, 1000, 10000, 100000] {
benchmark_fetch_pending_hashes(&mut group, peers);
}
group.finish();
}
pub fn tx_fetch_bench(c: &mut Criterion) {
let rt = TokioRuntime::new().unwrap();
let mut group = c.benchmark_group("Transaction Fetch");
group.sample_size(30);
group.bench_function("fetch_transactions", |b| {
b.to_async(&rt).iter_with_setup(
|| {
tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
let tx_manager_config = TransactionsManagerConfig {
propagation_mode: Max(0),
transaction_fetcher_config: TransactionFetcherConfig {
max_inflight_requests: 1,
..Default::default()
},
..Default::default()
};
let provider = MockEthProvider::default();
let num_peers = 10;
let net = Testnet::create_with(num_peers, provider.clone()).await;
// install request handlers
let net = net.with_eth_pool_config(tx_manager_config);
let handle = net.spawn();
// connect all the peers first
handle.connect_peers().await;
let listening_peer = &handle.peers()[num_peers - 1];
let listening_peer_tx_listener =
listening_peer.pool().unwrap().pending_transactions_listener();
let num_tx_per_peer = 10;
for i in 1..num_peers {
let peer = &handle.peers()[i];
let peer_pool = peer.pool().unwrap();
for _ in 0..num_tx_per_peer {
let mut tx_gen =
TransactionGenerator::new(rand::rngs::StdRng::seed_from_u64(0));
let tx = tx_gen.gen_eip1559_pooled();
let sender = tx.sender();
provider.add_account(
sender,
ExtendedAccount::new(0, U256::from(100_000_000)),
);
peer_pool.add_external_transaction(tx.clone()).await.unwrap();
}
}
// Total expected transactions
let total_expected_tx = num_tx_per_peer * (num_peers - 1);
(listening_peer_tx_listener, total_expected_tx)
})
})
},
|(mut listening_peer_tx_listener, total_expected_tx)| async move {
let mut received_tx = 0;
while listening_peer_tx_listener.recv().await.is_some() {
received_tx += 1;
if received_tx >= total_expected_tx {
break;
}
}
},
)
});
group.finish();
}
criterion_main!(tx_fetch_benches);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/benches/broadcast.rs | crates/net/network/benches/broadcast.rs | #![allow(missing_docs)]
use alloy_primitives::U256;
use criterion::*;
use futures::StreamExt;
use rand::SeedableRng;
use reth_network::{test_utils::Testnet, NetworkEventListenerProvider};
use reth_network_api::Peers;
use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction};
use std::sync::Arc;
use tokio::{runtime::Runtime as TokioRuntime, sync::mpsc::unbounded_channel};
criterion_group!(
name = broadcast_benches;
config = Criterion::default();
targets = broadcast_ingress_bench
);
pub fn broadcast_ingress_bench(c: &mut Criterion) {
let rt = TokioRuntime::new().unwrap();
let mut group = c.benchmark_group("Broadcast Ingress");
group.sample_size(10);
group.bench_function("receive_broadcasts", move |b| {
b.to_async(&rt).iter_with_setup(
|| {
// `b.to_async(rt)` automatically enters the
// runtime context and simply calling `block_on` here will cause the code to panic.
tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
let provider = MockEthProvider::default();
let mut net = Testnet::create_with(2, provider.clone()).await;
let mut peer0 = net.remove_peer(0);
let (tx, transactions_rx) = unbounded_channel();
peer0.network_mut().set_transactions(tx);
let mut events0 = peer0.handle().event_listener();
let net = net.with_eth_pool();
let handle = net.spawn();
let peer1 = handle.peers()[0].network().clone();
let peer0_id = peer0.peer_id();
peer1.add_peer(peer0_id, peer0.local_addr());
// await connection
tokio::select! {
_ = events0.next() => {}
_ = &mut peer0 => {}
}
// prepare some transactions
let mut tx_gen =
TransactionGenerator::new(rand::rngs::StdRng::seed_from_u64(0));
let num_broadcasts = 10;
for _ in 0..num_broadcasts {
for _ in 0..2 {
let mut txs = Vec::new();
let tx = tx_gen.gen_eip1559_pooled();
// ensure the sender has balance
provider.add_account(
tx.sender(),
ExtendedAccount::new(0, U256::from(100_000_000)),
);
txs.push(Arc::new(tx.transaction().clone().into_inner()));
peer1.send_transactions(peer0_id, txs);
}
}
(num_broadcasts, transactions_rx, peer0, handle)
})
})
},
|(num_txs, mut transactions_rx, mut peer0, _handle)| async move {
let mut count = 0;
loop {
tokio::select! {
_ = transactions_rx.recv() => {
count += 1;
if count == num_txs {
break;
}
},
_ = &mut peer0 => {
}
}
}
},
)
});
}
criterion_main!(broadcast_benches);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/lib.rs | crates/net/eth-wire-types/src/lib.rs | //! Types for the eth wire protocol: <https://github.com/ethereum/devp2p/blob/master/caps/eth.md>
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
mod status;
pub use status::{Status, StatusBuilder, StatusEth69, StatusMessage, UnifiedStatus};
pub mod version;
pub use version::{EthVersion, ProtocolVersion};
pub mod message;
pub use message::{EthMessage, EthMessageID, ProtocolMessage};
pub mod header;
pub use header::*;
pub mod blocks;
pub use blocks::*;
pub mod broadcast;
pub use broadcast::*;
pub mod transactions;
pub use transactions::*;
pub mod state;
pub use state::*;
pub mod receipts;
pub use receipts::*;
pub mod disconnect_reason;
pub use disconnect_reason::*;
pub mod capability;
pub use capability::*;
pub mod primitives;
pub use primitives::*;
pub mod snap;
pub use snap::*;
/// re-export for convenience
pub use alloy_eips::eip1898::{BlockHashOrNumber, HashOrNumber};
pub use alloy_eips::eip2718::Encodable2718;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/version.rs | crates/net/eth-wire-types/src/version.rs | //! Support for representing the version of the `eth`
use crate::alloc::string::ToString;
use alloc::string::String;
use alloy_rlp::{Decodable, Encodable, Error as RlpError};
use bytes::BufMut;
use core::{fmt, str::FromStr};
use derive_more::Display;
use reth_codecs_derive::add_arbitrary_tests;
/// Error thrown when failed to parse a valid [`EthVersion`].
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
#[error("Unknown eth protocol version: {0}")]
pub struct ParseVersionError(String);
/// The `eth` protocol version.
#[repr(u8)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Display)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub enum EthVersion {
/// The `eth` protocol version 66.
Eth66 = 66,
/// The `eth` protocol version 67.
Eth67 = 67,
/// The `eth` protocol version 68.
Eth68 = 68,
/// The `eth` protocol version 69.
Eth69 = 69,
}
impl EthVersion {
/// The latest known eth version
pub const LATEST: Self = Self::Eth68;
/// All known eth versions
pub const ALL_VERSIONS: &'static [Self] = &[Self::Eth69, Self::Eth68, Self::Eth67, Self::Eth66];
/// Returns true if the version is eth/66
pub const fn is_eth66(&self) -> bool {
matches!(self, Self::Eth66)
}
/// Returns true if the version is eth/67
pub const fn is_eth67(&self) -> bool {
matches!(self, Self::Eth67)
}
/// Returns true if the version is eth/68
pub const fn is_eth68(&self) -> bool {
matches!(self, Self::Eth68)
}
/// Returns true if the version is eth/69
pub const fn is_eth69(&self) -> bool {
matches!(self, Self::Eth69)
}
}
/// RLP encodes `EthVersion` as a single byte (66-69).
impl Encodable for EthVersion {
fn encode(&self, out: &mut dyn BufMut) {
(*self as u8).encode(out)
}
fn length(&self) -> usize {
(*self as u8).length()
}
}
/// RLP decodes a single byte into `EthVersion`.
/// Returns error if byte is not a valid version (66-69).
impl Decodable for EthVersion {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let version = u8::decode(buf)?;
Self::try_from(version).map_err(|_| RlpError::Custom("invalid eth version"))
}
}
/// Allow for converting from a `&str` to an `EthVersion`.
///
/// # Example
/// ```
/// use reth_eth_wire_types::EthVersion;
///
/// let version = EthVersion::try_from("67").unwrap();
/// assert_eq!(version, EthVersion::Eth67);
/// ```
impl TryFrom<&str> for EthVersion {
type Error = ParseVersionError;
#[inline]
fn try_from(s: &str) -> Result<Self, Self::Error> {
match s {
"66" => Ok(Self::Eth66),
"67" => Ok(Self::Eth67),
"68" => Ok(Self::Eth68),
"69" => Ok(Self::Eth69),
_ => Err(ParseVersionError(s.to_string())),
}
}
}
/// Allow for converting from a u8 to an `EthVersion`.
///
/// # Example
/// ```
/// use reth_eth_wire_types::EthVersion;
///
/// let version = EthVersion::try_from(67).unwrap();
/// assert_eq!(version, EthVersion::Eth67);
/// ```
impl TryFrom<u8> for EthVersion {
type Error = ParseVersionError;
#[inline]
fn try_from(u: u8) -> Result<Self, Self::Error> {
match u {
66 => Ok(Self::Eth66),
67 => Ok(Self::Eth67),
68 => Ok(Self::Eth68),
69 => Ok(Self::Eth69),
_ => Err(ParseVersionError(u.to_string())),
}
}
}
impl FromStr for EthVersion {
type Err = ParseVersionError;
#[inline]
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::try_from(s)
}
}
impl From<EthVersion> for u8 {
#[inline]
fn from(v: EthVersion) -> Self {
v as Self
}
}
impl From<EthVersion> for &'static str {
#[inline]
fn from(v: EthVersion) -> &'static str {
match v {
EthVersion::Eth66 => "66",
EthVersion::Eth67 => "67",
EthVersion::Eth68 => "68",
EthVersion::Eth69 => "69",
}
}
}
/// `RLPx` `p2p` protocol version
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub enum ProtocolVersion {
/// `p2p` version 4
V4 = 4,
/// `p2p` version 5
#[default]
V5 = 5,
}
impl fmt::Display for ProtocolVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "v{}", *self as u8)
}
}
impl Encodable for ProtocolVersion {
fn encode(&self, out: &mut dyn BufMut) {
(*self as u8).encode(out)
}
fn length(&self) -> usize {
// the version should be a single byte
(*self as u8).length()
}
}
impl Decodable for ProtocolVersion {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let version = u8::decode(buf)?;
match version {
4 => Ok(Self::V4),
5 => Ok(Self::V5),
_ => Err(RlpError::Custom("unknown p2p protocol version")),
}
}
}
#[cfg(test)]
mod tests {
use super::{EthVersion, ParseVersionError};
use alloy_rlp::{Decodable, Encodable, Error as RlpError};
use bytes::BytesMut;
#[test]
fn test_eth_version_try_from_str() {
assert_eq!(EthVersion::Eth66, EthVersion::try_from("66").unwrap());
assert_eq!(EthVersion::Eth67, EthVersion::try_from("67").unwrap());
assert_eq!(EthVersion::Eth68, EthVersion::try_from("68").unwrap());
assert_eq!(EthVersion::Eth69, EthVersion::try_from("69").unwrap());
assert_eq!(Err(ParseVersionError("70".to_string())), EthVersion::try_from("70"));
}
#[test]
fn test_eth_version_from_str() {
assert_eq!(EthVersion::Eth66, "66".parse().unwrap());
assert_eq!(EthVersion::Eth67, "67".parse().unwrap());
assert_eq!(EthVersion::Eth68, "68".parse().unwrap());
assert_eq!(EthVersion::Eth69, "69".parse().unwrap());
assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::<EthVersion>());
}
#[test]
fn test_eth_version_rlp_encode() {
let versions = [EthVersion::Eth66, EthVersion::Eth67, EthVersion::Eth68, EthVersion::Eth69];
for version in versions {
let mut encoded = BytesMut::new();
version.encode(&mut encoded);
assert_eq!(encoded.len(), 1);
assert_eq!(encoded[0], version as u8);
}
}
#[test]
fn test_eth_version_rlp_decode() {
let test_cases = [
(66_u8, Ok(EthVersion::Eth66)),
(67_u8, Ok(EthVersion::Eth67)),
(68_u8, Ok(EthVersion::Eth68)),
(69_u8, Ok(EthVersion::Eth69)),
(70_u8, Err(RlpError::Custom("invalid eth version"))),
(65_u8, Err(RlpError::Custom("invalid eth version"))),
];
for (input, expected) in test_cases {
let mut encoded = BytesMut::new();
input.encode(&mut encoded);
let mut slice = encoded.as_ref();
let result = EthVersion::decode(&mut slice);
assert_eq!(result, expected);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/transactions.rs | crates/net/eth-wire-types/src/transactions.rs | //! Implements the `GetPooledTransactions` and `PooledTransactions` message types.
use alloc::vec::Vec;
use alloy_consensus::transaction::PooledTransaction;
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::B256;
use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper};
use derive_more::{Constructor, Deref, IntoIterator};
use reth_codecs_derive::add_arbitrary_tests;
/// A list of transaction hashes that the peer would like transaction bodies for.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetPooledTransactions(
/// The transaction hashes to request transaction bodies for.
pub Vec<B256>,
);
impl<T> From<Vec<T>> for GetPooledTransactions
where
T: Into<B256>,
{
fn from(hashes: Vec<T>) -> Self {
Self(hashes.into_iter().map(|h| h.into()).collect())
}
}
/// The response to [`GetPooledTransactions`], containing the transaction bodies associated with
/// the requested hashes.
///
/// This response may not contain all bodies requested, but the bodies should be in the same order
/// as the request's hashes. Hashes may be skipped, and the client should ensure that each body
/// corresponds to a requested hash. Hashes may need to be re-requested if the bodies are not
/// included in the response.
// #[derive_arbitrary(rlp, 10)]
#[derive(
Clone,
Debug,
PartialEq,
Eq,
RlpEncodableWrapper,
RlpDecodableWrapper,
IntoIterator,
Deref,
Constructor,
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct PooledTransactions<T = PooledTransaction>(
/// The transaction bodies, each of which should correspond to a requested hash.
pub Vec<T>,
);
impl<T: Encodable2718> PooledTransactions<T> {
/// Returns an iterator over the transaction hashes in this response.
pub fn hashes(&self) -> impl Iterator<Item = B256> + '_ {
self.0.iter().map(|tx| tx.trie_hash())
}
}
impl<T, U> TryFrom<Vec<U>> for PooledTransactions<T>
where
T: TryFrom<U>,
{
type Error = T::Error;
fn try_from(txs: Vec<U>) -> Result<Self, Self::Error> {
txs.into_iter().map(T::try_from).collect()
}
}
impl<T> FromIterator<T> for PooledTransactions<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
Self(iter.into_iter().collect())
}
}
impl<T> Default for PooledTransactions<T> {
fn default() -> Self {
Self(Default::default())
}
}
#[cfg(test)]
mod tests {
use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions};
use alloy_consensus::{transaction::PooledTransaction, TxEip1559, TxLegacy};
use alloy_primitives::{hex, Signature, TxKind, U256};
use alloy_rlp::{Decodable, Encodable};
use reth_chainspec::MIN_TRANSACTION_GAS;
use reth_ethereum_primitives::{Transaction, TransactionSigned};
use std::str::FromStr;
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn encode_get_pooled_transactions() {
let expected = hex!(
"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
);
let mut data = vec![];
let request = RequestPair {
request_id: 1111,
message: GetPooledTransactions(vec![
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
]),
};
request.encode(&mut data);
assert_eq!(data, expected);
}
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn decode_get_pooled_transactions() {
let data = hex!(
"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
);
let request = RequestPair::<GetPooledTransactions>::decode(&mut &data[..]).unwrap();
assert_eq!(
request,
RequestPair {
request_id: 1111,
message: GetPooledTransactions(vec![
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
])
}
);
}
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn encode_pooled_transactions() {
let expected = hex!(
"f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"
);
let mut data = vec![];
let txs = vec![
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x8u64,
gas_price: 0x4a817c808,
gas_limit: 0x2e248,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x200u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12",
)
.unwrap(),
U256::from_str(
"0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10",
)
.unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x09u64,
gas_price: 0x4a817c809,
gas_limit: 0x33450,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x2d9u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb",
)
.unwrap(),
U256::from_str(
"0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb",
)
.unwrap(),
false,
),
),
];
let message: Vec<PooledTransaction> = txs
.into_iter()
.map(|tx| {
PooledTransaction::try_from(tx)
.expect("Failed to convert TransactionSigned to PooledTransaction")
})
.collect();
let request = RequestPair {
request_id: 1111,
message: PooledTransactions(message), /* Assuming PooledTransactions wraps a
* Vec<PooledTransaction> */
};
request.encode(&mut data);
assert_eq!(data, expected);
}
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn decode_pooled_transactions() {
let data = hex!(
"f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"
);
let txs = vec![
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x8u64,
gas_price: 0x4a817c808,
gas_limit: 0x2e248,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x200u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12",
)
.unwrap(),
U256::from_str(
"0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10",
)
.unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x09u64,
gas_price: 0x4a817c809,
gas_limit: 0x33450,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x2d9u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb",
)
.unwrap(),
U256::from_str(
"0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb",
)
.unwrap(),
false,
),
),
];
let message: Vec<PooledTransaction> = txs
.into_iter()
.map(|tx| {
PooledTransaction::try_from(tx)
.expect("Failed to convert TransactionSigned to PooledTransaction")
})
.collect();
let expected = RequestPair { request_id: 1111, message: PooledTransactions(message) };
let request = RequestPair::<PooledTransactions>::decode(&mut &data[..]).unwrap();
assert_eq!(request, expected);
}
#[test]
fn decode_pooled_transactions_network() {
let data = hex!(
"f9022980f90225f8650f84832156008287fb94cf7f9e66af820a19257a2108375b180b0ec491678204d2802ca035b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981a0612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469f86b0384773594008398968094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba0ce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071a03ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88f86b01843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac3960468702769bb01b2a00802ba0e24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0aa05406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631daf86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18"
);
let decoded_transactions =
RequestPair::<PooledTransactions>::decode(&mut &data[..]).unwrap();
let txs = vec![
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 15u64,
gas_price: 2200000000,
gas_limit: 34811,
to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()),
value: U256::from(1234u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0x35b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981",
)
.unwrap(),
U256::from_str(
"0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860",
)
.unwrap(),
true,
),
),
TransactionSigned::new_unhashed(
Transaction::Eip1559(TxEip1559 {
chain_id: 4,
nonce: 26u64,
max_priority_fee_per_gas: 1500000000,
max_fee_per_gas: 1500000013,
gas_limit: MIN_TRANSACTION_GAS,
to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()),
value: U256::from(3000000000000000000u64),
input: Default::default(),
access_list: Default::default(),
}),
Signature::new(
U256::from_str(
"0x59e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafd",
)
.unwrap(),
U256::from_str(
"0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469",
)
.unwrap(),
true,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 3u64,
gas_price: 2000000000,
gas_limit: 10000000,
to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()),
value: U256::from(1000000000000000u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0xce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071",
)
.unwrap(),
U256::from_str(
"0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88",
)
.unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 1u64,
gas_price: 1000000000,
gas_limit: 100000,
to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()),
value: U256::from(693361000000000u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0xe24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0a",
)
.unwrap(),
U256::from_str(
"0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da",
)
.unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 2u64,
gas_price: 1000000000,
gas_limit: 100000,
to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()),
value: U256::from(1000000000000000u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0xeb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5ae",
)
.unwrap(),
U256::from_str(
"0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18",
)
.unwrap(),
false,
),
),
];
let message: Vec<PooledTransaction> = txs
.into_iter()
.map(|tx| {
PooledTransaction::try_from(tx)
.expect("Failed to convert TransactionSigned to PooledTransaction")
})
.collect();
let expected_transactions =
RequestPair { request_id: 0, message: PooledTransactions(message) };
// checking tx by tx for easier debugging if there are any regressions
for (decoded, expected) in
decoded_transactions.message.0.iter().zip(expected_transactions.message.0.iter())
{
assert_eq!(decoded, expected);
}
assert_eq!(decoded_transactions, expected_transactions);
}
#[test]
fn encode_pooled_transactions_network() {
let expected = hex!(
"f9022980f90225f8650f84832156008287fb94cf7f9e66af820a19257a2108375b180b0ec491678204d2802ca035b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981a0612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469f86b0384773594008398968094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba0ce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071a03ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88f86b01843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac3960468702769bb01b2a00802ba0e24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0aa05406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631daf86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18"
);
let txs = vec![
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 15u64,
gas_price: 2200000000,
gas_limit: 34811,
to: TxKind::Call(hex!("cf7f9e66af820a19257a2108375b180b0ec49167").into()),
value: U256::from(1234u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0x35b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981",
)
.unwrap(),
U256::from_str(
"0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860",
)
.unwrap(),
true,
),
),
TransactionSigned::new_unhashed(
Transaction::Eip1559(TxEip1559 {
chain_id: 4,
nonce: 26u64,
max_priority_fee_per_gas: 1500000000,
max_fee_per_gas: 1500000013,
gas_limit: MIN_TRANSACTION_GAS,
to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()),
value: U256::from(3000000000000000000u64),
input: Default::default(),
access_list: Default::default(),
}),
Signature::new(
U256::from_str(
"0x59e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafd",
)
.unwrap(),
U256::from_str(
"0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469",
)
.unwrap(),
true,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 3u64,
gas_price: 2000000000,
gas_limit: 10000000,
to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()),
value: U256::from(1000000000000000u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0xce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071",
)
.unwrap(),
U256::from_str(
"0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88",
)
.unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 1u64,
gas_price: 1000000000,
gas_limit: 100000,
to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()),
value: U256::from(693361000000000u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0xe24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0a",
)
.unwrap(),
U256::from_str(
"0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da",
)
.unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(4),
nonce: 2u64,
gas_price: 1000000000,
gas_limit: 100000,
to: TxKind::Call(hex!("d3e8763675e4c425df46cc3b5c0f6cbdac396046").into()),
value: U256::from(1000000000000000u64),
input: Default::default(),
}),
Signature::new(
U256::from_str(
"0xeb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5ae",
)
.unwrap(),
U256::from_str(
"0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18",
)
.unwrap(),
false,
),
),
];
let message: Vec<PooledTransaction> = txs
.into_iter()
.map(|tx| {
PooledTransaction::try_from(tx)
.expect("Failed to convert TransactionSigned to PooledTransaction")
})
.collect();
let transactions = RequestPair { request_id: 0, message: PooledTransactions(message) };
let mut encoded = vec![];
transactions.encode(&mut encoded);
assert_eq!(encoded.len(), transactions.length());
let encoded_str = hex::encode(encoded);
let expected_str = hex::encode(expected);
assert_eq!(encoded_str.len(), expected_str.len());
assert_eq!(encoded_str, expected_str);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/status.rs | crates/net/eth-wire-types/src/status.rs | use crate::EthVersion;
use alloy_chains::{Chain, NamedChain};
use alloy_hardforks::{EthereumHardfork, ForkId, Head};
use alloy_primitives::{hex, B256, U256};
use alloy_rlp::{BufMut, Encodable, RlpDecodable, RlpEncodable};
use core::fmt::{Debug, Display};
use reth_chainspec::{EthChainSpec, Hardforks, MAINNET};
use reth_codecs_derive::add_arbitrary_tests;
/// `UnifiedStatus` is an internal superset of all ETH status fields for all `eth/` versions.
///
/// This type can be converted into [`Status`] or [`StatusEth69`] depending on the version and
/// unsupported fields are stripped out.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct UnifiedStatus {
/// The eth protocol version (e.g. eth/66 to eth/69).
pub version: EthVersion,
/// The chain ID identifying the peer’s network.
pub chain: Chain,
/// The genesis block hash of the peer’s chain.
pub genesis: B256,
/// The fork ID as defined by EIP-2124.
pub forkid: ForkId,
/// The latest block hash known to the peer.
pub blockhash: B256,
/// The total difficulty of the peer’s best chain (eth/66–68 only).
pub total_difficulty: Option<U256>,
/// The earliest block this node can serve (eth/69 only).
pub earliest_block: Option<u64>,
/// The latest block number this node has (eth/69 only).
pub latest_block: Option<u64>,
}
impl Default for UnifiedStatus {
fn default() -> Self {
let mainnet_genesis = MAINNET.genesis_hash();
Self {
version: EthVersion::Eth68,
chain: Chain::from_named(NamedChain::Mainnet),
genesis: mainnet_genesis,
forkid: MAINNET
.hardfork_fork_id(EthereumHardfork::Frontier)
.expect("Frontier must exist"),
blockhash: mainnet_genesis,
total_difficulty: Some(U256::from(17_179_869_184u64)),
earliest_block: Some(0),
latest_block: Some(0),
}
}
}
impl UnifiedStatus {
/// Helper for creating the `UnifiedStatus` builder
pub fn builder() -> StatusBuilder {
Default::default()
}
/// Build from chain‑spec + head. Earliest/latest default to full history.
pub fn spec_builder<Spec>(spec: &Spec, head: &Head) -> Self
where
Spec: EthChainSpec + Hardforks,
{
Self::builder()
.chain(spec.chain())
.genesis(spec.genesis_hash())
.forkid(spec.fork_id(head))
.blockhash(head.hash)
.total_difficulty(Some(head.total_difficulty))
.earliest_block(Some(0))
.latest_block(Some(head.number))
.build()
}
/// Override the `(earliest, latest)` history range we’ll advertise to
/// eth/69 peers.
pub const fn set_history_range(&mut self, earliest: u64, latest: u64) {
self.earliest_block = Some(earliest);
self.latest_block = Some(latest);
}
/// Sets the [`EthVersion`] for the status.
pub const fn set_eth_version(&mut self, v: EthVersion) {
self.version = v;
}
/// Consume this `UnifiedStatus` and produce the legacy [`Status`] message used by all
/// `eth/66`–`eth/68`.
pub fn into_legacy(self) -> Status {
Status {
version: self.version,
chain: self.chain,
genesis: self.genesis,
forkid: self.forkid,
blockhash: self.blockhash,
total_difficulty: self.total_difficulty.unwrap_or(U256::ZERO),
}
}
/// Consume this `UnifiedStatus` and produce the [`StatusEth69`] message used by `eth/69`.
pub fn into_eth69(self) -> StatusEth69 {
StatusEth69 {
version: self.version,
chain: self.chain,
genesis: self.genesis,
forkid: self.forkid,
earliest: self.earliest_block.unwrap_or(0),
latest: self.latest_block.unwrap_or(0),
blockhash: self.blockhash,
}
}
/// Convert this `UnifiedStatus` into the appropriate `StatusMessage` variant based on version.
pub fn into_message(self) -> StatusMessage {
if self.version >= EthVersion::Eth69 {
StatusMessage::Eth69(self.into_eth69())
} else {
StatusMessage::Legacy(self.into_legacy())
}
}
/// Build a `UnifiedStatus` from a received `StatusMessage`.
pub const fn from_message(msg: StatusMessage) -> Self {
match msg {
StatusMessage::Legacy(s) => Self {
version: s.version,
chain: s.chain,
genesis: s.genesis,
forkid: s.forkid,
blockhash: s.blockhash,
total_difficulty: Some(s.total_difficulty),
earliest_block: None,
latest_block: None,
},
StatusMessage::Eth69(e) => Self {
version: e.version,
chain: e.chain,
genesis: e.genesis,
forkid: e.forkid,
blockhash: e.blockhash,
total_difficulty: None,
earliest_block: Some(e.earliest),
latest_block: Some(e.latest),
},
}
}
}
/// Builder type for constructing a [`UnifiedStatus`] message.
#[derive(Debug, Default)]
pub struct StatusBuilder {
status: UnifiedStatus,
}
impl StatusBuilder {
/// Consumes the builder and returns the constructed [`UnifiedStatus`].
pub const fn build(self) -> UnifiedStatus {
self.status
}
/// Sets the eth protocol version (e.g., eth/66, eth/69).
pub const fn version(mut self, version: EthVersion) -> Self {
self.status.version = version;
self
}
/// Sets the chain ID
pub const fn chain(mut self, chain: Chain) -> Self {
self.status.chain = chain;
self
}
/// Sets the genesis block hash of the chain.
pub const fn genesis(mut self, genesis: B256) -> Self {
self.status.genesis = genesis;
self
}
/// Sets the fork ID, used for fork compatibility checks.
pub const fn forkid(mut self, forkid: ForkId) -> Self {
self.status.forkid = forkid;
self
}
/// Sets the block hash of the current head.
pub const fn blockhash(mut self, blockhash: B256) -> Self {
self.status.blockhash = blockhash;
self
}
/// Sets the total difficulty, if relevant (Some for eth/66–68).
pub const fn total_difficulty(mut self, td: Option<U256>) -> Self {
self.status.total_difficulty = td;
self
}
/// Sets the earliest available block, if known (Some for eth/69).
pub const fn earliest_block(mut self, earliest: Option<u64>) -> Self {
self.status.earliest_block = earliest;
self
}
/// Sets the latest known block, if known (Some for eth/69).
pub const fn latest_block(mut self, latest: Option<u64>) -> Self {
self.status.latest_block = latest;
self
}
}
/// The status message is used in the eth protocol handshake to ensure that peers are on the same
/// network and are following the same fork.
///
/// When performing a handshake, the total difficulty is not guaranteed to correspond to the block
/// hash. This information should be treated as untrusted.
///
/// This is the legacy (eth/66 through eth/68) form; eth/69 uses [`StatusEth69`], which
/// drops `total_difficulty`.
#[derive(Copy, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct Status {
    /// The current protocol version. For example, peers running `eth/66` would have a version of
    /// 66.
    pub version: EthVersion,
    /// The chain id, as introduced in
    /// [EIP155](https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids).
    pub chain: Chain,
    /// Total difficulty of the best chain.
    pub total_difficulty: U256,
    /// The highest difficulty block hash the peer has seen
    pub blockhash: B256,
    /// The genesis hash of the peer's chain.
    pub genesis: B256,
    /// The fork identifier, a [CRC32
    /// checksum](https://en.wikipedia.org/wiki/Cyclic_redundancy_check#CRC-32_algorithm) for
    /// identifying the peer's fork as defined by
    /// [EIP-2124](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md).
    /// This was added in [`eth/64`](https://eips.ethereum.org/EIPS/eip-2364)
    pub forkid: ForkId,
}
// <https://etherscan.io/block/0>
impl Default for Status {
    /// Returns a mainnet genesis status: eth/68, mainnet chain id, the genesis
    /// difficulty, genesis hash as the head, and the Frontier fork id.
    fn default() -> Self {
        let genesis_hash = MAINNET.genesis_hash();
        // The Frontier fork id is always present on the mainnet chain spec.
        let frontier_forkid = MAINNET
            .hardfork_fork_id(EthereumHardfork::Frontier)
            .expect("The Frontier hardfork should always exist");
        Self {
            version: EthVersion::Eth68,
            chain: Chain::from_named(NamedChain::Mainnet),
            // Mainnet genesis difficulty (block 0).
            total_difficulty: U256::from(17_179_869_184u64),
            // At genesis, the head hash is the genesis hash itself.
            blockhash: genesis_hash,
            genesis: genesis_hash,
            forkid: frontier_forkid,
        }
    }
}
impl Display for Status {
    /// Renders the status on a single line, hex-encoding both hashes.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(
            f,
            "Status {{ version: {version}, chain: {chain}, total_difficulty: {td}, blockhash: {blockhash}, genesis: {genesis}, forkid: {forkid:X?} }}",
            version = self.version,
            chain = self.chain,
            td = self.total_difficulty,
            blockhash = hex::encode(self.blockhash),
            genesis = hex::encode(self.genesis),
            forkid = self.forkid,
        )
    }
}
impl Debug for Status {
    /// Debug output mirrors [`Display`] but uses `{:?}` for the typed fields.
    ///
    /// The alternate flag (`{:#?}`) selects a multi-line, tab-indented layout.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let blockhash_hex = hex::encode(self.blockhash);
        let genesis_hex = hex::encode(self.genesis);
        if f.alternate() {
            write!(
                f,
                "Status {{\n\tversion: {:?},\n\tchain: {:?},\n\ttotal_difficulty: {:?},\n\tblockhash: {},\n\tgenesis: {},\n\tforkid: {:X?}\n}}",
                self.version, self.chain, self.total_difficulty, blockhash_hex, genesis_hex, self.forkid
            )
        } else {
            write!(
                f,
                "Status {{ version: {:?}, chain: {:?}, total_difficulty: {:?}, blockhash: {}, genesis: {}, forkid: {:X?} }}",
                self.version, self.chain, self.total_difficulty, blockhash_hex, genesis_hex, self.forkid
            )
        }
    }
}
/// Similar to [`Status`], but for `eth/69` version, which does not contain
/// the `total_difficulty` field and instead advertises the served block range
/// (`earliest`/`latest`) per [EIP-7642](https://eips.ethereum.org/EIPS/eip-7642).
#[derive(Copy, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct StatusEth69 {
    /// The current protocol version.
    /// Here, version is `eth/69`.
    pub version: EthVersion,
    /// The chain id, as introduced in
    /// [EIP155](https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids).
    pub chain: Chain,
    /// The genesis hash of the peer's chain.
    pub genesis: B256,
    /// The fork identifier, a [CRC32
    /// checksum](https://en.wikipedia.org/wiki/Cyclic_redundancy_check#CRC-32_algorithm) for
    /// identifying the peer's fork as defined by
    /// [EIP-2124](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md).
    /// This was added in [`eth/64`](https://eips.ethereum.org/EIPS/eip-2364)
    pub forkid: ForkId,
    /// Earliest block number this node can serve
    pub earliest: u64,
    /// Latest block number this node has (current head)
    pub latest: u64,
    /// Hash of the latest block this node has (current head)
    pub blockhash: B256,
}
impl Display for StatusEth69 {
    /// Renders the eth/69 status on a single line, hex-encoding both hashes.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(
            f,
            "StatusEth69 {{ version: {version}, chain: {chain}, genesis: {genesis}, forkid: {forkid:X?}, earliest: {earliest}, latest: {latest}, blockhash: {blockhash} }}",
            version = self.version,
            chain = self.chain,
            genesis = hex::encode(self.genesis),
            forkid = self.forkid,
            earliest = self.earliest,
            latest = self.latest,
            blockhash = hex::encode(self.blockhash),
        )
    }
}
impl Debug for StatusEth69 {
    /// Debug output for the eth/69 status.
    ///
    /// Fix: the previous implementation printed the struct as `Status { .. }`
    /// (the legacy type's name) and silently dropped the `earliest`/`latest`
    /// fields, which made eth/69 debug logs both mislabeled and incomplete.
    /// It now prints `StatusEth69 { .. }` with all fields, matching this
    /// type's [`Display`] impl.
    ///
    /// The alternate flag (`{:#?}`) selects a multi-line, tab-indented layout.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let hexed_blockhash = hex::encode(self.blockhash);
        let hexed_genesis = hex::encode(self.genesis);
        if f.alternate() {
            write!(
                f,
                "StatusEth69 {{\n\tversion: {:?},\n\tchain: {:?},\n\tgenesis: {},\n\tforkid: {:X?},\n\tearliest: {:?},\n\tlatest: {:?},\n\tblockhash: {}\n}}",
                self.version,
                self.chain,
                hexed_genesis,
                self.forkid,
                self.earliest,
                self.latest,
                hexed_blockhash
            )
        } else {
            write!(
                f,
                "StatusEth69 {{ version: {:?}, chain: {:?}, genesis: {}, forkid: {:X?}, earliest: {:?}, latest: {:?}, blockhash: {} }}",
                self.version,
                self.chain,
                hexed_genesis,
                self.forkid,
                self.earliest,
                self.latest,
                hexed_blockhash
            )
        }
    }
}
/// `StatusMessage` can store either the Legacy version (with TD) or the
/// eth/69 version (omits TD).
///
/// This lets handshake code handle both wire formats behind one type; use
/// [`StatusMessage::version`] to discover which protocol produced it.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StatusMessage {
    /// The legacy status (`eth/66` through `eth/68`) with `total_difficulty`.
    Legacy(Status),
    /// The new `eth/69` status with no `total_difficulty`.
    Eth69(StatusEth69),
}
impl StatusMessage {
    /// Returns the genesis hash advertised in the handshake.
    pub const fn genesis(&self) -> B256 {
        match self {
            Self::Legacy(status) => status.genesis,
            Self::Eth69(status) => status.genesis,
        }
    }
    /// Returns the negotiated protocol version.
    pub const fn version(&self) -> EthVersion {
        match self {
            Self::Legacy(status) => status.version,
            Self::Eth69(status) => status.version,
        }
    }
    /// Returns a reference to the chain identifier.
    pub const fn chain(&self) -> &Chain {
        match self {
            Self::Legacy(status) => &status.chain,
            Self::Eth69(status) => &status.chain,
        }
    }
    /// Returns the fork identifier.
    pub const fn forkid(&self) -> ForkId {
        match self {
            Self::Legacy(status) => status.forkid,
            Self::Eth69(status) => status.forkid,
        }
    }
    /// Returns the hash of the peer's latest (head) block.
    pub const fn blockhash(&self) -> B256 {
        match self {
            Self::Legacy(status) => status.blockhash,
            Self::Eth69(status) => status.blockhash,
        }
    }
}
impl Encodable for StatusMessage {
    /// RLP-encodes the inner status; the enum itself adds no framing,
    /// so the wire bytes are exactly those of the wrapped variant.
    fn encode(&self, out: &mut dyn BufMut) {
        match self {
            Self::Legacy(status) => status.encode(out),
            Self::Eth69(status) => status.encode(out),
        }
    }
    /// Returns the encoded length of the wrapped variant.
    fn length(&self) -> usize {
        match self {
            Self::Legacy(status) => status.length(),
            Self::Eth69(status) => status.length(),
        }
    }
}
impl Display for StatusMessage {
    /// Delegates to the wrapped variant's [`Display`] impl, forwarding the
    /// formatter (and therefore any flags) unchanged.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::Legacy(s) => Display::fmt(s, f),
            Self::Eth69(s69) => Display::fmt(s69, f),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{EthVersion, Status, StatusEth69, StatusMessage, UnifiedStatus};
    use alloy_consensus::constants::MAINNET_GENESIS_HASH;
    use alloy_genesis::Genesis;
    use alloy_hardforks::{EthereumHardfork, ForkHash, ForkId, Head};
    use alloy_primitives::{b256, hex, B256, U256};
    use alloy_rlp::{Decodable, Encodable};
    use rand::Rng;
    use reth_chainspec::{Chain, ChainSpec, ForkCondition, NamedChain};
    use std::str::FromStr;
    // Legacy status encodes to a known RLP byte string.
    #[test]
    fn encode_eth_status_message() {
        let expected = hex!(
            "f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"
        );
        let status = Status {
            version: EthVersion::Eth67,
            chain: Chain::from_named(NamedChain::Mainnet),
            total_difficulty: U256::from(36206751599115524359527u128),
            blockhash: B256::from_str(
                "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d",
            )
            .unwrap(),
            genesis: MAINNET_GENESIS_HASH,
            forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 },
        };
        let mut rlp_status = vec![];
        status.encode(&mut rlp_status);
        assert_eq!(rlp_status, expected);
    }
    // Inverse of `encode_eth_status_message`: same fixture bytes decode back.
    #[test]
    fn decode_eth_status_message() {
        let data = hex!(
            "f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"
        );
        let expected = Status {
            version: EthVersion::Eth67,
            chain: Chain::from_named(NamedChain::Mainnet),
            total_difficulty: U256::from(36206751599115524359527u128),
            blockhash: B256::from_str(
                "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d",
            )
            .unwrap(),
            genesis: MAINNET_GENESIS_HASH,
            forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 },
        };
        let status = Status::decode(&mut &data[..]).unwrap();
        assert_eq!(status, expected);
    }
    // UnifiedStatus -> StatusMessage -> UnifiedStatus must be lossless for eth/69.
    #[test]
    fn roundtrip_eth69() {
        let unified_status = UnifiedStatus::builder()
            .version(EthVersion::Eth69)
            .chain(Chain::mainnet())
            .genesis(MAINNET_GENESIS_HASH)
            .forkid(ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 })
            .blockhash(b256!("0xfeb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d"))
            .earliest_block(Some(1))
            .latest_block(Some(2))
            .total_difficulty(None)
            .build();
        let status_message = unified_status.into_message();
        let roundtripped_unified_status = UnifiedStatus::from_message(status_message);
        assert_eq!(unified_status, roundtripped_unified_status);
    }
    // UnifiedStatus -> StatusMessage -> UnifiedStatus must be lossless for legacy versions.
    #[test]
    fn roundtrip_legacy() {
        let unified_status = UnifiedStatus::builder()
            .version(EthVersion::Eth68)
            .chain(Chain::sepolia())
            .genesis(MAINNET_GENESIS_HASH)
            .forkid(ForkId { hash: ForkHash([0xaa, 0xbb, 0xcc, 0xdd]), next: 0 })
            .blockhash(b256!("0xfeb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d"))
            .total_difficulty(Some(U256::from(42u64)))
            .earliest_block(None)
            .latest_block(None)
            .build();
        let status_message = unified_status.into_message();
        let roundtripped_unified_status = UnifiedStatus::from_message(status_message);
        assert_eq!(unified_status, roundtripped_unified_status);
    }
    // eth/69 status encodes to a known RLP byte string, both directly and
    // when built through UnifiedStatus.
    #[test]
    fn encode_eth69_status_message() {
        let expected = hex!("f8544501a0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d8083ed14f2840112a880a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d");
        let status = StatusEth69 {
            version: EthVersion::Eth69,
            chain: Chain::from_named(NamedChain::Mainnet),
            genesis: MAINNET_GENESIS_HASH,
            forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 },
            earliest: 15_537_394,
            latest: 18_000_000,
            blockhash: B256::from_str(
                "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d",
            )
            .unwrap(),
        };
        let mut rlp_status = vec![];
        status.encode(&mut rlp_status);
        assert_eq!(rlp_status, expected);
        // Same bytes when the message is produced via the builder path.
        let status = UnifiedStatus::builder()
            .version(EthVersion::Eth69)
            .chain(Chain::from_named(NamedChain::Mainnet))
            .genesis(MAINNET_GENESIS_HASH)
            .forkid(ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 })
            .blockhash(b256!("0xfeb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d"))
            .earliest_block(Some(15_537_394))
            .latest_block(Some(18_000_000))
            .build()
            .into_message();
        let mut rlp_status = vec![];
        status.encode(&mut rlp_status);
        assert_eq!(rlp_status, expected);
    }
    // Inverse of `encode_eth69_status_message`, also comparing against the
    // StatusMessage produced by the builder path.
    #[test]
    fn decode_eth69_status_message() {
        let data = hex!("f8544501a0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d8083ed14f2840112a880a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d");
        let expected = StatusEth69 {
            version: EthVersion::Eth69,
            chain: Chain::from_named(NamedChain::Mainnet),
            genesis: MAINNET_GENESIS_HASH,
            forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 },
            earliest: 15_537_394,
            latest: 18_000_000,
            blockhash: B256::from_str(
                "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d",
            )
            .unwrap(),
        };
        let status = StatusEth69::decode(&mut &data[..]).unwrap();
        assert_eq!(status, expected);
        let expected_message = UnifiedStatus::builder()
            .version(EthVersion::Eth69)
            .chain(Chain::from_named(NamedChain::Mainnet))
            .genesis(MAINNET_GENESIS_HASH)
            .forkid(ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 })
            .earliest_block(Some(15_537_394))
            .latest_block(Some(18_000_000))
            .blockhash(b256!("0xfeb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d"))
            .build()
            .into_message();
        let expected_status = if let StatusMessage::Eth69(status69) = expected_message {
            status69
        } else {
            panic!("expected StatusMessage::Eth69 variant");
        };
        assert_eq!(status, expected_status);
    }
    // Non-mainnet (BSC) legacy status fixture.
    #[test]
    fn encode_network_status_message() {
        let expected = hex!(
            "f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"
        );
        let status = Status {
            version: EthVersion::Eth66,
            chain: Chain::from_named(NamedChain::BinanceSmartChain),
            total_difficulty: U256::from(37851386u64),
            blockhash: B256::from_str(
                "f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27b",
            )
            .unwrap(),
            genesis: B256::from_str(
                "0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b",
            )
            .unwrap(),
            forkid: ForkId { hash: ForkHash([0x5d, 0x43, 0xd2, 0xfd]), next: 0 },
        };
        let mut rlp_status = vec![];
        status.encode(&mut rlp_status);
        assert_eq!(rlp_status, expected);
    }
    // Inverse of `encode_network_status_message`.
    #[test]
    fn decode_network_status_message() {
        let data = hex!(
            "f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"
        );
        let expected = Status {
            version: EthVersion::Eth66,
            chain: Chain::from_named(NamedChain::BinanceSmartChain),
            total_difficulty: U256::from(37851386u64),
            blockhash: B256::from_str(
                "f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27b",
            )
            .unwrap(),
            genesis: B256::from_str(
                "0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b",
            )
            .unwrap(),
            forkid: ForkId { hash: ForkHash([0x5d, 0x43, 0xd2, 0xfd]), next: 0 },
        };
        let status = Status::decode(&mut &data[..]).unwrap();
        assert_eq!(status, expected);
    }
    // Decoding with an unusually large total difficulty and a custom chain id.
    #[test]
    fn decode_another_network_status_message() {
        let data = hex!(
            "f86142820834936d68fcffffffffffffffffffffffffdeab81b8a0523e8163a6d620a4cc152c547a05f28a03fec91a2a615194cb86df9731372c0ca06499dccdc7c7def3ebb1ce4c6ee27ec6bd02aee570625ca391919faf77ef27bdc6841a67ccd880"
        );
        let expected = Status {
            version: EthVersion::Eth66,
            chain: Chain::from_id(2100),
            total_difficulty: U256::from_str(
                "0x000000000000000000000000006d68fcffffffffffffffffffffffffdeab81b8",
            )
            .unwrap(),
            blockhash: B256::from_str(
                "523e8163a6d620a4cc152c547a05f28a03fec91a2a615194cb86df9731372c0c",
            )
            .unwrap(),
            genesis: B256::from_str(
                "6499dccdc7c7def3ebb1ce4c6ee27ec6bd02aee570625ca391919faf77ef27bd",
            )
            .unwrap(),
            forkid: ForkId { hash: ForkHash([0x1a, 0x67, 0xcc, 0xd8]), next: 0 },
        };
        let status = Status::decode(&mut &data[..]).unwrap();
        assert_eq!(status, expected);
    }
    // spec_builder should carry chain id, fork id, TD, head hash and genesis
    // hash from a custom chain spec into the status.
    #[test]
    fn init_custom_status_fields() {
        let mut rng = rand::rng();
        let head_hash = rng.random();
        let total_difficulty = U256::from(rng.random::<u64>());
        // create a genesis that has a random part, so we can check that the hash is preserved
        let genesis = Genesis { nonce: rng.random(), ..Default::default() };
        // build head
        let head = Head {
            number: u64::MAX,
            hash: head_hash,
            difficulty: U256::from(13337),
            total_difficulty,
            timestamp: u64::MAX,
        };
        // add a few hardforks
        let hardforks = vec![
            (EthereumHardfork::Tangerine, ForkCondition::Block(1)),
            (EthereumHardfork::SpuriousDragon, ForkCondition::Block(2)),
            (EthereumHardfork::Byzantium, ForkCondition::Block(3)),
            (EthereumHardfork::MuirGlacier, ForkCondition::Block(5)),
            (EthereumHardfork::London, ForkCondition::Block(8)),
            (EthereumHardfork::Shanghai, ForkCondition::Timestamp(13)),
        ];
        let mut chainspec =
            ChainSpec::builder().genesis(genesis.into()).chain(Chain::from_id(1337));
        for (fork, condition) in &hardforks {
            chainspec = chainspec.with_fork(*fork, *condition);
        }
        let spec = chainspec.build();
        // calculate proper forkid to check against
        let genesis_hash = spec.genesis_hash();
        let mut forkhash = ForkHash::from(genesis_hash);
        for (_, condition) in hardforks {
            forkhash += match condition {
                ForkCondition::Block(n) | ForkCondition::Timestamp(n) => n,
                _ => unreachable!("only block and timestamp forks are used in this test"),
            }
        }
        let forkid = ForkId { hash: forkhash, next: 0 };
        let status = UnifiedStatus::spec_builder(&spec, &head);
        assert_eq!(status.chain, Chain::from_id(1337));
        assert_eq!(status.forkid, forkid);
        assert_eq!(status.total_difficulty.unwrap(), total_difficulty);
        assert_eq!(status.blockhash, head_hash);
        assert_eq!(status.genesis, genesis_hash);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/broadcast.rs | crates/net/eth-wire-types/src/broadcast.rs | //! Types for broadcasting new data.
use crate::{EthMessage, EthVersion, NetworkPrimitives};
use alloc::{sync::Arc, vec::Vec};
use alloy_primitives::{
map::{HashMap, HashSet},
Bytes, TxHash, B256, U128,
};
use alloy_rlp::{
Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper,
};
use core::{fmt::Debug, mem};
use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator};
use reth_codecs_derive::{add_arbitrary_tests, generate_tests};
use reth_ethereum_primitives::TransactionSigned;
use reth_primitives_traits::{Block, SignedTransaction};
/// This informs peers of new blocks that have appeared on the network.
///
/// Wire-compatible wrapper around a flat list of (hash, number) pairs; the
/// RLP encoding is that of the inner `Vec` (wrapper adds no framing).
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct NewBlockHashes(
    /// New block hashes and the block number for each blockhash.
    /// Clients should request blocks using a [`GetBlockBodies`](crate::GetBlockBodies) message.
    pub Vec<BlockHashNumber>,
);
// === impl NewBlockHashes ===
impl NewBlockHashes {
    /// Returns the entry with the highest block number, or `None` if the list
    /// is empty.
    ///
    /// When several entries share the highest number, the last one in the
    /// list is returned (same tie-breaking as the original fold).
    pub fn latest(&self) -> Option<&BlockHashNumber> {
        // `max_by_key` returns the last of equally-maximum elements,
        // matching the announcement order preference.
        self.0.iter().max_by_key(|block| block.number)
    }
}
/// A block hash _and_ a block number.
///
/// One entry of a [`NewBlockHashes`] announcement.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct BlockHashNumber {
    /// The block hash
    pub hash: B256,
    /// The block number
    pub number: u64,
}
// Cheap (move-only) conversions between the wrapper and its inner `Vec`.
impl From<Vec<BlockHashNumber>> for NewBlockHashes {
    fn from(v: Vec<BlockHashNumber>) -> Self {
        Self(v)
    }
}
impl From<NewBlockHashes> for Vec<BlockHashNumber> {
    fn from(v: NewBlockHashes) -> Self {
        v.0
    }
}
/// A trait for block payloads transmitted through p2p.
///
/// Abstracts over `NewBlock`-style messages so networking code can access the
/// contained block regardless of the concrete payload type.
pub trait NewBlockPayload:
    Encodable + Decodable + Clone + Eq + Debug + Send + Sync + Unpin + 'static
{
    /// The block type.
    type Block: Block;
    /// Returns a reference to the block.
    fn block(&self) -> &Self::Block;
}
/// A new block with the current total difficulty, which includes the difficulty of the returned
/// block.
///
/// Pre-merge `NewBlock` broadcast message; eth/69 drops TD-based messages but
/// this type remains for earlier protocol versions.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub struct NewBlock<B = reth_ethereum_primitives::Block> {
    /// A new block.
    pub block: B,
    /// The current total difficulty.
    pub td: U128,
}
// Any block type satisfying `Block` makes `NewBlock<B>` a valid p2p payload.
impl<B: Block + 'static> NewBlockPayload for NewBlock<B> {
    type Block = B;
    fn block(&self) -> &Self::Block {
        &self.block
    }
}
// RLP roundtrip fuzz tests for the default ethereum block payload.
generate_tests!(#[rlp, 25] NewBlock<reth_ethereum_primitives::Block>, EthNewBlockTests);
/// This informs peers of transactions that have appeared on the network and are not yet included
/// in a block.
///
/// Wire-compatible wrapper: the RLP encoding is that of the inner `Vec`.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp, 10)]
pub struct Transactions<T = TransactionSigned>(
    /// New transactions for the peer to include in its mempool.
    pub Vec<T>,
);
impl<T: SignedTransaction> Transactions<T> {
    /// Returns `true` if the list of transactions contains any blob (EIP-4844)
    /// transactions.
    pub fn has_eip4844(&self) -> bool {
        for tx in &self.0 {
            if tx.is_eip4844() {
                return true;
            }
        }
        false
    }
}
// Cheap (move-only) conversions between the wrapper and its inner `Vec`.
impl<T> From<Vec<T>> for Transactions<T> {
    fn from(txs: Vec<T>) -> Self {
        Self(txs)
    }
}
impl<T> From<Transactions<T>> for Vec<T> {
    fn from(txs: Transactions<T>) -> Self {
        txs.0
    }
}
/// Same as [`Transactions`] but this is intended as egress message send from local to _many_ peers.
///
/// The list of transactions is constructed on per-peers basis, but the underlying transaction
/// objects are shared.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp, 20)]
pub struct SharedTransactions<T = TransactionSigned>(
    /// New transactions for the peer to include in its mempool.
    /// `Arc` keeps per-peer lists cheap to build: cloning bumps a refcount
    /// instead of copying the transaction.
    pub Vec<Arc<T>>,
);
/// A wrapper type for all different new pooled transaction types
///
/// Unifies the pre- and post-[EIP-5793](https://eips.ethereum.org/EIPS/eip-5793)
/// announcement formats behind one type.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum NewPooledTransactionHashes {
    /// A list of transaction hashes valid for [66-68)
    Eth66(NewPooledTransactionHashes66),
    /// A list of transaction hashes valid from [68..]
    ///
    /// Note: it is assumed that the payload is valid (all vectors have the same length)
    Eth68(NewPooledTransactionHashes68),
}
// === impl NewPooledTransactionHashes ===
impl NewPooledTransactionHashes {
    /// Returns the [`EthVersion`] that introduced this announcement format.
    pub const fn version(&self) -> EthVersion {
        match self {
            Self::Eth66(_) => EthVersion::Eth66,
            Self::Eth68(_) => EthVersion::Eth68,
        }
    }
    /// Returns `true` if this payload format may be sent on the given protocol
    /// version: the eth66 format is valid for eth/66–67, the eth68 format for
    /// eth/68 and later.
    pub const fn is_valid_for_version(&self, version: EthVersion) -> bool {
        match self {
            Self::Eth66(_) => matches!(version, EthVersion::Eth66 | EthVersion::Eth67),
            Self::Eth68(_) => matches!(version, EthVersion::Eth68 | EthVersion::Eth69),
        }
    }
    /// Returns an iterator over all transaction hashes.
    pub fn iter_hashes(&self) -> impl Iterator<Item = &B256> + '_ {
        match self {
            Self::Eth66(inner) => inner.0.iter(),
            Self::Eth68(inner) => inner.hashes.iter(),
        }
    }
    /// Returns an immutable reference to the transaction hashes.
    pub const fn hashes(&self) -> &Vec<B256> {
        match self {
            Self::Eth66(inner) => &inner.0,
            Self::Eth68(inner) => &inner.hashes,
        }
    }
    /// Returns a mutable reference to the transaction hashes.
    pub const fn hashes_mut(&mut self) -> &mut Vec<B256> {
        match self {
            Self::Eth66(inner) => &mut inner.0,
            Self::Eth68(inner) => &mut inner.hashes,
        }
    }
    /// Consumes the announcement and returns all hashes.
    pub fn into_hashes(self) -> Vec<B256> {
        match self {
            Self::Eth66(inner) => inner.0,
            Self::Eth68(inner) => inner.hashes,
        }
    }
    /// Consumes the announcement, returning an owning iterator over the hashes.
    pub fn into_iter_hashes(self) -> impl Iterator<Item = B256> {
        match self {
            Self::Eth66(inner) => inner.0.into_iter(),
            Self::Eth68(inner) => inner.hashes.into_iter(),
        }
    }
    /// Shortens the number of hashes in the message, keeping the first `len` hashes and dropping
    /// the rest. If `len` is greater than the number of hashes, this has no effect.
    ///
    /// For eth68, types and sizes are truncated in lockstep with the hashes so
    /// the three vectors stay the same length.
    pub fn truncate(&mut self, len: usize) {
        match self {
            Self::Eth66(inner) => inner.0.truncate(len),
            Self::Eth68(inner) => {
                inner.types.truncate(len);
                inner.sizes.truncate(len);
                inner.hashes.truncate(len);
            }
        }
    }
    /// Returns `true` if the announcement carries no hashes.
    pub const fn is_empty(&self) -> bool {
        match self {
            Self::Eth66(inner) => inner.0.is_empty(),
            Self::Eth68(inner) => inner.hashes.is_empty(),
        }
    }
    /// Returns the number of hashes in the message.
    pub const fn len(&self) -> usize {
        match self {
            Self::Eth66(inner) => inner.0.len(),
            Self::Eth68(inner) => inner.hashes.len(),
        }
    }
    /// Returns an immutable reference to the inner type if this is an eth68 announcement.
    pub const fn as_eth68(&self) -> Option<&NewPooledTransactionHashes68> {
        match self {
            Self::Eth68(inner) => Some(inner),
            Self::Eth66(_) => None,
        }
    }
    /// Returns a mutable reference to the inner type if this is an eth68 announcement.
    pub const fn as_eth68_mut(&mut self) -> Option<&mut NewPooledTransactionHashes68> {
        match self {
            Self::Eth68(inner) => Some(inner),
            Self::Eth66(_) => None,
        }
    }
    /// Returns a mutable reference to the inner type if this is an eth66 announcement.
    pub const fn as_eth66_mut(&mut self) -> Option<&mut NewPooledTransactionHashes66> {
        match self {
            Self::Eth66(inner) => Some(inner),
            Self::Eth68(_) => None,
        }
    }
    /// Takes the inner eth68 payload (leaving an empty one behind) if this is
    /// an eth68 announcement.
    pub fn take_eth68(&mut self) -> Option<NewPooledTransactionHashes68> {
        match self {
            Self::Eth68(inner) => Some(mem::take(inner)),
            Self::Eth66(_) => None,
        }
    }
    /// Takes the inner eth66 payload (leaving an empty one behind) if this is
    /// an eth66 announcement.
    pub fn take_eth66(&mut self) -> Option<NewPooledTransactionHashes66> {
        match self {
            Self::Eth66(inner) => Some(mem::take(inner)),
            Self::Eth68(_) => None,
        }
    }
}
// Lifts an announcement into the protocol message enum, picking the message
// variant matching the payload format.
impl<N: NetworkPrimitives> From<NewPooledTransactionHashes> for EthMessage<N> {
    fn from(value: NewPooledTransactionHashes) -> Self {
        match value {
            NewPooledTransactionHashes::Eth66(msg) => Self::NewPooledTransactionHashes66(msg),
            NewPooledTransactionHashes::Eth68(msg) => Self::NewPooledTransactionHashes68(msg),
        }
    }
}
// Wraps a concrete payload into the version-agnostic enum.
impl From<NewPooledTransactionHashes66> for NewPooledTransactionHashes {
    fn from(hashes: NewPooledTransactionHashes66) -> Self {
        Self::Eth66(hashes)
    }
}
impl From<NewPooledTransactionHashes68> for NewPooledTransactionHashes {
    fn from(hashes: NewPooledTransactionHashes68) -> Self {
        Self::Eth68(hashes)
    }
}
/// This informs peers of transaction hashes for transactions that have appeared on the network,
/// but have not been included in a block.
///
/// Pre-eth/68 format: hashes only, no per-transaction type or size metadata.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct NewPooledTransactionHashes66(
    /// Transaction hashes for new transactions that have appeared on the network.
    /// Clients should request the transactions with the given hashes using a
    /// [`GetPooledTransactions`](crate::GetPooledTransactions) message.
    pub Vec<B256>,
);
impl From<Vec<B256>> for NewPooledTransactionHashes66 {
    fn from(v: Vec<B256>) -> Self {
        Self(v)
    }
}
/// Same as [`NewPooledTransactionHashes66`] but extends that beside the transaction hashes,
/// the node sends the transaction types and their sizes (as defined in EIP-2718) as well.
///
/// Invariant (not enforced by the type): `types`, `sizes` and `hashes` are
/// parallel vectors of equal length; [`Decodable`] validates this on ingress.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct NewPooledTransactionHashes68 {
    /// Transaction types for new transactions that have appeared on the network.
    ///
    /// ## Note on RLP encoding and decoding
    ///
    /// In the [eth/68 spec](https://eips.ethereum.org/EIPS/eip-5793#specification) this is defined
    /// the following way:
    /// * `[type_0: B_1, type_1: B_1, ...]`
    ///
    /// This would make it seem like the [`Encodable`] and
    /// [`Decodable`] implementations should directly use a `Vec<u8>` for
    /// encoding and decoding, because it looks like this field should be encoded as a _list_ of
    /// bytes.
    ///
    /// However, [this is implemented in geth as a `[]byte`
    /// type](https://github.com/ethereum/go-ethereum/blob/82d934b1dd80cdd8190803ea9f73ed2c345e2576/eth/protocols/eth/protocol.go#L308-L313),
    /// which [ends up being encoded as a RLP
    /// string](https://github.com/ethereum/go-ethereum/blob/82d934b1dd80cdd8190803ea9f73ed2c345e2576/rlp/encode_test.go#L171-L176),
    /// **not** a RLP list.
    ///
    /// Because of this, we do not directly use the `Vec<u8>` when encoding and decoding, and
    /// instead use the [`Encodable`] and [`Decodable`]
    /// implementations for `&[u8]` instead, which encodes into a RLP string, and expects an RLP
    /// string when decoding.
    pub types: Vec<u8>,
    /// Transaction sizes for new transactions that have appeared on the network.
    pub sizes: Vec<usize>,
    /// Transaction hashes for new transactions that have appeared on the network.
    pub hashes: Vec<B256>,
}
#[cfg(feature = "arbitrary")]
impl proptest::prelude::Arbitrary for NewPooledTransactionHashes68 {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
use proptest::{collection::vec, prelude::*};
// Generate a single random length for all vectors
let vec_length = any::<usize>().prop_map(|x| x % 100 + 1); // Lengths between 1 and 100
vec_length
.prop_flat_map(|len| {
// Use the generated length to create vectors of TxType, usize, and B256
let types_vec = vec(
proptest_arbitrary_interop::arb::<reth_ethereum_primitives::TxType>()
.prop_map(|ty| ty as u8),
len..=len,
);
// Map the usize values to the range 0..131072(0x20000)
let sizes_vec = vec(proptest::num::usize::ANY.prop_map(|x| x % 131072), len..=len);
let hashes_vec = vec(any::<B256>(), len..=len);
(types_vec, sizes_vec, hashes_vec)
})
.prop_map(|(types, sizes, hashes)| Self { types, sizes, hashes })
.boxed()
}
type Strategy = proptest::prelude::BoxedStrategy<Self>;
}
impl NewPooledTransactionHashes68 {
    /// Returns an iterator over tx hashes zipped with the corresponding
    /// `(type, size)` metadata.
    pub fn metadata_iter(&self) -> impl Iterator<Item = (&B256, (u8, usize))> {
        let metadata = self.types.iter().copied().zip(self.sizes.iter().copied());
        self.hashes.iter().zip(metadata)
    }
    /// Appends a single transaction's hash, size and type.
    pub fn push<T: SignedTransaction>(&mut self, tx: &T) {
        // Push into all three parallel vectors to keep them the same length.
        self.types.push(tx.ty());
        self.sizes.push(tx.encode_2718_len());
        self.hashes.push(*tx.tx_hash());
    }
    /// Appends every transaction in the iterator.
    pub fn extend<'a, T: SignedTransaction>(&mut self, txs: impl IntoIterator<Item = &'a T>) {
        txs.into_iter().for_each(|tx| self.push(tx));
    }
    /// Builder-style variant of [`Self::push`]: consumes and returns `self`.
    pub fn with_transaction<T: SignedTransaction>(mut self, tx: &T) -> Self {
        self.push(tx);
        self
    }
    /// Builder-style variant of [`Self::extend`]: consumes and returns `self`.
    pub fn with_transactions<'a, T: SignedTransaction>(
        mut self,
        txs: impl IntoIterator<Item = &'a T>,
    ) -> Self {
        self.extend(txs);
        self
    }
}
impl Encodable for NewPooledTransactionHashes68 {
    /// Encodes `types` as an RLP *string* (via `&[u8]`) for geth
    /// compatibility — see the note on the `types` field.
    fn encode(&self, out: &mut dyn bytes::BufMut) {
        // Borrowing view of the payload; `types: &[u8]` is what forces the
        // RLP-string encoding.
        #[derive(RlpEncodable)]
        struct Borrowed68<'a> {
            types: &'a [u8],
            sizes: &'a Vec<usize>,
            hashes: &'a Vec<B256>,
        }
        Borrowed68 {
            types: self.types.as_slice(),
            sizes: &self.sizes,
            hashes: &self.hashes,
        }
        .encode(out);
    }
    /// Returns the encoded length, computed over the same borrowing view used
    /// by [`Self::encode`].
    fn length(&self) -> usize {
        #[derive(RlpEncodable)]
        struct Borrowed68<'a> {
            types: &'a [u8],
            sizes: &'a Vec<usize>,
            hashes: &'a Vec<B256>,
        }
        Borrowed68 {
            types: self.types.as_slice(),
            sizes: &self.sizes,
            hashes: &self.hashes,
        }
        .length()
    }
}
impl Decodable for NewPooledTransactionHashes68 {
    /// Decodes the eth/68 announcement, expecting `types` as an RLP *string*
    /// (decoded through [`Bytes`]) for geth compatibility.
    ///
    /// Rejects payloads whose `types`/`sizes` lengths disagree with `hashes`,
    /// preserving the struct's parallel-vector invariant.
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        #[derive(RlpDecodable)]
        struct Raw68 {
            types: Bytes,
            sizes: Vec<usize>,
            hashes: Vec<B256>,
        }
        let Raw68 { types, sizes, hashes } = Raw68::decode(buf)?;
        let types: Vec<u8> = types.into();
        // Validate the parallel-vector invariant before returning.
        if types.len() != hashes.len() {
            return Err(alloy_rlp::Error::ListLengthMismatch {
                expected: hashes.len(),
                got: types.len(),
            })
        }
        if sizes.len() != hashes.len() {
            return Err(alloy_rlp::Error::ListLengthMismatch {
                expected: hashes.len(),
                got: sizes.len(),
            })
        }
        Ok(Self { types, sizes, hashes })
    }
}
/// Validation pass that checks for unique transaction hashes.
///
/// Implementors consume themselves into a [`PartiallyValidData`] map keyed by
/// hash, dropping duplicate entries.
pub trait DedupPayload {
    /// Value type in [`PartiallyValidData`] map.
    type Value;
    /// The payload contains no entries.
    fn is_empty(&self) -> bool;
    /// Returns the number of entries.
    fn len(&self) -> usize;
    /// Consumes self, returning an iterator over hashes in payload.
    fn dedup(self) -> PartiallyValidData<Self::Value>;
}
/// Value in [`PartiallyValidData`] map obtained from an announcement.
///
/// `Some((tx_type, size))` for eth/68 announcements, `None` for eth/66.
pub type Eth68TxMetadata = Option<(u8, usize)>;
impl DedupPayload for NewPooledTransactionHashes {
    type Value = Eth68TxMetadata;
    fn is_empty(&self) -> bool {
        // NOTE(review): resolves to the type's inherent `is_empty` (inherent
        // methods take precedence over trait methods) — otherwise this would
        // recurse infinitely. Confirm the inherent method exists.
        self.is_empty()
    }
    fn len(&self) -> usize {
        // Likewise delegates to the inherent `len`.
        self.len()
    }
    fn dedup(self) -> PartiallyValidData<Self::Value> {
        // Dispatch to the version-specific deduplication logic.
        match self {
            Self::Eth66(msg) => msg.dedup(),
            Self::Eth68(msg) => msg.dedup(),
        }
    }
}
impl DedupPayload for NewPooledTransactionHashes68 {
    type Value = Eth68TxMetadata;
    fn is_empty(&self) -> bool {
        self.hashes.is_empty()
    }
    fn len(&self) -> usize {
        self.hashes.len()
    }
    fn dedup(self) -> PartiallyValidData<Self::Value> {
        let Self { hashes, mut sizes, mut types } = self;
        let mut deduped_data = HashMap::with_capacity_and_hasher(hashes.len(), Default::default());
        // Walk the hashes back-to-front, popping the matching metadata off the
        // end of the parallel `types`/`sizes` vectors. Iterating in reverse
        // means that for a duplicated hash the FIRST occurrence (in
        // announcement order) is inserted last, so its metadata wins.
        for hash in hashes.into_iter().rev() {
            // Both pops happen unconditionally, keeping the three lists
            // aligned even if their lengths differ; trailing hashes without
            // metadata are silently dropped.
            if let (Some(ty), Some(size)) = (types.pop(), sizes.pop()) {
                deduped_data.insert(hash, Some((ty, size)));
            }
        }
        PartiallyValidData::from_raw_data_eth68(deduped_data)
    }
}
impl DedupPayload for NewPooledTransactionHashes66 {
    type Value = Eth68TxMetadata;

    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    fn len(&self) -> usize {
        self.0.len()
    }

    fn dedup(self) -> PartiallyValidData<Self::Value> {
        // eth/66 announcements carry no per-hash metadata, so every entry maps
        // to `None`; inserting into the map collapses duplicate hashes.
        let Self(hashes) = self;
        let mut deduped_data = HashMap::with_capacity_and_hasher(hashes.len(), Default::default());
        deduped_data.extend(hashes.into_iter().rev().map(|hash| (hash, None)));
        PartiallyValidData::from_raw_data_eth66(deduped_data)
    }
}
/// Interface for handling mempool message data. Used in various filters in pipelines in
/// `TransactionsManager` and in queries to `TransactionPool`.
pub trait HandleMempoolData {
    /// The announcement contains no entries.
    fn is_empty(&self) -> bool;
    /// Returns the number of entries.
    fn len(&self) -> usize;
    /// Retain only entries for which the hash in the entry satisfies a given predicate.
    ///
    /// Entries for which the predicate returns `false` are removed in place.
    fn retain_by_hash(&mut self, f: impl FnMut(&TxHash) -> bool);
}
/// Extension of [`HandleMempoolData`] interface, for mempool messages that are versioned.
pub trait HandleVersionedMempoolData {
    /// Returns the announcement version, either [`Eth66`](EthVersion::Eth66) or
    /// [`Eth68`](EthVersion::Eth68).
    fn msg_version(&self) -> EthVersion;
}
impl<T: SignedTransaction> HandleMempoolData for Vec<T> {
    fn is_empty(&self) -> bool {
        self.as_slice().is_empty()
    }

    fn len(&self) -> usize {
        self.as_slice().len()
    }

    fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) {
        // Keep only the transactions whose hash passes the predicate.
        self.retain(|tx| f(tx.tx_hash()))
    }
}
// Generates a [`HandleMempoolData`] impl for a struct whose `data` field is a map keyed by
// [`TxHash`], optionally generic over a single type parameter (e.g. `Foo<V>`).
macro_rules! handle_mempool_data_map_impl {
    ($data_ty:ty, $(<$generic:ident>)?) => {
        impl$(<$generic>)? HandleMempoolData for $data_ty {
            fn is_empty(&self) -> bool {
                self.data.is_empty()
            }
            fn len(&self) -> usize {
                self.data.len()
            }
            fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) {
                self.data.retain(|hash, _| f(hash));
            }
        }
    };
}
/// Data that has passed an initial validation pass that is not specific to any mempool message
/// type.
#[derive(Debug, Deref, DerefMut, IntoIterator)]
pub struct PartiallyValidData<V> {
    /// Unique transaction hashes mapped to their (message-specific) metadata value.
    #[deref]
    #[deref_mut]
    #[into_iterator]
    data: HashMap<TxHash, V>,
    /// Version of the message the data was received in, if the message is versioned.
    version: Option<EthVersion>,
}
handle_mempool_data_map_impl!(PartiallyValidData<V>, <V>);
impl<V> PartiallyValidData<V> {
    /// Wraps raw data.
    pub const fn from_raw_data(data: HashMap<TxHash, V>, version: Option<EthVersion>) -> Self {
        Self { data, version }
    }

    /// Wraps raw data with version [`EthVersion::Eth68`].
    pub const fn from_raw_data_eth68(data: HashMap<TxHash, V>) -> Self {
        Self { data, version: Some(EthVersion::Eth68) }
    }

    /// Wraps raw data with version [`EthVersion::Eth66`].
    pub const fn from_raw_data_eth66(data: HashMap<TxHash, V>) -> Self {
        Self { data, version: Some(EthVersion::Eth66) }
    }

    /// Returns a new [`PartiallyValidData`] with empty data from an [`Eth68`](EthVersion::Eth68)
    /// announcement.
    pub fn empty_eth68() -> Self {
        Self { data: HashMap::default(), version: Some(EthVersion::Eth68) }
    }

    /// Returns a new [`PartiallyValidData`] with empty data from an [`Eth66`](EthVersion::Eth66)
    /// announcement.
    pub fn empty_eth66() -> Self {
        Self { data: HashMap::default(), version: Some(EthVersion::Eth66) }
    }

    /// Returns the version of the message this data was received in if different versions of the
    /// message exists, either [`Eth66`](EthVersion::Eth66) or [`Eth68`](EthVersion::Eth68).
    pub const fn msg_version(&self) -> Option<EthVersion> {
        self.version
    }

    /// Destructs returning the validated data.
    pub fn into_data(self) -> HashMap<TxHash, V> {
        self.data
    }
}
/// Partially validated data from an announcement or a
/// [`PooledTransactions`](crate::PooledTransactions) response.
#[derive(Debug, Deref, DerefMut, IntoIterator, From)]
pub struct ValidAnnouncementData {
    /// Unique transaction hashes mapped to their eth/68 metadata, if any.
    #[deref]
    #[deref_mut]
    #[into_iterator]
    data: HashMap<TxHash, Eth68TxMetadata>,
    /// Version of the message the announcement was received in.
    version: EthVersion,
}
handle_mempool_data_map_impl!(ValidAnnouncementData,);
impl ValidAnnouncementData {
    /// Destructs returning only the valid hashes and the announcement message version. Caution! If
    /// this is [`Eth68`](EthVersion::Eth68) announcement data, this drops the metadata.
    pub fn into_request_hashes(self) -> (RequestTxHashes, EthVersion) {
        let Self { data, version } = self;
        let hashes: HashSet<_> = data.into_keys().collect();
        (RequestTxHashes::new(hashes), version)
    }

    /// Conversion from [`PartiallyValidData`] from an announcement. Note! [`PartiallyValidData`]
    /// from an announcement, should have some [`EthVersion`]. Panics if [`PartiallyValidData`] has
    /// version set to `None`.
    pub fn from_partially_valid_data(data: PartiallyValidData<Eth68TxMetadata>) -> Self {
        let PartiallyValidData { data, version } = data;
        Self { data, version: version.expect("should have eth version for conversion") }
    }

    /// Destructs returning the validated data.
    pub fn into_data(self) -> HashMap<TxHash, Eth68TxMetadata> {
        self.data
    }
}
impl HandleVersionedMempoolData for ValidAnnouncementData {
    fn msg_version(&self) -> EthVersion {
        // The version is always present here (it was required when converting
        // from [`PartiallyValidData`]), unlike `PartiallyValidData::msg_version`.
        self.version
    }
}
/// Hashes to request from a peer.
#[derive(Debug, Default, Deref, DerefMut, IntoIterator, Constructor)]
pub struct RequestTxHashes {
    /// The set of transaction hashes to request.
    #[deref]
    #[deref_mut]
    #[into_iterator(owned, ref)]
    hashes: HashSet<TxHash>,
}
impl RequestTxHashes {
    /// Returns a new [`RequestTxHashes`] with given capacity for hashes. Caution! Make sure to
    /// call [`HashSet::shrink_to_fit`] on [`RequestTxHashes`] when full, especially where it will
    /// be stored in its entirety like in the future waiting for a
    /// [`GetPooledTransactions`](crate::GetPooledTransactions) request to resolve.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::new(HashSet::with_capacity_and_hasher(capacity, Default::default()))
    }
    /// Returns a new empty instance.
    fn empty() -> Self {
        Self::new(HashSet::default())
    }
    /// Retains up to `count` elements in `self`, returning the surplus hashes in a new
    /// [`RequestTxHashes`].
    ///
    /// Note: [`HashSet`] iteration order is unspecified, so which hashes are kept versus
    /// returned is arbitrary.
    pub fn retain_count(&mut self, count: usize) -> Self {
        let rest_capacity = self.hashes.len().saturating_sub(count);
        // Already holding at most `count` hashes — nothing to split off.
        if rest_capacity == 0 {
            return Self::empty()
        }
        let mut rest = Self::with_capacity(rest_capacity);
        let mut i = 0;
        self.hashes.retain(|hash| {
            // Keep the first `count` hashes visited; move the remainder into `rest`.
            if i >= count {
                rest.insert(*hash);
                return false
            }
            i += 1;
            true
        });
        rest
    }
}
impl FromIterator<(TxHash, Eth68TxMetadata)> for RequestTxHashes {
    /// Collects only the hashes, discarding any associated eth/68 metadata.
    fn from_iter<I: IntoIterator<Item = (TxHash, Eth68TxMetadata)>>(iter: I) -> Self {
        let hashes = iter.into_iter().map(|(hash, _metadata)| hash).collect();
        Self::new(hashes)
    }
}
/// The earliest block, the latest block and hash of the latest block which can be provided.
/// See [BlockRangeUpdate](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#blockrangeupdate-0x11).
///
/// Introduced in `eth/69`.
#[derive(Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct BlockRangeUpdate {
    /// The earliest block which is available.
    pub earliest: u64,
    /// The latest block which is available.
    pub latest: u64,
    /// Latest available block's hash.
    pub latest_hash: B256,
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::Typed2718;
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{b256, hex, Signature, U256};
use reth_ethereum_primitives::{Transaction, TransactionSigned};
use std::str::FromStr;
/// Takes as input a struct / encoded hex message pair, ensuring that we encode to the exact hex
/// message, and decode to the exact struct.
fn test_encoding_vector<T: Encodable + Decodable + PartialEq + core::fmt::Debug>(
input: (T, &[u8]),
) {
let (expected_decoded, expected_encoded) = input;
let mut encoded = Vec::new();
expected_decoded.encode(&mut encoded);
assert_eq!(hex::encode(&encoded), hex::encode(expected_encoded));
let decoded = T::decode(&mut encoded.as_ref()).unwrap();
assert_eq!(expected_decoded, decoded);
}
#[test]
fn can_return_latest_block() {
let mut blocks = NewBlockHashes(vec![BlockHashNumber { hash: B256::random(), number: 0 }]);
let latest = blocks.latest().unwrap();
assert_eq!(latest.number, 0);
blocks.0.push(BlockHashNumber { hash: B256::random(), number: 100 });
blocks.0.push(BlockHashNumber { hash: B256::random(), number: 2 });
let latest = blocks.latest().unwrap();
assert_eq!(latest.number, 100);
}
#[test]
fn eth_68_tx_hash_roundtrip() {
let vectors = vec![
(
NewPooledTransactionHashes68 { types: vec![], sizes: vec![], hashes: vec![] },
&hex!("c380c0c0")[..],
),
(
NewPooledTransactionHashes68 {
types: vec![0x00],
sizes: vec![0x00],
hashes: vec![
B256::from_str(
"0x0000000000000000000000000000000000000000000000000000000000000000",
)
.unwrap(),
],
},
&hex!(
"e500c180e1a00000000000000000000000000000000000000000000000000000000000000000"
)[..],
),
(
NewPooledTransactionHashes68 {
types: vec![0x00, 0x00],
sizes: vec![0x00, 0x00],
hashes: vec![
B256::from_str(
"0x0000000000000000000000000000000000000000000000000000000000000000",
)
.unwrap(),
B256::from_str(
"0x0000000000000000000000000000000000000000000000000000000000000000",
)
.unwrap(),
],
},
&hex!(
"f84a820000c28080f842a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000"
)[..],
),
(
NewPooledTransactionHashes68 {
types: vec![0x02],
sizes: vec![0xb6],
hashes: vec![
B256::from_str(
"0xfecbed04c7b88d8e7221a0a3f5dc33f220212347fc167459ea5cc9c3eb4c1124",
)
.unwrap(),
],
},
&hex!(
"e602c281b6e1a0fecbed04c7b88d8e7221a0a3f5dc33f220212347fc167459ea5cc9c3eb4c1124"
)[..],
),
(
NewPooledTransactionHashes68 {
types: vec![0xff, 0xff],
sizes: vec![0xffffffff, 0xffffffff],
hashes: vec![
B256::from_str(
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
)
.unwrap(),
B256::from_str(
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
)
.unwrap(),
],
},
&hex!(
"f85282ffffca84ffffffff84fffffffff842a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
)[..],
),
(
NewPooledTransactionHashes68 {
types: vec![0xff, 0xff],
sizes: vec![0xffffffff, 0xffffffff],
hashes: vec![
B256::from_str(
"0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafe",
)
.unwrap(),
B256::from_str(
"0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafe",
)
.unwrap(),
],
},
&hex!(
"f85282ffffca84ffffffff84fffffffff842a0beefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafea0beefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafe"
)[..],
),
(
NewPooledTransactionHashes68 {
types: vec![0x10, 0x10],
sizes: vec![0xdeadc0de, 0xdeadc0de],
hashes: vec![
B256::from_str(
"0x3b9aca00f0671c9a2a1b817a0a78d3fe0c0f776cccb2a8c3c1b412a4f4e4d4e2",
)
.unwrap(),
B256::from_str(
"0x3b9aca00f0671c9a2a1b817a0a78d3fe0c0f776cccb2a8c3c1b412a4f4e4d4e2",
)
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/state.rs | crates/net/eth-wire-types/src/state.rs | //! Implements the `GetNodeData` and `NodeData` message types.
use alloc::vec::Vec;
use alloy_primitives::{Bytes, B256};
use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper};
use reth_codecs_derive::add_arbitrary_tests;
/// A request for state tree nodes corresponding to the given hashes.
/// This message was removed in `eth/67`, only clients running `eth/66` or earlier will respond to
/// this message.
///
/// Encoded transparently as the inner list of hashes (wrapper encoding).
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetNodeData(pub Vec<B256>);
/// The response to [`GetNodeData`], containing the state tree nodes or contract bytecode
/// corresponding to the requested hashes.
///
/// Not all nodes are guaranteed to be returned by the peer.
/// This message was removed in `eth/67`.
///
/// Encoded transparently as the inner list of byte blobs (wrapper encoding).
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct NodeData(pub Vec<Bytes>);
#[cfg(test)]
mod tests {
    use alloy_primitives::hex;
    use crate::{message::RequestPair, GetNodeData, NodeData};
    use alloy_rlp::{Decodable, Encodable};
    #[test]
    // Test vector from: https://eips.ethereum.org/EIPS/eip-2481
    fn encode_get_node_data() {
        // Encoding a request (id 1111, two 32-byte hashes) must reproduce the
        // reference bytes exactly.
        let expected = hex!(
            "f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
        );
        let mut data = vec![];
        let request = RequestPair {
            request_id: 1111,
            message: GetNodeData(vec![
                hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
                hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
            ]),
        };
        request.encode(&mut data);
        assert_eq!(data, expected);
    }
    #[test]
    // Test vector from: https://eips.ethereum.org/EIPS/eip-2481
    fn decode_get_node_data() {
        // Decoding the reference bytes must yield the matching request struct.
        let data = hex!(
            "f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
        );
        let request = RequestPair::<GetNodeData>::decode(&mut &data[..]).unwrap();
        assert_eq!(
            request,
            RequestPair {
                request_id: 1111,
                message: GetNodeData(vec![
                    hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
                    hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
                ])
            }
        );
    }
    #[test]
    // Test vector from: https://eips.ethereum.org/EIPS/eip-2481
    fn encode_node_data() {
        // `NodeData` carries raw, variable-length byte blobs rather than hashes.
        let expected = hex!("ce820457ca84deadc0de84feedbeef");
        let mut data = vec![];
        let request = RequestPair {
            request_id: 1111,
            message: NodeData(vec![
                hex!("deadc0de").as_slice().into(),
                hex!("feedbeef").as_slice().into(),
            ]),
        };
        request.encode(&mut data);
        assert_eq!(data, expected);
    }
    #[test]
    // Test vector from: https://eips.ethereum.org/EIPS/eip-2481
    fn decode_node_data() {
        let data = hex!("ce820457ca84deadc0de84feedbeef");
        let request = RequestPair::<NodeData>::decode(&mut &data[..]).unwrap();
        assert_eq!(
            request,
            RequestPair {
                request_id: 1111,
                message: NodeData(vec![
                    hex!("deadc0de").as_slice().into(),
                    hex!("feedbeef").as_slice().into(),
                ])
            }
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/primitives.rs | crates/net/eth-wire-types/src/primitives.rs | //! Abstraction over primitive types in network messages.
use crate::NewBlockPayload;
use alloy_consensus::{RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt};
use alloy_rlp::{Decodable, Encodable};
use core::fmt::Debug;
use reth_ethereum_primitives::{EthPrimitives, PooledTransactionVariant};
use reth_primitives_traits::{
Block, BlockBody, BlockHeader, BlockTy, NodePrimitives, SignedTransaction,
};
/// Abstraction over primitive types which might appear in network messages.
///
/// This trait defines the types used in the Ethereum Wire Protocol (devp2p) for
/// peer-to-peer communication. While [`NodePrimitives`] defines the core types
/// used throughout the node (consensus format), `NetworkPrimitives` defines how
/// these types are represented when transmitted over the network.
///
/// The key distinction is in transaction handling:
/// - [`NodePrimitives`] defines `SignedTx` - the consensus format stored in blocks
/// - `NetworkPrimitives` defines `BroadcastedTransaction` and `PooledTransaction` - the formats
/// used for network propagation with additional data like blob sidecars
///
/// These traits work together through implementations like [`NetPrimitivesFor`],
/// which ensures type compatibility between a node's internal representation and
/// its network representation.
///
/// See [`crate::EthMessage`] for more context.
pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + 'static {
    /// The block header type.
    type BlockHeader: BlockHeader + 'static;
    /// The block body type.
    type BlockBody: BlockBody + 'static;
    /// Full block type.
    ///
    /// The [`Encodable`]/[`Decodable`] bounds allow the block to be serialized for the wire.
    type Block: Block<Header = Self::BlockHeader, Body = Self::BlockBody>
        + Encodable
        + Decodable
        + 'static;
    /// The transaction type which peers announce in `Transactions` messages.
    ///
    /// This is different from `PooledTransactions` to account for the Ethereum case where
    /// EIP-4844 blob transactions are not announced over the network and can only be
    /// explicitly requested from peers. This is because blob transactions can be quite
    /// large and broadcasting them to all peers would cause
    /// significant bandwidth usage.
    type BroadcastedTransaction: SignedTransaction + 'static;
    /// The transaction type which peers return in `PooledTransactions` messages.
    ///
    /// For EIP-4844 blob transactions, this includes the full blob sidecar with
    /// KZG commitments and proofs that are needed for validation but are not
    /// included in the consensus block format.
    ///
    /// The `TryFrom` bound allows a fallible conversion from the broadcast format.
    type PooledTransaction: SignedTransaction + TryFrom<Self::BroadcastedTransaction> + 'static;
    /// The transaction type which peers return in `GetReceipts` messages.
    type Receipt: TxReceipt
        + RlpEncodableReceipt
        + RlpDecodableReceipt
        + Encodable
        + Decodable
        + Unpin
        + 'static;
    /// The payload type for the `NewBlock` message.
    type NewBlockPayload: NewBlockPayload<Block = Self::Block>;
}
/// This is a helper trait for use in bounds, where some of the [`NetworkPrimitives`] associated
/// types must be the same as the [`NodePrimitives`] associated types.
///
/// Implemented automatically for any qualifying type via a blanket impl below.
pub trait NetPrimitivesFor<N: NodePrimitives>:
    NetworkPrimitives<
    BlockHeader = N::BlockHeader,
    BlockBody = N::BlockBody,
    Block = N::Block,
    Receipt = N::Receipt,
>
{
}
// Blanket impl: any `NetworkPrimitives` whose header/body/block/receipt types match the
// node's primitives automatically satisfies `NetPrimitivesFor`.
impl<N, T> NetPrimitivesFor<N> for T
where
    N: NodePrimitives,
    T: NetworkPrimitives<
        BlockHeader = N::BlockHeader,
        BlockBody = N::BlockBody,
        Block = N::Block,
        Receipt = N::Receipt,
    >,
{
}
/// Basic implementation of [`NetworkPrimitives`] combining [`NodePrimitives`] and a pooled
/// transaction.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BasicNetworkPrimitives<N: NodePrimitives, Pooled, NewBlock = crate::NewBlock<BlockTy<N>>>(
    // Zero-sized marker: this type only carries its type parameters, no data.
    core::marker::PhantomData<(N, Pooled, NewBlock)>,
);
impl<N, Pooled, NewBlock> NetworkPrimitives for BasicNetworkPrimitives<N, Pooled, NewBlock>
where
    N: NodePrimitives,
    Pooled: SignedTransaction + TryFrom<N::SignedTx> + 'static,
    NewBlock: NewBlockPayload<Block = N::Block>,
{
    type BlockHeader = N::BlockHeader;
    type BlockBody = N::BlockBody;
    type Block = N::Block;
    // Broadcasts use the node's consensus transaction format as-is.
    type BroadcastedTransaction = N::SignedTx;
    // Pooled responses use the caller-supplied representation.
    type PooledTransaction = Pooled;
    type Receipt = N::Receipt;
    type NewBlockPayload = NewBlock;
}
/// Network primitive types used by Ethereum networks.
///
/// Combines [`EthPrimitives`] (consensus types) with [`PooledTransactionVariant`] for
/// `PooledTransactions` responses.
pub type EthNetworkPrimitives = BasicNetworkPrimitives<EthPrimitives, PooledTransactionVariant>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/header.rs | crates/net/eth-wire-types/src/header.rs | //! Header types.
use alloy_rlp::{Decodable, Encodable};
use bytes::BufMut;
use reth_codecs_derive::add_arbitrary_tests;
/// Represents the direction for a headers request depending on the `reverse` field of the request.
/// > The response must contain a number of block headers, of rising number when reverse is 0,
/// > falling when 1
///
/// Ref: <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getblockheaders-0x03>
///
/// [`HeadersDirection::Rising`] block numbers for `reverse == 0 == false`
/// [`HeadersDirection::Falling`] block numbers for `reverse == 1 == true`
///
/// See also <https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getblockheaders-0x03>
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub enum HeadersDirection {
    /// Falling block number.
    Falling,
    /// Rising block number.
    ///
    /// This is the default direction (`reverse == 0`).
    #[default]
    Rising,
}
impl HeadersDirection {
    /// Returns true for rising block numbers
    pub const fn is_rising(&self) -> bool {
        matches!(self, Self::Rising)
    }

    /// Returns true for falling block numbers
    pub const fn is_falling(&self) -> bool {
        matches!(self, Self::Falling)
    }

    /// Converts the bool into a direction.
    ///
    /// Returns:
    ///
    /// [`HeadersDirection::Rising`] block numbers for `reverse == 0 == false`
    /// [`HeadersDirection::Falling`] block numbers for `reverse == 1 == true`
    pub const fn new(reverse: bool) -> Self {
        match reverse {
            true => Self::Falling,
            false => Self::Rising,
        }
    }
}
impl Encodable for HeadersDirection {
    fn encode(&self, out: &mut dyn BufMut) {
        // Encoded as a single RLP boolean: `false` = rising, `true` = falling.
        let reverse: bool = (*self).into();
        reverse.encode(out)
    }

    fn length(&self) -> usize {
        let reverse: bool = (*self).into();
        reverse.length()
    }
}
impl Decodable for HeadersDirection {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // A single RLP boolean maps directly onto the direction.
        bool::decode(buf).map(Self::new)
    }
}
impl From<bool> for HeadersDirection {
    fn from(reverse: bool) -> Self {
        // Delegates to `new`: `true` (reverse == 1) => Falling, `false` => Rising.
        Self::new(reverse)
    }
}
impl From<HeadersDirection> for bool {
    fn from(value: HeadersDirection) -> Self {
        // `true` corresponds to `reverse == 1`, i.e. falling block numbers.
        matches!(value, HeadersDirection::Falling)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH};
use alloy_primitives::{address, b256, bloom, bytes, hex, Bytes, B256, U256};
use alloy_rlp::{Decodable, Encodable};
use std::str::FromStr;
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn test_encode_block_header() {
let expected = hex!(
"f901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
);
let header = Header {
difficulty: U256::from(0x8ae_u64),
number: 0xd05_u64,
gas_limit: 0x115c,
gas_used: 0x15b3,
timestamp: 0x1a0a_u64,
extra_data: Bytes::from_str("7788").unwrap(),
ommers_hash: B256::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
..Default::default()
};
let mut data = vec![];
header.encode(&mut data);
assert_eq!(hex::encode(&data), hex::encode(expected));
assert_eq!(header.length(), data.len());
}
// Test vector from: https://github.com/ethereum/tests/blob/f47bbef4da376a49c8fc3166f09ab8a6d182f765/BlockchainTests/ValidBlocks/bcEIP1559/baseFee.json#L15-L36
#[test]
fn test_eip1559_block_header_hash() {
let expected_hash =
b256!("0x6a251c7c3c5dca7b42407a3752ff48f3bbca1fab7f9868371d9918daf1988d1f");
let header = Header {
parent_hash: b256!("0xe0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a"),
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: address!("0xba5e000000000000000000000000000000000000"),
state_root: b256!(
"0xec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7"
),
transactions_root: b256!(
"0x50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf"
),
receipts_root: b256!(
"0x29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9"
),
logs_bloom: bloom!(
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
),
difficulty: U256::from(0x020000),
number: 0x01_u64,
gas_limit: 0x016345785d8a0000,
gas_used: 0x015534,
timestamp: 0x079e,
extra_data: bytes!("42"),
mix_hash: b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
nonce: 0u64.into() ,
base_fee_per_gas: Some(0x036b),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
};
assert_eq!(header.hash_slow(), expected_hash);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn test_decode_block_header() {
let data = hex!(
"f901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
);
let expected = Header {
difficulty: U256::from(0x8aeu64),
number: 0xd05u64,
gas_limit: 0x115c,
gas_used: 0x15b3,
timestamp: 0x1a0au64,
extra_data: Bytes::from_str("7788").unwrap(),
ommers_hash: B256::ZERO,
state_root: B256::ZERO,
transactions_root: B256::ZERO,
receipts_root: B256::ZERO,
..Default::default()
};
let header = <Header as Decodable>::decode(&mut data.as_slice()).unwrap();
assert_eq!(header, expected);
// make sure the hash matches
let expected_hash =
b256!("0x8c2f2af15b7b563b6ab1e09bed0e9caade7ed730aec98b70a993597a797579a9");
assert_eq!(header.hash_slow(), expected_hash);
}
// Test vector from: https://github.com/ethereum/tests/blob/970503935aeb76f59adfa3b3224aabf25e77b83d/BlockchainTests/ValidBlocks/bcExample/shanghaiExample.json#L15-L34
#[test]
fn test_decode_block_header_with_withdrawals() {
let data = hex!(
"f9021ca018db39e19931515b30b16b3a92c292398039e31d6c267111529c3f2ba0a26c17a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa095efce3d6972874ca8b531b233b7a1d1ff0a56f08b20c8f1b89bef1b001194a5a071e515dd89e8a7973402c2e11646081b4e2209b2d3a1550df5095289dabcb3fba0ed9c51ea52c968e552e370a77a41dac98606e98b915092fb5f949d6452fce1c4b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001887fffffffffffffff830125b882079e42a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b42188000000000000000009a027f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973"
);
let expected = Header {
parent_hash: B256::from_str(
"18db39e19931515b30b16b3a92c292398039e31d6c267111529c3f2ba0a26c17",
)
.unwrap(),
beneficiary: address!("0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"),
state_root: B256::from_str(
"95efce3d6972874ca8b531b233b7a1d1ff0a56f08b20c8f1b89bef1b001194a5",
)
.unwrap(),
transactions_root: B256::from_str(
"71e515dd89e8a7973402c2e11646081b4e2209b2d3a1550df5095289dabcb3fb",
)
.unwrap(),
receipts_root: B256::from_str(
"ed9c51ea52c968e552e370a77a41dac98606e98b915092fb5f949d6452fce1c4",
)
.unwrap(),
number: 0x01,
gas_limit: 0x7fffffffffffffff,
gas_used: 0x0125b8,
timestamp: 0x079e,
extra_data: Bytes::from_str("42").unwrap(),
mix_hash: EMPTY_ROOT_HASH,
base_fee_per_gas: Some(0x09),
withdrawals_root: Some(b256!(
"0x27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973"
)),
..Default::default()
};
let header = <Header as Decodable>::decode(&mut data.as_slice()).unwrap();
assert_eq!(header, expected);
let expected_hash =
b256!("0x85fdec94c534fa0a1534720f167b899d1fc268925c71c0cbf5aaa213483f5a69");
assert_eq!(header.hash_slow(), expected_hash);
}
// Test vector from: https://github.com/ethereum/tests/blob/7e9e0940c0fcdbead8af3078ede70f969109bd85/BlockchainTests/ValidBlocks/bcExample/cancunExample.json
#[test]
fn test_decode_block_header_with_blob_fields_ef_tests() {
let data = hex!(
"f90221a03a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa03c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406aea04409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9cea046cab26abf1047b5b119ecc2dda1296b071766c8b1307e1381fcecc90d513d86b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008001887fffffffffffffff8302a86582079e42a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b42188000000000000000009a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b4218302000080"
);
let expected = Header {
parent_hash: B256::from_str(
"3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6",
)
.unwrap(),
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: address!("0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba"),
state_root: B256::from_str(
"3c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406ae",
)
.unwrap(),
transactions_root: B256::from_str(
"4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce",
)
.unwrap(),
receipts_root: B256::from_str(
"46cab26abf1047b5b119ecc2dda1296b071766c8b1307e1381fcecc90d513d86",
)
.unwrap(),
logs_bloom: Default::default(),
difficulty: U256::from(0),
number: 0x1,
gas_limit: 0x7fffffffffffffff,
gas_used: 0x02a865,
timestamp: 0x079e,
extra_data: Bytes::from(vec![0x42]),
mix_hash: EMPTY_ROOT_HASH,
nonce: 0u64.into(),
base_fee_per_gas: Some(9),
withdrawals_root: Some(EMPTY_ROOT_HASH),
blob_gas_used: Some(0x020000),
excess_blob_gas: Some(0),
parent_beacon_block_root: None,
requests_hash: None,
};
let header = Header::decode(&mut data.as_slice()).unwrap();
assert_eq!(header, expected);
let expected_hash =
B256::from_str("0x10aca3ebb4cf6ddd9e945a5db19385f9c105ede7374380c50d56384c3d233785")
.unwrap();
assert_eq!(header.hash_slow(), expected_hash);
}
#[test]
fn test_decode_block_header_with_blob_fields() {
// Block from devnet-7
let data = hex!(
"f90239a013a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794f97e180c050e5ab072211ad2c213eb5aee4df134a0ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068aa056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080830305988401c9c380808464c40d5499d883010c01846765746888676f312e32302e35856c696e7578a070ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f232588000000000000000007a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421808401600000"
);
let expected = Header {
parent_hash: B256::from_str(
"13a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5",
)
.unwrap(),
ommers_hash: EMPTY_OMMER_ROOT_HASH,
beneficiary: address!("0xf97e180c050e5ab072211ad2c213eb5aee4df134"),
state_root: b256!("0xec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a"),
transactions_root: EMPTY_ROOT_HASH,
receipts_root: EMPTY_ROOT_HASH,
logs_bloom: Default::default(),
difficulty: U256::from(0),
number: 0x30598,
gas_limit: 0x1c9c380,
gas_used: 0,
timestamp: 0x64c40d54,
extra_data: bytes!("d883010c01846765746888676f312e32302e35856c696e7578"),
mix_hash: b256!("0x70ccadc40b16e2094954b1064749cc6fbac783c1712f1b271a8aac3eda2f2325"),
nonce: 0u64.into(),
base_fee_per_gas: Some(7),
withdrawals_root: Some(EMPTY_ROOT_HASH),
parent_beacon_block_root: None,
blob_gas_used: Some(0),
excess_blob_gas: Some(0x1600000),
requests_hash: None,
};
let header = Header::decode(&mut data.as_slice()).unwrap();
assert_eq!(header, expected);
let expected_hash =
b256!("0x539c9ea0a3ca49808799d3964b8b6607037227de26bc51073c6926963127087b");
assert_eq!(header.hash_slow(), expected_hash);
}
#[test]
fn sanity_direction() {
let reverse = true;
assert_eq!(HeadersDirection::Falling, reverse.into());
assert_eq!(reverse, bool::from(HeadersDirection::Falling));
let reverse = false;
assert_eq!(HeadersDirection::Rising, reverse.into());
assert_eq!(reverse, bool::from(HeadersDirection::Rising));
let mut buf = Vec::new();
let direction = HeadersDirection::Falling;
direction.encode(&mut buf);
assert_eq!(direction, HeadersDirection::decode(&mut buf.as_slice()).unwrap());
let mut buf = Vec::new();
let direction = HeadersDirection::Rising;
direction.encode(&mut buf);
assert_eq!(direction, HeadersDirection::decode(&mut buf.as_slice()).unwrap());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/receipts.rs | crates/net/eth-wire-types/src/receipts.rs | //! Implements the `GetReceipts` and `Receipts` message types.
use alloc::vec::Vec;
use alloy_consensus::{ReceiptWithBloom, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt};
use alloy_primitives::B256;
use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper};
use reth_codecs_derive::add_arbitrary_tests;
use reth_ethereum_primitives::Receipt;
/// A request for transaction receipts from the given block hashes.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetReceipts(
/// The block hashes to request receipts for.
pub Vec<B256>,
);
/// The response to [`GetReceipts`], containing receipt lists that correspond to each block
/// requested.
#[derive(Clone, Debug, PartialEq, Eq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct Receipts<T = Receipt>(
/// Each receipt hash should correspond to a block hash in the request.
pub Vec<Vec<ReceiptWithBloom<T>>>,
);
impl<T: RlpEncodableReceipt> alloy_rlp::Encodable for Receipts<T> {
#[inline]
fn encode(&self, out: &mut dyn alloy_rlp::BufMut) {
self.0.encode(out)
}
#[inline]
fn length(&self) -> usize {
self.0.length()
}
}
impl<T: RlpDecodableReceipt> alloy_rlp::Decodable for Receipts<T> {
#[inline]
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
alloy_rlp::Decodable::decode(buf).map(Self)
}
}
/// Eth/69 receipt response type that removes bloom filters from the protocol.
///
/// This is effectively a subset of [`Receipts`].
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct Receipts69<T = Receipt>(pub Vec<Vec<T>>);
impl<T: RlpEncodableReceipt + alloy_rlp::Encodable> alloy_rlp::Encodable for Receipts69<T> {
#[inline]
fn encode(&self, out: &mut dyn alloy_rlp::BufMut) {
self.0.encode(out)
}
#[inline]
fn length(&self) -> usize {
self.0.length()
}
}
impl<T: RlpDecodableReceipt + alloy_rlp::Decodable> alloy_rlp::Decodable for Receipts69<T> {
#[inline]
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
alloy_rlp::Decodable::decode(buf).map(Self)
}
}
impl<T: TxReceipt> Receipts69<T> {
/// Encodes all receipts with the bloom filter.
///
/// Note: This is an expensive operation that recalculates the bloom for each receipt.
pub fn into_with_bloom(self) -> Receipts<T> {
Receipts(
self.0
.into_iter()
.map(|receipts| receipts.into_iter().map(|r| r.into_with_bloom()).collect())
.collect(),
)
}
}
impl<T: TxReceipt> From<Receipts69<T>> for Receipts<T> {
fn from(receipts: Receipts69<T>) -> Self {
receipts.into_with_bloom()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{message::RequestPair, GetReceipts, Receipts};
use alloy_consensus::TxType;
use alloy_primitives::{hex, Log};
use alloy_rlp::{Decodable, Encodable};
#[test]
fn roundtrip_eip1559() {
let receipts = Receipts(vec![vec![ReceiptWithBloom {
receipt: Receipt { tx_type: TxType::Eip1559, ..Default::default() },
logs_bloom: Default::default(),
}]]);
let mut out = vec![];
receipts.encode(&mut out);
let mut out = out.as_slice();
let decoded = Receipts::decode(&mut out).unwrap();
assert_eq!(receipts, decoded);
}
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn encode_get_receipts() {
let expected = hex!(
"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
);
let mut data = vec![];
let request = RequestPair {
request_id: 1111,
message: GetReceipts(vec![
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
]),
};
request.encode(&mut data);
assert_eq!(data, expected);
}
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn decode_get_receipts() {
let data = hex!(
"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
);
let request = RequestPair::<GetReceipts>::decode(&mut &data[..]).unwrap();
assert_eq!(
request,
RequestPair {
request_id: 1111,
message: GetReceipts(vec![
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
]),
}
);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn encode_receipts() {
let expected = hex!(
"f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"
);
let mut data = vec![];
let request = RequestPair {
request_id: 1111,
message: Receipts(vec![vec![
ReceiptWithBloom {
receipt: Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 0x1u64,
logs: vec![
Log::new_unchecked(
hex!("0000000000000000000000000000000000000011").into(),
vec![
hex!("000000000000000000000000000000000000000000000000000000000000dead").into(),
hex!("000000000000000000000000000000000000000000000000000000000000beef").into(),
],
hex!("0100ff")[..].into(),
),
],
success: false,
},
logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(),
},
]]),
};
request.encode(&mut data);
assert_eq!(data, expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn decode_receipts() {
let data = hex!(
"f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"
);
let request = RequestPair::<Receipts>::decode(&mut &data[..]).unwrap();
assert_eq!(
request,
RequestPair {
request_id: 1111,
message: Receipts(vec![
vec![
ReceiptWithBloom {
receipt: Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 0x1u64,
logs: vec![
Log::new_unchecked(
hex!("0000000000000000000000000000000000000011").into(),
vec![
hex!("000000000000000000000000000000000000000000000000000000000000dead").into(),
hex!("000000000000000000000000000000000000000000000000000000000000beef").into(),
],
hex!("0100ff")[..].into(),
),
],
success: false,
},
logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(),
},
],
]),
}
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/blocks.rs | crates/net/eth-wire-types/src/blocks.rs | //! Implements the `GetBlockHeaders`, `GetBlockBodies`, `BlockHeaders`, and `BlockBodies` message
//! types.
use crate::HeadersDirection;
use alloc::vec::Vec;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::B256;
use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper};
use reth_codecs_derive::{add_arbitrary_tests, generate_tests};
/// A request for a peer to return block headers starting at the requested block.
/// The peer must return at most [`limit`](#structfield.limit) headers.
/// If the [`reverse`](#structfield.reverse) field is `true`, the headers will be returned starting
/// at [`start_block`](#structfield.start_block), traversing towards the genesis block.
/// Otherwise, headers will be returned starting at [`start_block`](#structfield.start_block),
/// traversing towards the latest block.
///
/// If the [`skip`](#structfield.skip) field is non-zero, the peer must skip that amount of headers
/// in the direction specified by [`reverse`](#structfield.reverse).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetBlockHeaders {
/// The block number or hash that the peer should start returning headers from.
pub start_block: BlockHashOrNumber,
/// The maximum number of headers to return.
pub limit: u64,
/// The number of blocks that the node should skip while traversing and returning headers.
/// A skip value of zero denotes that the peer should return contiguous headers, starting from
/// [`start_block`](#structfield.start_block) and returning at most
/// [`limit`](#structfield.limit) headers.
pub skip: u32,
/// The direction in which the headers should be returned in.
pub direction: HeadersDirection,
}
/// The response to [`GetBlockHeaders`], containing headers if any headers were found.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub struct BlockHeaders<H = alloy_consensus::Header>(
/// The requested headers.
pub Vec<H>,
);
generate_tests!(#[rlp, 10] BlockHeaders<alloy_consensus::Header>, EthBlockHeadersTests);
impl<H> From<Vec<H>> for BlockHeaders<H> {
fn from(headers: Vec<H>) -> Self {
Self(headers)
}
}
/// A request for a peer to return block bodies for the given block hashes.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetBlockBodies(
/// The block hashes to request bodies for.
pub Vec<B256>,
);
impl From<Vec<B256>> for GetBlockBodies {
fn from(hashes: Vec<B256>) -> Self {
Self(hashes)
}
}
/// The response to [`GetBlockBodies`], containing the block bodies that the peer knows about if
/// any were found.
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub struct BlockBodies<B = reth_ethereum_primitives::BlockBody>(
/// The requested block bodies, each of which should correspond to a hash in the request.
pub Vec<B>,
);
generate_tests!(#[rlp, 16] BlockBodies<reth_ethereum_primitives::BlockBody>, EthBlockBodiesTests);
impl<B> From<Vec<B>> for BlockBodies<B> {
fn from(bodies: Vec<B>) -> Self {
Self(bodies)
}
}
#[cfg(test)]
mod tests {
use crate::{
message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders,
HeadersDirection,
};
use alloy_consensus::{Header, TxLegacy};
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{hex, Signature, TxKind, U256};
use alloy_rlp::{Decodable, Encodable};
use reth_ethereum_primitives::{BlockBody, Transaction, TransactionSigned};
use std::str::FromStr;
#[test]
fn decode_hash() {
// this is a valid 32 byte rlp string
let rlp = hex!("a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let decoded_number = BlockHashOrNumber::decode(&mut &rlp[..]).unwrap();
let full_bytes = [0xff; 32].into();
let expected = BlockHashOrNumber::Hash(full_bytes);
assert_eq!(expected, decoded_number);
}
#[test]
fn decode_number() {
// this is a valid 64 bit number
let rlp = hex!("88ffffffffffffffff");
let decoded_number = BlockHashOrNumber::decode(&mut &rlp[..]).unwrap();
let expected = BlockHashOrNumber::Number(u64::MAX);
assert_eq!(expected, decoded_number);
}
#[test]
fn decode_largest_single_byte() {
// the largest single byte is 0x7f, so we should be able to decode this into a u64
let rlp = hex!("7f");
let decoded_number = BlockHashOrNumber::decode(&mut &rlp[..]).unwrap();
let expected = BlockHashOrNumber::Number(0x7fu64);
assert_eq!(expected, decoded_number);
}
#[test]
fn decode_long_hash() {
// let's try a 33 byte long string
// 0xa1 = 0x80 (start of string) + 0x21 (33, length of string)
let long_rlp = hex!("a1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let decode_result = BlockHashOrNumber::decode(&mut &long_rlp[..]);
assert!(
decode_result.is_err(),
"Decoding a bytestring longer than 32 bytes should not decode successfully"
);
}
#[test]
fn decode_long_number() {
// let's try a 72 bit number
// 0x89 = 0x80 (start of string) + 0x09 (9, length of string)
let long_number = hex!("89ffffffffffffffffff");
let decode_result = BlockHashOrNumber::decode(&mut &long_number[..]);
assert!(
decode_result.is_err(),
"Decoding a number longer than 64 bits (but not exactly 32 bytes) should not decode successfully"
);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn encode_get_block_header() {
let expected = hex!(
"e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"
);
let mut data = vec![];
RequestPair::<GetBlockHeaders> {
request_id: 1111,
message: GetBlockHeaders {
start_block: BlockHashOrNumber::Hash(
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
),
limit: 5,
skip: 5,
direction: HeadersDirection::Rising,
},
}
.encode(&mut data);
assert_eq!(data, expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn decode_get_block_header() {
let data = hex!(
"e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"
);
let expected = RequestPair::<GetBlockHeaders> {
request_id: 1111,
message: GetBlockHeaders {
start_block: BlockHashOrNumber::Hash(
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
),
limit: 5,
skip: 5,
direction: HeadersDirection::Rising,
},
};
let result = RequestPair::decode(&mut &data[..]);
assert_eq!(result.unwrap(), expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn encode_get_block_header_number() {
let expected = hex!("ca820457c682270f050580");
let mut data = vec![];
RequestPair {
request_id: 1111,
message: GetBlockHeaders {
start_block: BlockHashOrNumber::Number(9999),
limit: 5,
skip: 5,
direction: HeadersDirection::Rising,
},
}
.encode(&mut data);
assert_eq!(data, expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn decode_get_block_header_number() {
let data = hex!("ca820457c682270f050580");
let expected = RequestPair {
request_id: 1111,
message: GetBlockHeaders {
start_block: BlockHashOrNumber::Number(9999),
limit: 5,
skip: 5,
direction: HeadersDirection::Rising,
},
};
let result = RequestPair::decode(&mut &data[..]);
assert_eq!(result.unwrap(), expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn encode_block_header() {
// [ (f90202) 0x0457 = 1111, [ (f901fc) [ (f901f9) header ] ] ]
let expected = hex!(
"f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
);
let mut data = vec![];
RequestPair {
request_id: 1111,
message: BlockHeaders(vec![
Header {
parent_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
ommers_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
beneficiary: hex!("0000000000000000000000000000000000000000").into(),
state_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
transactions_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
receipts_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(),
difficulty: U256::from(0x8aeu64),
number: 0xd05u64,
gas_limit: 0x115c,
gas_used: 0x15b3,
timestamp: 0x1a0au64,
extra_data: hex!("7788")[..].into(),
mix_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
nonce: 0x0000000000000000u64.into(),
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
]),
}.encode(&mut data);
assert_eq!(data, expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn decode_block_header() {
let data = hex!(
"f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
);
let expected = RequestPair {
request_id: 1111,
message: BlockHeaders(vec![
Header {
parent_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
ommers_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
beneficiary: hex!("0000000000000000000000000000000000000000").into(),
state_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
transactions_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
receipts_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(),
difficulty: U256::from(0x8aeu64),
number: 0xd05u64,
gas_limit: 0x115c,
gas_used: 0x15b3,
timestamp: 0x1a0au64,
extra_data: hex!("7788")[..].into(),
mix_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
nonce: 0x0000000000000000u64.into(),
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
]),
};
let result = RequestPair::decode(&mut &data[..]);
assert_eq!(result.unwrap(), expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn encode_get_block_bodies() {
let expected = hex!(
"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
);
let mut data = vec![];
RequestPair {
request_id: 1111,
message: GetBlockBodies(vec![
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
]),
}
.encode(&mut data);
assert_eq!(data, expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn decode_get_block_bodies() {
let data = hex!(
"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"
);
let expected = RequestPair {
request_id: 1111,
message: GetBlockBodies(vec![
hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(),
]),
};
let result = RequestPair::decode(&mut &data[..]);
assert_eq!(result.unwrap(), expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn encode_block_bodies() {
let expected = hex!(
"f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
);
let mut data = vec![];
let request = RequestPair {
request_id: 1111,
message: BlockBodies(vec![
BlockBody {
transactions: vec![
TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x8u64,
gas_price: 0x4a817c808,
gas_limit: 0x2e248,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x200u64),
input: Default::default(),
}), Signature::new(
U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(),
U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(),
false,
),
),
TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x9u64,
gas_price: 0x4a817c809,
gas_limit: 0x33450,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x2d9u64),
input: Default::default(),
}), Signature::new(
U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(),
U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(),
false,
),
),
],
ommers: vec![
Header {
parent_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
ommers_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
beneficiary: hex!("0000000000000000000000000000000000000000").into(),
state_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
transactions_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
receipts_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(),
difficulty: U256::from(0x8aeu64),
number: 0xd05u64,
gas_limit: 0x115c,
gas_used: 0x15b3,
timestamp: 0x1a0au64,
extra_data: hex!("7788")[..].into(),
mix_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
nonce: 0x0000000000000000u64.into(),
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
],
withdrawals: None,
}
]),
};
request.encode(&mut data);
assert_eq!(data, expected);
}
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
#[test]
fn decode_block_bodies() {
let data = hex!(
"f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
);
let expected = RequestPair {
request_id: 1111,
message: BlockBodies(vec![
BlockBody {
transactions: vec![
TransactionSigned::new_unhashed(Transaction::Legacy(
TxLegacy {
chain_id: Some(1),
nonce: 0x8u64,
gas_price: 0x4a817c808,
gas_limit: 0x2e248,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x200u64),
input: Default::default(),
}),
Signature::new(
U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(),
U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(),
false,
),
),
TransactionSigned::new_unhashed(
Transaction::Legacy(TxLegacy {
chain_id: Some(1),
nonce: 0x9u64,
gas_price: 0x4a817c809,
gas_limit: 0x33450,
to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
value: U256::from(0x2d9u64),
input: Default::default(),
}),
Signature::new(
U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(),
U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(),
false,
),
),
],
ommers: vec![
Header {
parent_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
ommers_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
beneficiary: hex!("0000000000000000000000000000000000000000").into(),
state_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
transactions_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
receipts_root: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(),
difficulty: U256::from(0x8aeu64),
number: 0xd05u64,
gas_limit: 0x115c,
gas_used: 0x15b3,
timestamp: 0x1a0au64,
extra_data: hex!("7788")[..].into(),
mix_hash: hex!("0000000000000000000000000000000000000000000000000000000000000000").into(),
nonce: 0x0000000000000000u64.into(),
base_fee_per_gas: None,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
},
],
withdrawals: None,
}
]),
};
let result = RequestPair::decode(&mut &data[..]).unwrap();
assert_eq!(result, expected);
}
#[test]
fn empty_block_bodies_rlp() {
let body = BlockBodies::default();
let mut buf = Vec::new();
body.encode(&mut buf);
let decoded = BlockBodies::<BlockBody>::decode(&mut buf.as_slice()).unwrap();
assert_eq!(body, decoded);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/capability.rs | crates/net/eth-wire-types/src/capability.rs | //! All capability related types
use crate::{EthMessageID, EthVersion};
use alloc::{borrow::Cow, string::String, vec::Vec};
use alloy_primitives::bytes::Bytes;
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use bytes::BufMut;
use core::fmt;
use reth_codecs_derive::add_arbitrary_tests;
/// A Capability message consisting of the message-id and the payload.
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct RawCapabilityMessage {
/// Identifier of the message.
pub id: usize,
/// Actual __encoded__ payload
pub payload: Bytes,
}
impl RawCapabilityMessage {
/// Creates a new capability message with the given id and payload.
pub const fn new(id: usize, payload: Bytes) -> Self {
Self { id, payload }
}
/// Creates a raw message for the eth sub-protocol.
///
/// Caller must ensure that the rlp encoded `payload` matches the given `id`.
///
/// See also [`EthMessage`](crate::EthMessage)
pub const fn eth(id: EthMessageID, payload: Bytes) -> Self {
Self::new(id.to_u8() as usize, payload)
}
}
impl Encodable for RawCapabilityMessage {
/// Encodes the `RawCapabilityMessage` into an RLP byte stream.
fn encode(&self, out: &mut dyn BufMut) {
self.id.encode(out);
out.put_slice(&self.payload);
}
/// Returns the total length of the encoded message.
fn length(&self) -> usize {
self.id.length() + self.payload.len()
}
}
impl Decodable for RawCapabilityMessage {
/// Decodes a `RawCapabilityMessage` from an RLP byte stream.
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let id = usize::decode(buf)?;
let payload = Bytes::copy_from_slice(buf);
*buf = &buf[buf.len()..];
Ok(Self { id, payload })
}
}
/// A message indicating a supported capability and capability version.
#[add_arbitrary_tests(rlp)]
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Capability {
/// The name of the subprotocol
pub name: Cow<'static, str>,
/// The version of the subprotocol
pub version: usize,
}
impl Capability {
/// Create a new `Capability` with the given name and version.
pub const fn new(name: String, version: usize) -> Self {
Self { name: Cow::Owned(name), version }
}
/// Create a new `Capability` with the given static name and version.
pub const fn new_static(name: &'static str, version: usize) -> Self {
Self { name: Cow::Borrowed(name), version }
}
/// Returns the corresponding eth capability for the given version.
pub const fn eth(version: EthVersion) -> Self {
Self::new_static("eth", version as usize)
}
/// Returns the [`EthVersion::Eth66`] capability.
pub const fn eth_66() -> Self {
Self::eth(EthVersion::Eth66)
}
/// Returns the [`EthVersion::Eth67`] capability.
pub const fn eth_67() -> Self {
Self::eth(EthVersion::Eth67)
}
/// Returns the [`EthVersion::Eth68`] capability.
pub const fn eth_68() -> Self {
Self::eth(EthVersion::Eth68)
}
/// Whether this is eth v66 protocol.
#[inline]
pub fn is_eth_v66(&self) -> bool {
self.name == "eth" && self.version == 66
}
/// Whether this is eth v67.
#[inline]
pub fn is_eth_v67(&self) -> bool {
self.name == "eth" && self.version == 67
}
/// Whether this is eth v68.
#[inline]
pub fn is_eth_v68(&self) -> bool {
self.name == "eth" && self.version == 68
}
/// Whether this is any eth version.
#[inline]
pub fn is_eth(&self) -> bool {
self.is_eth_v66() || self.is_eth_v67() || self.is_eth_v68()
}
}
impl fmt::Display for Capability {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}/{}", self.name, self.version)
}
}
impl From<EthVersion> for Capability {
#[inline]
fn from(value: EthVersion) -> Self {
Self::eth(value)
}
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> arbitrary::Arbitrary<'a> for Capability {
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let version = u.int_in_range(66..=69)?; // Valid eth protocol versions are 66-69
// Only generate valid eth protocol name for now since it's the only supported protocol
Ok(Self::new_static("eth", version))
}
}
/// Represents all capabilities of a node.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Capabilities {
/// All Capabilities and their versions
inner: Vec<Capability>,
eth_66: bool,
eth_67: bool,
eth_68: bool,
}
impl Capabilities {
/// Returns all capabilities.
#[inline]
pub fn capabilities(&self) -> &[Capability] {
&self.inner
}
/// Consumes the type and returns the all capabilities.
#[inline]
pub fn into_inner(self) -> Vec<Capability> {
self.inner
}
/// Whether the peer supports `eth` sub-protocol.
#[inline]
pub const fn supports_eth(&self) -> bool {
self.eth_68 || self.eth_67 || self.eth_66
}
/// Whether this peer supports eth v66 protocol.
#[inline]
pub const fn supports_eth_v66(&self) -> bool {
self.eth_66
}
/// Whether this peer supports eth v67 protocol.
#[inline]
pub const fn supports_eth_v67(&self) -> bool {
self.eth_67
}
/// Whether this peer supports eth v68 protocol.
#[inline]
pub const fn supports_eth_v68(&self) -> bool {
self.eth_68
}
}
impl From<Vec<Capability>> for Capabilities {
fn from(value: Vec<Capability>) -> Self {
Self {
eth_66: value.iter().any(Capability::is_eth_v66),
eth_67: value.iter().any(Capability::is_eth_v67),
eth_68: value.iter().any(Capability::is_eth_v68),
inner: value,
}
}
}
impl Encodable for Capabilities {
fn encode(&self, out: &mut dyn BufMut) {
self.inner.encode(out)
}
}
impl Decodable for Capabilities {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let inner = Vec::<Capability>::decode(buf)?;
Ok(Self {
eth_66: inner.iter().any(Capability::is_eth_v66),
eth_67: inner.iter().any(Capability::is_eth_v67),
eth_68: inner.iter().any(Capability::is_eth_v68),
inner,
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/disconnect_reason.rs | crates/net/eth-wire-types/src/disconnect_reason.rs | //! `RLPx` disconnect reason sent to/received from peer
use alloc::vec;
use alloy_primitives::bytes::{Buf, BufMut};
use alloy_rlp::{Decodable, Encodable, Header};
use derive_more::Display;
use reth_codecs_derive::add_arbitrary_tests;
use thiserror::Error;
/// `RLPx` disconnect reason.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Display)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub enum DisconnectReason {
/// Disconnect requested by the local node or remote peer.
#[default]
#[display("disconnect requested")]
DisconnectRequested = 0x00,
/// TCP related error
#[display("TCP sub-system error")]
TcpSubsystemError = 0x01,
/// Breach of protocol at the transport or p2p level
#[display("breach of protocol, e.g. a malformed message, bad RLP, etc.")]
ProtocolBreach = 0x02,
/// Node has no matching protocols.
#[display("useless peer")]
UselessPeer = 0x03,
/// Either the remote or local node has too many peers.
#[display("too many peers")]
TooManyPeers = 0x04,
/// Already connected to the peer.
#[display("already connected")]
AlreadyConnected = 0x05,
/// `p2p` protocol version is incompatible
#[display("incompatible P2P protocol version")]
IncompatibleP2PProtocolVersion = 0x06,
/// Received a null node identity.
#[display("null node identity received - this is automatically invalid")]
NullNodeIdentity = 0x07,
/// Reason when the client is shutting down.
#[display("client quitting")]
ClientQuitting = 0x08,
/// When the received handshake's identify is different from what is expected.
#[display("unexpected identity in handshake")]
UnexpectedHandshakeIdentity = 0x09,
/// The node is connected to itself
#[display("identity is the same as this node (i.e. connected to itself)")]
ConnectedToSelf = 0x0a,
/// Peer or local node did not respond to a ping in time.
#[display("ping timeout")]
PingTimeout = 0x0b,
/// Peer or local node violated a subprotocol-specific rule.
#[display("some other reason specific to a subprotocol")]
SubprotocolSpecific = 0x10,
}
impl TryFrom<u8> for DisconnectReason {
// This error type should not be used to crash the node, but rather to log the error and
// disconnect the peer.
type Error = UnknownDisconnectReason;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0x00 => Ok(Self::DisconnectRequested),
0x01 => Ok(Self::TcpSubsystemError),
0x02 => Ok(Self::ProtocolBreach),
0x03 => Ok(Self::UselessPeer),
0x04 => Ok(Self::TooManyPeers),
0x05 => Ok(Self::AlreadyConnected),
0x06 => Ok(Self::IncompatibleP2PProtocolVersion),
0x07 => Ok(Self::NullNodeIdentity),
0x08 => Ok(Self::ClientQuitting),
0x09 => Ok(Self::UnexpectedHandshakeIdentity),
0x0a => Ok(Self::ConnectedToSelf),
0x0b => Ok(Self::PingTimeout),
0x10 => Ok(Self::SubprotocolSpecific),
_ => Err(UnknownDisconnectReason(value)),
}
}
}
impl Encodable for DisconnectReason {
/// The [`Encodable`] implementation for [`DisconnectReason`] encodes the disconnect reason in
/// a single-element RLP list.
fn encode(&self, out: &mut dyn BufMut) {
vec![*self as u8].encode(out);
}
fn length(&self) -> usize {
vec![*self as u8].length()
}
}
impl Decodable for DisconnectReason {
/// The [`Decodable`] implementation for [`DisconnectReason`] supports either a disconnect
/// reason encoded a single byte or a RLP list containing the disconnect reason.
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
if buf.is_empty() {
return Err(alloy_rlp::Error::InputTooShort)
} else if buf.len() > 2 {
return Err(alloy_rlp::Error::Overflow)
}
if buf.len() > 1 {
// this should be a list, so decode the list header. this should advance the buffer so
// buf[0] is the first (and only) element of the list.
let header = Header::decode(buf)?;
if !header.list {
return Err(alloy_rlp::Error::UnexpectedString)
}
if header.payload_length != 1 {
return Err(alloy_rlp::Error::ListLengthMismatch {
expected: 1,
got: header.payload_length,
})
}
}
// geth rlp encodes [`DisconnectReason::DisconnectRequested`] as 0x00 and not as empty
// string 0x80
if buf[0] == 0x00 {
buf.advance(1);
Ok(Self::DisconnectRequested)
} else {
Self::try_from(u8::decode(buf)?)
.map_err(|_| alloy_rlp::Error::Custom("unknown disconnect reason"))
}
}
}
/// This represents an unknown disconnect reason with the given code.
#[derive(Debug, Clone, Error)]
#[error("unknown disconnect reason: {0}")]
pub struct UnknownDisconnectReason(u8);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/message.rs | crates/net/eth-wire-types/src/message.rs | //! Implements Ethereum wire protocol for versions 66, 67, and 68.
//! Defines structs/enums for messages, request-response pairs, and broadcasts.
//! Handles compatibility with [`EthVersion`].
//!
//! Examples include creating, encoding, and decoding protocol messages.
//!
//! Reference: [Ethereum Wire Protocol](https://github.com/ethereum/devp2p/blob/master/caps/eth.md).
use super::{
broadcast::NewBlockHashes, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders,
GetNodeData, GetPooledTransactions, GetReceipts, NewPooledTransactionHashes66,
NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, StatusEth69,
Transactions,
};
use crate::{
status::StatusMessage, BlockRangeUpdate, EthNetworkPrimitives, EthVersion, NetworkPrimitives,
RawCapabilityMessage, Receipts69, SharedTransactions,
};
use alloc::{boxed::Box, string::String, sync::Arc};
use alloy_primitives::{
bytes::{Buf, BufMut},
Bytes,
};
use alloy_rlp::{length_of_length, Decodable, Encodable, Header};
use core::fmt::Debug;
/// [`MAX_MESSAGE_SIZE`] is the maximum cap on the size of a protocol message.
// https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50
pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024;
/// Error when sending/receiving a message
#[derive(thiserror::Error, Debug)]
pub enum MessageError {
/// Flags an unrecognized message ID for a given protocol version.
#[error("message id {1:?} is invalid for version {0:?}")]
Invalid(EthVersion, EthMessageID),
/// Thrown when rlp decoding a message failed.
#[error("RLP error: {0}")]
RlpError(#[from] alloy_rlp::Error),
/// Other message error with custom message
#[error("{0}")]
Other(String),
}
/// An `eth` protocol message, containing a message ID and payload.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ProtocolMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
/// The unique identifier representing the type of the Ethereum message.
pub message_type: EthMessageID,
/// The content of the message, including specific data based on the message type.
#[cfg_attr(
feature = "serde",
serde(bound = "EthMessage<N>: serde::Serialize + serde::de::DeserializeOwned")
)]
pub message: EthMessage<N>,
}
impl<N: NetworkPrimitives> ProtocolMessage<N> {
/// Create a new `ProtocolMessage` from a message type and message rlp bytes.
///
/// This will enforce decoding according to the given [`EthVersion`] of the connection.
pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result<Self, MessageError> {
let message_type = EthMessageID::decode(buf)?;
// For EIP-7642 (https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7642.md):
// pre-merge (legacy) status messages include total difficulty, whereas eth/69 omits it.
let message = match message_type {
EthMessageID::Status => EthMessage::Status(if version < EthVersion::Eth69 {
StatusMessage::Legacy(Status::decode(buf)?)
} else {
StatusMessage::Eth69(StatusEth69::decode(buf)?)
}),
EthMessageID::NewBlockHashes => {
EthMessage::NewBlockHashes(NewBlockHashes::decode(buf)?)
}
EthMessageID::NewBlock => {
EthMessage::NewBlock(Box::new(N::NewBlockPayload::decode(buf)?))
}
EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?),
EthMessageID::NewPooledTransactionHashes => {
if version >= EthVersion::Eth68 {
EthMessage::NewPooledTransactionHashes68(NewPooledTransactionHashes68::decode(
buf,
)?)
} else {
EthMessage::NewPooledTransactionHashes66(NewPooledTransactionHashes66::decode(
buf,
)?)
}
}
EthMessageID::GetBlockHeaders => EthMessage::GetBlockHeaders(RequestPair::decode(buf)?),
EthMessageID::BlockHeaders => EthMessage::BlockHeaders(RequestPair::decode(buf)?),
EthMessageID::GetBlockBodies => EthMessage::GetBlockBodies(RequestPair::decode(buf)?),
EthMessageID::BlockBodies => EthMessage::BlockBodies(RequestPair::decode(buf)?),
EthMessageID::GetPooledTransactions => {
EthMessage::GetPooledTransactions(RequestPair::decode(buf)?)
}
EthMessageID::PooledTransactions => {
EthMessage::PooledTransactions(RequestPair::decode(buf)?)
}
EthMessageID::GetNodeData => {
// GetNodeData is disabled in all versions to prevent privacy leaks
return Err(MessageError::Invalid(version, EthMessageID::GetNodeData))
}
EthMessageID::NodeData => {
// NodeData is disabled in all versions to prevent privacy leaks
return Err(MessageError::Invalid(version, EthMessageID::NodeData))
}
EthMessageID::GetReceipts => EthMessage::GetReceipts(RequestPair::decode(buf)?),
EthMessageID::Receipts => {
if version < EthVersion::Eth69 {
EthMessage::Receipts(RequestPair::decode(buf)?)
} else {
// with eth69, receipts no longer include the bloom
EthMessage::Receipts69(RequestPair::decode(buf)?)
}
}
EthMessageID::BlockRangeUpdate => {
if version < EthVersion::Eth69 {
return Err(MessageError::Invalid(version, EthMessageID::BlockRangeUpdate))
}
EthMessage::BlockRangeUpdate(BlockRangeUpdate::decode(buf)?)
}
EthMessageID::Other(_) => {
let raw_payload = Bytes::copy_from_slice(buf);
buf.advance(raw_payload.len());
EthMessage::Other(RawCapabilityMessage::new(
message_type.to_u8() as usize,
raw_payload.into(),
))
}
};
Ok(Self { message_type, message })
}
}
impl<N: NetworkPrimitives> Encodable for ProtocolMessage<N> {
/// Encodes the protocol message into bytes. The message type is encoded as a single byte and
/// prepended to the message.
fn encode(&self, out: &mut dyn BufMut) {
self.message_type.encode(out);
self.message.encode(out);
}
fn length(&self) -> usize {
self.message_type.length() + self.message.length()
}
}
impl<N: NetworkPrimitives> From<EthMessage<N>> for ProtocolMessage<N> {
fn from(message: EthMessage<N>) -> Self {
Self { message_type: message.message_id(), message }
}
}
/// Represents messages that can be sent to multiple peers.
#[derive(Clone, Debug)]
pub struct ProtocolBroadcastMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
/// The unique identifier representing the type of the Ethereum message.
pub message_type: EthMessageID,
/// The content of the message to be broadcasted, including specific data based on the message
/// type.
pub message: EthBroadcastMessage<N>,
}
impl<N: NetworkPrimitives> Encodable for ProtocolBroadcastMessage<N> {
/// Encodes the protocol message into bytes. The message type is encoded as a single byte and
/// prepended to the message.
fn encode(&self, out: &mut dyn BufMut) {
self.message_type.encode(out);
self.message.encode(out);
}
fn length(&self) -> usize {
self.message_type.length() + self.message.length()
}
}
impl<N: NetworkPrimitives> From<EthBroadcastMessage<N>> for ProtocolBroadcastMessage<N> {
fn from(message: EthBroadcastMessage<N>) -> Self {
Self { message_type: message.message_id(), message }
}
}
/// Represents a message in the eth wire protocol, versions 66, 67, 68 and 69.
///
/// The ethereum wire protocol is a set of messages that are broadcast to the network in two
/// styles:
/// * A request message sent by a peer (such as [`GetPooledTransactions`]), and an associated
/// response message (such as [`PooledTransactions`]).
/// * A message that is broadcast to the network, without a corresponding request.
///
/// The newer `eth/66` is an efficiency upgrade on top of `eth/65`, introducing a request id to
/// correlate request-response message pairs. This allows for request multiplexing.
///
/// The `eth/67` is based on `eth/66` but only removes two messages, [`GetNodeData`] and
/// [`NodeData`].
///
/// The `eth/68` changes only `NewPooledTransactionHashes` to include `types` and `sized`. For
/// it, `NewPooledTransactionHashes` is renamed as [`NewPooledTransactionHashes66`] and
/// [`NewPooledTransactionHashes68`] is defined.
///
/// The `eth/69` announces the historical block range served by the node. Removes total difficulty
/// information. And removes the Bloom field from receipts transferred over the protocol.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum EthMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
/// Represents a Status message required for the protocol handshake.
Status(StatusMessage),
/// Represents a `NewBlockHashes` message broadcast to the network.
NewBlockHashes(NewBlockHashes),
/// Represents a `NewBlock` message broadcast to the network.
#[cfg_attr(
feature = "serde",
serde(bound = "N::NewBlockPayload: serde::Serialize + serde::de::DeserializeOwned")
)]
NewBlock(Box<N::NewBlockPayload>),
/// Represents a Transactions message broadcast to the network.
#[cfg_attr(
feature = "serde",
serde(bound = "N::BroadcastedTransaction: serde::Serialize + serde::de::DeserializeOwned")
)]
Transactions(Transactions<N::BroadcastedTransaction>),
/// Represents a `NewPooledTransactionHashes` message for eth/66 version.
NewPooledTransactionHashes66(NewPooledTransactionHashes66),
/// Represents a `NewPooledTransactionHashes` message for eth/68 version.
NewPooledTransactionHashes68(NewPooledTransactionHashes68),
// The following messages are request-response message pairs
/// Represents a `GetBlockHeaders` request-response pair.
GetBlockHeaders(RequestPair<GetBlockHeaders>),
/// Represents a `BlockHeaders` request-response pair.
#[cfg_attr(
feature = "serde",
serde(bound = "N::BlockHeader: serde::Serialize + serde::de::DeserializeOwned")
)]
BlockHeaders(RequestPair<BlockHeaders<N::BlockHeader>>),
/// Represents a `GetBlockBodies` request-response pair.
GetBlockBodies(RequestPair<GetBlockBodies>),
/// Represents a `BlockBodies` request-response pair.
#[cfg_attr(
feature = "serde",
serde(bound = "N::BlockBody: serde::Serialize + serde::de::DeserializeOwned")
)]
BlockBodies(RequestPair<BlockBodies<N::BlockBody>>),
/// Represents a `GetPooledTransactions` request-response pair.
GetPooledTransactions(RequestPair<GetPooledTransactions>),
/// Represents a `PooledTransactions` request-response pair.
#[cfg_attr(
feature = "serde",
serde(bound = "N::PooledTransaction: serde::Serialize + serde::de::DeserializeOwned")
)]
PooledTransactions(RequestPair<PooledTransactions<N::PooledTransaction>>),
/// Represents a `GetNodeData` request-response pair.
GetNodeData(RequestPair<GetNodeData>),
/// Represents a `NodeData` request-response pair.
NodeData(RequestPair<NodeData>),
/// Represents a `GetReceipts` request-response pair.
GetReceipts(RequestPair<GetReceipts>),
/// Represents a Receipts request-response pair.
#[cfg_attr(
feature = "serde",
serde(bound = "N::Receipt: serde::Serialize + serde::de::DeserializeOwned")
)]
Receipts(RequestPair<Receipts<N::Receipt>>),
/// Represents a Receipts request-response pair for eth/69.
#[cfg_attr(
feature = "serde",
serde(bound = "N::Receipt: serde::Serialize + serde::de::DeserializeOwned")
)]
Receipts69(RequestPair<Receipts69<N::Receipt>>),
/// Represents a `BlockRangeUpdate` message broadcast to the network.
#[cfg_attr(
feature = "serde",
serde(bound = "N::BroadcastedTransaction: serde::Serialize + serde::de::DeserializeOwned")
)]
BlockRangeUpdate(BlockRangeUpdate),
/// Represents an encoded message that doesn't match any other variant
Other(RawCapabilityMessage),
}
impl<N: NetworkPrimitives> EthMessage<N> {
/// Returns the message's ID.
pub const fn message_id(&self) -> EthMessageID {
match self {
Self::Status(_) => EthMessageID::Status,
Self::NewBlockHashes(_) => EthMessageID::NewBlockHashes,
Self::NewBlock(_) => EthMessageID::NewBlock,
Self::Transactions(_) => EthMessageID::Transactions,
Self::NewPooledTransactionHashes66(_) | Self::NewPooledTransactionHashes68(_) => {
EthMessageID::NewPooledTransactionHashes
}
Self::GetBlockHeaders(_) => EthMessageID::GetBlockHeaders,
Self::BlockHeaders(_) => EthMessageID::BlockHeaders,
Self::GetBlockBodies(_) => EthMessageID::GetBlockBodies,
Self::BlockBodies(_) => EthMessageID::BlockBodies,
Self::GetPooledTransactions(_) => EthMessageID::GetPooledTransactions,
Self::PooledTransactions(_) => EthMessageID::PooledTransactions,
Self::GetNodeData(_) => EthMessageID::GetNodeData,
Self::NodeData(_) => EthMessageID::NodeData,
Self::GetReceipts(_) => EthMessageID::GetReceipts,
Self::Receipts(_) | Self::Receipts69(_) => EthMessageID::Receipts,
Self::BlockRangeUpdate(_) => EthMessageID::BlockRangeUpdate,
Self::Other(msg) => EthMessageID::Other(msg.id as u8),
}
}
/// Returns true if the message variant is a request.
pub const fn is_request(&self) -> bool {
matches!(
self,
Self::GetBlockBodies(_) |
Self::GetBlockHeaders(_) |
Self::GetReceipts(_) |
Self::GetPooledTransactions(_) |
Self::GetNodeData(_)
)
}
/// Returns true if the message variant is a response to a request.
pub const fn is_response(&self) -> bool {
matches!(
self,
Self::PooledTransactions(_) |
Self::Receipts(_) |
Self::Receipts69(_) |
Self::BlockHeaders(_) |
Self::BlockBodies(_) |
Self::NodeData(_)
)
}
}
impl<N: NetworkPrimitives> Encodable for EthMessage<N> {
fn encode(&self, out: &mut dyn BufMut) {
match self {
Self::Status(status) => status.encode(out),
Self::NewBlockHashes(new_block_hashes) => new_block_hashes.encode(out),
Self::NewBlock(new_block) => new_block.encode(out),
Self::Transactions(transactions) => transactions.encode(out),
Self::NewPooledTransactionHashes66(hashes) => hashes.encode(out),
Self::NewPooledTransactionHashes68(hashes) => hashes.encode(out),
Self::GetBlockHeaders(request) => request.encode(out),
Self::BlockHeaders(headers) => headers.encode(out),
Self::GetBlockBodies(request) => request.encode(out),
Self::BlockBodies(bodies) => bodies.encode(out),
Self::GetPooledTransactions(request) => request.encode(out),
Self::PooledTransactions(transactions) => transactions.encode(out),
Self::GetNodeData(request) => request.encode(out),
Self::NodeData(data) => data.encode(out),
Self::GetReceipts(request) => request.encode(out),
Self::Receipts(receipts) => receipts.encode(out),
Self::Receipts69(receipt69) => receipt69.encode(out),
Self::BlockRangeUpdate(block_range_update) => block_range_update.encode(out),
Self::Other(unknown) => out.put_slice(&unknown.payload),
}
}
fn length(&self) -> usize {
match self {
Self::Status(status) => status.length(),
Self::NewBlockHashes(new_block_hashes) => new_block_hashes.length(),
Self::NewBlock(new_block) => new_block.length(),
Self::Transactions(transactions) => transactions.length(),
Self::NewPooledTransactionHashes66(hashes) => hashes.length(),
Self::NewPooledTransactionHashes68(hashes) => hashes.length(),
Self::GetBlockHeaders(request) => request.length(),
Self::BlockHeaders(headers) => headers.length(),
Self::GetBlockBodies(request) => request.length(),
Self::BlockBodies(bodies) => bodies.length(),
Self::GetPooledTransactions(request) => request.length(),
Self::PooledTransactions(transactions) => transactions.length(),
Self::GetNodeData(request) => request.length(),
Self::NodeData(data) => data.length(),
Self::GetReceipts(request) => request.length(),
Self::Receipts(receipts) => receipts.length(),
Self::Receipts69(receipt69) => receipt69.length(),
Self::BlockRangeUpdate(block_range_update) => block_range_update.length(),
Self::Other(unknown) => unknown.length(),
}
}
}
/// Represents broadcast messages of [`EthMessage`] with the same object that can be sent to
/// multiple peers.
///
/// Messages that contain a list of hashes depend on the peer the message is sent to. A peer should
/// never receive a hash of an object (block, transaction) it has already seen.
///
/// Note: This is only useful for outgoing messages.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EthBroadcastMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
/// Represents a new block broadcast message.
NewBlock(Arc<N::NewBlockPayload>),
/// Represents a transactions broadcast message.
Transactions(SharedTransactions<N::BroadcastedTransaction>),
}
// === impl EthBroadcastMessage ===
impl<N: NetworkPrimitives> EthBroadcastMessage<N> {
/// Returns the message's ID.
pub const fn message_id(&self) -> EthMessageID {
match self {
Self::NewBlock(_) => EthMessageID::NewBlock,
Self::Transactions(_) => EthMessageID::Transactions,
}
}
}
impl<N: NetworkPrimitives> Encodable for EthBroadcastMessage<N> {
fn encode(&self, out: &mut dyn BufMut) {
match self {
Self::NewBlock(new_block) => new_block.encode(out),
Self::Transactions(transactions) => transactions.encode(out),
}
}
fn length(&self) -> usize {
match self {
Self::NewBlock(new_block) => new_block.length(),
Self::Transactions(transactions) => transactions.length(),
}
}
}
/// Represents message IDs for eth protocol messages.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum EthMessageID {
/// Status message.
Status = 0x00,
/// New block hashes message.
NewBlockHashes = 0x01,
/// Transactions message.
Transactions = 0x02,
/// Get block headers message.
GetBlockHeaders = 0x03,
/// Block headers message.
BlockHeaders = 0x04,
/// Get block bodies message.
GetBlockBodies = 0x05,
/// Block bodies message.
BlockBodies = 0x06,
/// New block message.
NewBlock = 0x07,
/// New pooled transaction hashes message.
NewPooledTransactionHashes = 0x08,
/// Requests pooled transactions.
GetPooledTransactions = 0x09,
/// Represents pooled transactions.
PooledTransactions = 0x0a,
/// Requests node data.
GetNodeData = 0x0d,
/// Represents node data.
NodeData = 0x0e,
/// Requests receipts.
GetReceipts = 0x0f,
/// Represents receipts.
Receipts = 0x10,
/// Block range update.
///
/// Introduced in Eth69
BlockRangeUpdate = 0x11,
/// Represents unknown message types.
Other(u8),
}
impl EthMessageID {
/// Returns the corresponding `u8` value for an `EthMessageID`.
pub const fn to_u8(&self) -> u8 {
match self {
Self::Status => 0x00,
Self::NewBlockHashes => 0x01,
Self::Transactions => 0x02,
Self::GetBlockHeaders => 0x03,
Self::BlockHeaders => 0x04,
Self::GetBlockBodies => 0x05,
Self::BlockBodies => 0x06,
Self::NewBlock => 0x07,
Self::NewPooledTransactionHashes => 0x08,
Self::GetPooledTransactions => 0x09,
Self::PooledTransactions => 0x0a,
Self::GetNodeData => 0x0d,
Self::NodeData => 0x0e,
Self::GetReceipts => 0x0f,
Self::Receipts => 0x10,
Self::BlockRangeUpdate => 0x11,
Self::Other(value) => *value, // Return the stored `u8`
}
}
/// Returns the max value for the given version.
pub const fn max(version: EthVersion) -> u8 {
if version.is_eth69() {
Self::BlockRangeUpdate.to_u8()
} else {
Self::Receipts.to_u8()
}
}
/// Returns the total number of message types for the given version.
///
/// This is used for message ID multiplexing.
///
/// <https://github.com/ethereum/go-ethereum/blob/85077be58edea572f29c3b1a6a055077f1a56a8b/eth/protocols/eth/protocol.go#L45-L47>
pub const fn message_count(version: EthVersion) -> u8 {
Self::max(version) + 1
}
}
impl Encodable for EthMessageID {
fn encode(&self, out: &mut dyn BufMut) {
out.put_u8(self.to_u8());
}
fn length(&self) -> usize {
1
}
}
impl Decodable for EthMessageID {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let id = match buf.first().ok_or(alloy_rlp::Error::InputTooShort)? {
0x00 => Self::Status,
0x01 => Self::NewBlockHashes,
0x02 => Self::Transactions,
0x03 => Self::GetBlockHeaders,
0x04 => Self::BlockHeaders,
0x05 => Self::GetBlockBodies,
0x06 => Self::BlockBodies,
0x07 => Self::NewBlock,
0x08 => Self::NewPooledTransactionHashes,
0x09 => Self::GetPooledTransactions,
0x0a => Self::PooledTransactions,
0x0d => Self::GetNodeData,
0x0e => Self::NodeData,
0x0f => Self::GetReceipts,
0x10 => Self::Receipts,
0x11 => Self::BlockRangeUpdate,
unknown => Self::Other(*unknown),
};
buf.advance(1);
Ok(id)
}
}
impl TryFrom<usize> for EthMessageID {
type Error = &'static str;
fn try_from(value: usize) -> Result<Self, Self::Error> {
match value {
0x00 => Ok(Self::Status),
0x01 => Ok(Self::NewBlockHashes),
0x02 => Ok(Self::Transactions),
0x03 => Ok(Self::GetBlockHeaders),
0x04 => Ok(Self::BlockHeaders),
0x05 => Ok(Self::GetBlockBodies),
0x06 => Ok(Self::BlockBodies),
0x07 => Ok(Self::NewBlock),
0x08 => Ok(Self::NewPooledTransactionHashes),
0x09 => Ok(Self::GetPooledTransactions),
0x0a => Ok(Self::PooledTransactions),
0x0d => Ok(Self::GetNodeData),
0x0e => Ok(Self::NodeData),
0x0f => Ok(Self::GetReceipts),
0x10 => Ok(Self::Receipts),
0x11 => Ok(Self::BlockRangeUpdate),
_ => Err("Invalid message ID"),
}
}
}
/// This is used for all request-response style `eth` protocol messages.
/// This can represent either a request or a response, since both include a message payload and
/// request id.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub struct RequestPair<T> {
/// id for the contained request or response message
pub request_id: u64,
/// the request or response message payload
pub message: T,
}
impl<T> RequestPair<T> {
/// Converts the message type with the given closure.
pub fn map<F, R>(self, f: F) -> RequestPair<R>
where
F: FnOnce(T) -> R,
{
let Self { request_id, message } = self;
RequestPair { request_id, message: f(message) }
}
}
/// Allows messages with request ids to be serialized into RLP bytes.
impl<T> Encodable for RequestPair<T>
where
T: Encodable,
{
fn encode(&self, out: &mut dyn alloy_rlp::BufMut) {
let header =
Header { list: true, payload_length: self.request_id.length() + self.message.length() };
header.encode(out);
self.request_id.encode(out);
self.message.encode(out);
}
fn length(&self) -> usize {
let mut length = 0;
length += self.request_id.length();
length += self.message.length();
length += length_of_length(length);
length
}
}
/// Allows messages with request ids to be deserialized into RLP bytes.
impl<T> Decodable for RequestPair<T>
where
T: Decodable,
{
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let header = Header::decode(buf)?;
let initial_length = buf.len();
let request_id = u64::decode(buf)?;
let message = T::decode(buf)?;
// Check that the buffer consumed exactly payload_length bytes after decoding the
// RequestPair
let consumed_len = initial_length - buf.len();
if consumed_len != header.payload_length {
return Err(alloy_rlp::Error::UnexpectedLength)
}
Ok(Self { request_id, message })
}
}
#[cfg(test)]
mod tests {
use super::MessageError;
use crate::{
message::RequestPair, EthMessage, EthMessageID, EthNetworkPrimitives, EthVersion,
GetNodeData, NodeData, ProtocolMessage, RawCapabilityMessage,
};
use alloy_primitives::hex;
use alloy_rlp::{Decodable, Encodable, Error};
use reth_ethereum_primitives::BlockBody;
fn encode<T: Encodable>(value: T) -> Vec<u8> {
let mut buf = vec![];
value.encode(&mut buf);
buf
}
#[test]
fn test_removed_message_at_eth67() {
let get_node_data = EthMessage::<EthNetworkPrimitives>::GetNodeData(RequestPair {
request_id: 1337,
message: GetNodeData(vec![]),
});
let buf = encode(ProtocolMessage {
message_type: EthMessageID::GetNodeData,
message: get_node_data,
});
let msg = ProtocolMessage::<EthNetworkPrimitives>::decode_message(
crate::EthVersion::Eth67,
&mut &buf[..],
);
assert!(matches!(msg, Err(MessageError::Invalid(..))));
let node_data = EthMessage::<EthNetworkPrimitives>::NodeData(RequestPair {
request_id: 1337,
message: NodeData(vec![]),
});
let buf =
encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data });
let msg = ProtocolMessage::<EthNetworkPrimitives>::decode_message(
crate::EthVersion::Eth67,
&mut &buf[..],
);
assert!(matches!(msg, Err(MessageError::Invalid(..))));
}
#[test]
fn test_get_node_data_disabled_for_privacy() {
// Test that GetNodeData is rejected in ETH66 to prevent privacy leaks
let get_node_data = EthMessage::<EthNetworkPrimitives>::GetNodeData(RequestPair {
request_id: 1337,
message: GetNodeData(vec![]),
});
let buf = encode(ProtocolMessage {
message_type: EthMessageID::GetNodeData,
message: get_node_data,
});
let msg = ProtocolMessage::<EthNetworkPrimitives>::decode_message(
crate::EthVersion::Eth66,
&mut &buf[..],
);
assert!(matches!(msg, Err(MessageError::Invalid(..))));
// Test that NodeData is also rejected in ETH66
let node_data = EthMessage::<EthNetworkPrimitives>::NodeData(RequestPair {
request_id: 1337,
message: NodeData(vec![]),
});
let buf =
encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data });
let msg = ProtocolMessage::<EthNetworkPrimitives>::decode_message(
crate::EthVersion::Eth66,
&mut &buf[..],
);
assert!(matches!(msg, Err(MessageError::Invalid(..))));
}
#[test]
fn request_pair_encode() {
let request_pair = RequestPair { request_id: 1337, message: vec![5u8] };
// c5: start of list (c0) + len(full_list) (length is <55 bytes)
// 82: 0x80 + len(1337)
// 05 39: 1337 (request_id)
// === full_list ===
// c1: start of list (c0) + len(list) (length is <55 bytes)
// 05: 5 (message)
let expected = hex!("c5820539c105");
let got = encode(request_pair);
assert_eq!(expected[..], got, "expected: {expected:X?}, got: {got:X?}",);
}
#[test]
fn request_pair_decode() {
let raw_pair = &hex!("c5820539c105")[..];
let expected = RequestPair { request_id: 1337, message: vec![5u8] };
let got = RequestPair::<Vec<u8>>::decode(&mut &*raw_pair).unwrap();
assert_eq!(expected.length(), raw_pair.len());
assert_eq!(expected, got);
}
#[test]
fn malicious_request_pair_decode() {
// A maliciously encoded request pair, where the len(full_list) is 5, but it
// actually consumes 6 bytes when decoding
//
// c5: start of list (c0) + len(full_list) (length is <55 bytes)
// 82: 0x80 + len(1337)
// 05 39: 1337 (request_id)
// === full_list ===
// c2: start of list (c0) + len(list) (length is <55 bytes)
// 05 05: 5 5(message)
let raw_pair = &hex!("c5820539c20505")[..];
let result = RequestPair::<Vec<u8>>::decode(&mut &*raw_pair);
assert!(matches!(result, Err(Error::UnexpectedLength)));
}
#[test]
fn empty_block_bodies_protocol() {
let empty_block_bodies =
ProtocolMessage::from(EthMessage::<EthNetworkPrimitives>::BlockBodies(RequestPair {
request_id: 0,
message: Default::default(),
}));
let mut buf = Vec::new();
empty_block_bodies.encode(&mut buf);
let decoded =
ProtocolMessage::decode_message(EthVersion::Eth68, &mut buf.as_slice()).unwrap();
assert_eq!(empty_block_bodies, decoded);
}
#[test]
fn empty_block_body_protocol() {
let empty_block_bodies =
ProtocolMessage::from(EthMessage::<EthNetworkPrimitives>::BlockBodies(RequestPair {
request_id: 0,
message: vec![BlockBody {
transactions: vec![],
ommers: vec![],
withdrawals: Some(Default::default()),
}]
.into(),
}));
let mut buf = Vec::new();
empty_block_bodies.encode(&mut buf);
let decoded =
ProtocolMessage::decode_message(EthVersion::Eth68, &mut buf.as_slice()).unwrap();
assert_eq!(empty_block_bodies, decoded);
}
#[test]
fn decode_block_bodies_message() {
let buf = hex!("06c48199c1c0");
let msg = ProtocolMessage::<EthNetworkPrimitives>::decode_message(
EthVersion::Eth68,
&mut &buf[..],
)
.unwrap_err();
assert!(matches!(msg, MessageError::RlpError(alloy_rlp::Error::InputTooShort)));
}
#[test]
fn custom_message_roundtrip() {
let custom_payload = vec![1, 2, 3, 4, 5];
let custom_message = RawCapabilityMessage::new(0x20, custom_payload.into());
let protocol_message = ProtocolMessage::<EthNetworkPrimitives> {
message_type: EthMessageID::Other(0x20),
message: EthMessage::Other(custom_message),
};
let encoded = encode(protocol_message.clone());
let decoded = ProtocolMessage::<EthNetworkPrimitives>::decode_message(
EthVersion::Eth68,
&mut &encoded[..],
)
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire-types/src/snap.rs | crates/net/eth-wire-types/src/snap.rs | //! Implements Ethereum SNAP message types.
//! Snap protocol runs on top of `RLPx`
//! facilitating the exchange of Ethereum state snapshots between peers
//! Reference: [Ethereum Snapshot Protocol](https://github.com/ethereum/devp2p/blob/master/caps/snap.md#protocol-messages)
//!
//! Current version: snap/1
use alloc::vec::Vec;
use alloy_primitives::{Bytes, B256};
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
use reth_codecs_derive::add_arbitrary_tests;
/// Message IDs for the snap sync protocol
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SnapMessageId {
/// Requests of an unknown number of accounts from a given account trie.
GetAccountRange = 0x00,
/// Response with the number of consecutive accounts and the Merkle proofs for the entire
/// range.
AccountRange = 0x01,
/// Requests for the storage slots of multiple accounts' storage tries.
GetStorageRanges = 0x02,
/// Response for the number of consecutive storage slots for the requested account.
StorageRanges = 0x03,
/// Request of the number of contract byte-codes by hash.
GetByteCodes = 0x04,
/// Response for the number of requested contract codes.
ByteCodes = 0x05,
/// Request of the number of state (either account or storage) Merkle trie nodes by path.
GetTrieNodes = 0x06,
/// Response for the number of requested state trie nodes.
TrieNodes = 0x07,
}
/// Request for a range of accounts from the state trie.
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#getaccountrange-0x00
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetAccountRangeMessage {
/// Request ID to match up responses with
pub request_id: u64,
/// Root hash of the account trie to serve
pub root_hash: B256,
/// Account hash of the first to retrieve
pub starting_hash: B256,
/// Account hash after which to stop serving data
pub limit_hash: B256,
/// Soft limit at which to stop returning data
pub response_bytes: u64,
}
/// Account data in the response.
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct AccountData {
/// Hash of the account address (trie path)
pub hash: B256,
/// Account body in slim format
pub body: Bytes,
}
/// Response containing a number of consecutive accounts and the Merkle proofs for the entire range.
// http://github.com/ethereum/devp2p/blob/master/caps/snap.md#accountrange-0x01
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct AccountRangeMessage {
/// ID of the request this is a response for
pub request_id: u64,
/// List of consecutive accounts from the trie
pub accounts: Vec<AccountData>,
/// List of trie nodes proving the account range
pub proof: Vec<Bytes>,
}
/// Request for the storage slots of multiple accounts' storage tries.
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#getstorageranges-0x02
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetStorageRangesMessage {
/// Request ID to match up responses with
pub request_id: u64,
/// Root hash of the account trie to serve
pub root_hash: B256,
/// Account hashes of the storage tries to serve
pub account_hashes: Vec<B256>,
/// Storage slot hash of the first to retrieve
pub starting_hash: B256,
/// Storage slot hash after which to stop serving
pub limit_hash: B256,
/// Soft limit at which to stop returning data
pub response_bytes: u64,
}
/// Storage slot data in the response.
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct StorageData {
/// Hash of the storage slot key (trie path)
pub hash: B256,
/// Data content of the slot
pub data: Bytes,
}
/// Response containing a number of consecutive storage slots for the requested account
/// and optionally the merkle proofs for the last range (boundary proofs) if it only partially
/// covers the storage trie.
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#storageranges-0x03
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct StorageRangesMessage {
/// ID of the request this is a response for
pub request_id: u64,
/// List of list of consecutive slots from the trie (one list per account)
pub slots: Vec<Vec<StorageData>>,
/// List of trie nodes proving the slot range (if partial)
pub proof: Vec<Bytes>,
}
/// Request to get a number of requested contract codes.
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#getbytecodes-0x04
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetByteCodesMessage {
/// Request ID to match up responses with
pub request_id: u64,
/// Code hashes to retrieve the code for
pub hashes: Vec<B256>,
/// Soft limit at which to stop returning data (in bytes)
pub response_bytes: u64,
}
/// Response containing a number of requested contract codes.
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#bytecodes-0x05
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct ByteCodesMessage {
/// ID of the request this is a response for
pub request_id: u64,
/// The requested bytecodes in order
pub codes: Vec<Bytes>,
}
/// Path in the trie for an account and its storage
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct TriePath {
/// Path in the account trie
pub account_path: Bytes,
/// Paths in the storage trie
pub slot_paths: Vec<Bytes>,
}
/// Request a number of state (either account or storage) Merkle trie nodes by path
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#gettrienodes-0x06
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct GetTrieNodesMessage {
/// Request ID to match up responses with
pub request_id: u64,
/// Root hash of the account trie to serve
pub root_hash: B256,
/// Trie paths to retrieve the nodes for, grouped by account
pub paths: Vec<TriePath>,
/// Soft limit at which to stop returning data (in bytes)
pub response_bytes: u64,
}
/// Response containing a number of requested state trie nodes
// https://github.com/ethereum/devp2p/blob/master/caps/snap.md#trienodes-0x07
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct TrieNodesMessage {
/// ID of the request this is a response for
pub request_id: u64,
/// The requested trie nodes in order
pub nodes: Vec<Bytes>,
}
/// Represents all types of messages in the snap sync protocol.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SnapProtocolMessage {
/// Request for an account range - see [`GetAccountRangeMessage`]
GetAccountRange(GetAccountRangeMessage),
/// Response with accounts and proofs - see [`AccountRangeMessage`]
AccountRange(AccountRangeMessage),
/// Request for storage slots - see [`GetStorageRangesMessage`]
GetStorageRanges(GetStorageRangesMessage),
/// Response with storage slots - see [`StorageRangesMessage`]
StorageRanges(StorageRangesMessage),
/// Request for contract bytecodes - see [`GetByteCodesMessage`]
GetByteCodes(GetByteCodesMessage),
/// Response with contract codes - see [`ByteCodesMessage`]
ByteCodes(ByteCodesMessage),
/// Request for trie nodes - see [`GetTrieNodesMessage`]
GetTrieNodes(GetTrieNodesMessage),
/// Response with trie nodes - see [`TrieNodesMessage`]
TrieNodes(TrieNodesMessage),
}
impl SnapProtocolMessage {
/// Returns the protocol message ID for this message type.
///
/// The message ID is used in the `RLPx` protocol to identify different types of messages.
pub const fn message_id(&self) -> SnapMessageId {
match self {
Self::GetAccountRange(_) => SnapMessageId::GetAccountRange,
Self::AccountRange(_) => SnapMessageId::AccountRange,
Self::GetStorageRanges(_) => SnapMessageId::GetStorageRanges,
Self::StorageRanges(_) => SnapMessageId::StorageRanges,
Self::GetByteCodes(_) => SnapMessageId::GetByteCodes,
Self::ByteCodes(_) => SnapMessageId::ByteCodes,
Self::GetTrieNodes(_) => SnapMessageId::GetTrieNodes,
Self::TrieNodes(_) => SnapMessageId::TrieNodes,
}
}
/// Encode the message to bytes
pub fn encode(&self) -> Bytes {
let mut buf = Vec::new();
// Add message ID as first byte
buf.push(self.message_id() as u8);
// Encode the message body based on its type
match self {
Self::GetAccountRange(msg) => msg.encode(&mut buf),
Self::AccountRange(msg) => msg.encode(&mut buf),
Self::GetStorageRanges(msg) => msg.encode(&mut buf),
Self::StorageRanges(msg) => msg.encode(&mut buf),
Self::GetByteCodes(msg) => msg.encode(&mut buf),
Self::ByteCodes(msg) => msg.encode(&mut buf),
Self::GetTrieNodes(msg) => msg.encode(&mut buf),
Self::TrieNodes(msg) => msg.encode(&mut buf),
}
Bytes::from(buf)
}
/// Decodes a SNAP protocol message from its message ID and RLP-encoded body.
pub fn decode(message_id: u8, buf: &mut &[u8]) -> Result<Self, alloy_rlp::Error> {
// Decoding protocol message variants based on message ID
macro_rules! decode_snap_message_variant {
($message_id:expr, $buf:expr, $id:expr, $variant:ident, $msg_type:ty) => {
if $message_id == $id as u8 {
return Ok(Self::$variant(<$msg_type>::decode($buf)?));
}
};
}
// Try to decode each message type based on the message ID
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::GetAccountRange,
GetAccountRange,
GetAccountRangeMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::AccountRange,
AccountRange,
AccountRangeMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::GetStorageRanges,
GetStorageRanges,
GetStorageRangesMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::StorageRanges,
StorageRanges,
StorageRangesMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::GetByteCodes,
GetByteCodes,
GetByteCodesMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::ByteCodes,
ByteCodes,
ByteCodesMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::GetTrieNodes,
GetTrieNodes,
GetTrieNodesMessage
);
decode_snap_message_variant!(
message_id,
buf,
SnapMessageId::TrieNodes,
TrieNodes,
TrieNodesMessage
);
Err(alloy_rlp::Error::Custom("Unknown message ID"))
}
}
#[cfg(test)]
mod tests {
use super::*;
// Helper function to create a B256 from a u64 for testing
fn b256_from_u64(value: u64) -> B256 {
B256::left_padding_from(&value.to_be_bytes())
}
// Helper function to test roundtrip encoding/decoding
fn test_roundtrip(original: SnapProtocolMessage) {
let encoded = original.encode();
// Verify the first byte matches the expected message ID
assert_eq!(encoded[0], original.message_id() as u8);
let mut buf = &encoded[1..];
let decoded = SnapProtocolMessage::decode(encoded[0], &mut buf).unwrap();
// Verify the match
assert_eq!(decoded, original);
}
#[test]
fn test_all_message_roundtrips() {
test_roundtrip(SnapProtocolMessage::GetAccountRange(GetAccountRangeMessage {
request_id: 42,
root_hash: b256_from_u64(123),
starting_hash: b256_from_u64(456),
limit_hash: b256_from_u64(789),
response_bytes: 1024,
}));
test_roundtrip(SnapProtocolMessage::AccountRange(AccountRangeMessage {
request_id: 42,
accounts: vec![AccountData {
hash: b256_from_u64(123),
body: Bytes::from(vec![1, 2, 3]),
}],
proof: vec![Bytes::from(vec![4, 5, 6])],
}));
test_roundtrip(SnapProtocolMessage::GetStorageRanges(GetStorageRangesMessage {
request_id: 42,
root_hash: b256_from_u64(123),
account_hashes: vec![b256_from_u64(456)],
starting_hash: b256_from_u64(789),
limit_hash: b256_from_u64(101112),
response_bytes: 2048,
}));
test_roundtrip(SnapProtocolMessage::StorageRanges(StorageRangesMessage {
request_id: 42,
slots: vec![vec![StorageData {
hash: b256_from_u64(123),
data: Bytes::from(vec![1, 2, 3]),
}]],
proof: vec![Bytes::from(vec![4, 5, 6])],
}));
test_roundtrip(SnapProtocolMessage::GetByteCodes(GetByteCodesMessage {
request_id: 42,
hashes: vec![b256_from_u64(123)],
response_bytes: 1024,
}));
test_roundtrip(SnapProtocolMessage::ByteCodes(ByteCodesMessage {
request_id: 42,
codes: vec![Bytes::from(vec![1, 2, 3])],
}));
test_roundtrip(SnapProtocolMessage::GetTrieNodes(GetTrieNodesMessage {
request_id: 42,
root_hash: b256_from_u64(123),
paths: vec![TriePath {
account_path: Bytes::from(vec![1, 2, 3]),
slot_paths: vec![Bytes::from(vec![4, 5, 6])],
}],
response_bytes: 1024,
}));
test_roundtrip(SnapProtocolMessage::TrieNodes(TrieNodesMessage {
request_id: 42,
nodes: vec![Bytes::from(vec![1, 2, 3])],
}));
}
#[test]
fn test_unknown_message_id() {
// Create some random data
let data = Bytes::from(vec![1, 2, 3, 4]);
let mut buf = data.as_ref();
// Try to decode with an invalid message ID
let result = SnapProtocolMessage::decode(255, &mut buf);
assert!(result.is_err());
if let Err(e) = result {
assert_eq!(e.to_string(), "Unknown message ID");
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/peers/src/node_record.rs | crates/net/peers/src/node_record.rs | //! Commonly used `NodeRecord` type for peers.
use crate::PeerId;
use alloc::{
format,
string::{String, ToString},
};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use core::{
fmt,
fmt::Write,
net::{IpAddr, Ipv4Addr, SocketAddr},
num::ParseIntError,
str::FromStr,
};
use serde_with::{DeserializeFromStr, SerializeDisplay};
#[cfg(feature = "secp256k1")]
use enr::Enr;
/// Represents an ENR in discovery.
///
/// Note: this is only an excerpt of the [`NodeRecord`] data structure.
#[derive(
Clone,
Copy,
Debug,
Eq,
PartialEq,
Hash,
SerializeDisplay,
DeserializeFromStr,
RlpEncodable,
RlpDecodable,
)]
pub struct NodeRecord {
/// The Address of a node.
pub address: IpAddr,
/// UDP discovery port.
pub udp_port: u16,
/// TCP port of the port that accepts connections.
pub tcp_port: u16,
/// Public key of the discovery service
pub id: PeerId,
}
impl NodeRecord {
/// Derive the [`NodeRecord`] from the secret key and addr.
///
/// Note: this will set both the TCP and UDP ports to the port of the addr.
#[cfg(feature = "secp256k1")]
pub fn from_secret_key(addr: SocketAddr, sk: &secp256k1::SecretKey) -> Self {
let pk = secp256k1::PublicKey::from_secret_key(secp256k1::SECP256K1, sk);
let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
Self::new(addr, id)
}
/// Converts the `address` into an [`Ipv4Addr`] if the `address` is a mapped
/// [`Ipv6Addr`](std::net::Ipv6Addr).
///
/// Returns `true` if the address was converted.
///
/// See also [`std::net::Ipv6Addr::to_ipv4_mapped`]
pub fn convert_ipv4_mapped(&mut self) -> bool {
// convert IPv4 mapped IPv6 address
if let IpAddr::V6(v6) = self.address {
if let Some(v4) = v6.to_ipv4_mapped() {
self.address = v4.into();
return true
}
}
false
}
/// Same as [`Self::convert_ipv4_mapped`] but consumes the type
pub fn into_ipv4_mapped(mut self) -> Self {
self.convert_ipv4_mapped();
self
}
/// Sets the tcp port
pub const fn with_tcp_port(mut self, port: u16) -> Self {
self.tcp_port = port;
self
}
/// Sets the udp port
pub const fn with_udp_port(mut self, port: u16) -> Self {
self.udp_port = port;
self
}
/// Creates a new record from a socket addr and peer id.
pub const fn new(addr: SocketAddr, id: PeerId) -> Self {
Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id }
}
/// Creates a new record from an ip address and ports.
pub fn new_with_ports(
ip_addr: IpAddr,
tcp_port: u16,
udp_port: Option<u16>,
id: PeerId,
) -> Self {
let udp_port = udp_port.unwrap_or(tcp_port);
Self { address: ip_addr, tcp_port, udp_port, id }
}
/// The TCP socket address of this node
#[must_use]
pub const fn tcp_addr(&self) -> SocketAddr {
SocketAddr::new(self.address, self.tcp_port)
}
/// The UDP socket address of this node
#[must_use]
pub const fn udp_addr(&self) -> SocketAddr {
SocketAddr::new(self.address, self.udp_port)
}
}
impl fmt::Display for NodeRecord {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("enode://")?;
alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?;
f.write_char('@')?;
match self.address {
IpAddr::V4(ip) => {
ip.fmt(f)?;
}
IpAddr::V6(ip) => {
// encapsulate with brackets
f.write_char('[')?;
ip.fmt(f)?;
f.write_char(']')?;
}
}
f.write_char(':')?;
self.tcp_port.fmt(f)?;
if self.tcp_port != self.udp_port {
f.write_str("?discport=")?;
self.udp_port.fmt(f)?;
}
Ok(())
}
}
/// Possible error types when parsing a [`NodeRecord`]
#[derive(Debug, thiserror::Error)]
pub enum NodeRecordParseError {
/// Invalid url
#[error("Failed to parse url: {0}")]
InvalidUrl(String),
/// Invalid id
#[error("Failed to parse id")]
InvalidId(String),
/// Invalid discport
#[error("Failed to discport query: {0}")]
Discport(ParseIntError),
}
impl FromStr for NodeRecord {
type Err = NodeRecordParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use url::{Host, Url};
let url = Url::parse(s).map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?;
let address = match url.host() {
Some(Host::Ipv4(ip)) => IpAddr::V4(ip),
Some(Host::Ipv6(ip)) => IpAddr::V6(ip),
Some(Host::Domain(ip)) => IpAddr::V4(
Ipv4Addr::from_str(ip)
.map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?,
),
_ => return Err(NodeRecordParseError::InvalidUrl(format!("invalid host: {url:?}"))),
};
let port = url
.port()
.ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?;
let udp_port = if let Some(discovery_port) = url
.query_pairs()
.find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port))
{
discovery_port.parse::<u16>().map_err(NodeRecordParseError::Discport)?
} else {
port
};
let id = url
.username()
.parse::<PeerId>()
.map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?;
Ok(Self { address, id, tcp_port: port, udp_port })
}
}
#[cfg(feature = "secp256k1")]
impl TryFrom<Enr<secp256k1::SecretKey>> for NodeRecord {
type Error = NodeRecordParseError;
fn try_from(enr: Enr<secp256k1::SecretKey>) -> Result<Self, Self::Error> {
(&enr).try_into()
}
}
#[cfg(feature = "secp256k1")]
impl TryFrom<&Enr<secp256k1::SecretKey>> for NodeRecord {
type Error = NodeRecordParseError;
fn try_from(enr: &Enr<secp256k1::SecretKey>) -> Result<Self, Self::Error> {
let Some(address) = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))
else {
return Err(NodeRecordParseError::InvalidUrl("ip missing".to_string()))
};
let Some(udp_port) = enr.udp4().or_else(|| enr.udp6()) else {
return Err(NodeRecordParseError::InvalidUrl("udp port missing".to_string()))
};
let Some(tcp_port) = enr.tcp4().or_else(|| enr.tcp6()) else {
return Err(NodeRecordParseError::InvalidUrl("tcp port missing".to_string()))
};
let id = crate::pk2id(&enr.public_key());
Ok(Self { address, tcp_port, udp_port, id }.into_ipv4_mapped())
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_rlp::Decodable;
use rand::{rng, Rng, RngCore};
use std::net::Ipv6Addr;
#[test]
fn test_mapped_ipv6() {
let mut rng = rng();
let v4: Ipv4Addr = "0.0.0.0".parse().unwrap();
let v6 = v4.to_ipv6_mapped();
let record = NodeRecord {
address: v6.into(),
tcp_port: rng.random(),
udp_port: rng.random(),
id: rng.random(),
};
assert!(record.clone().convert_ipv4_mapped());
assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4));
}
#[test]
fn test_mapped_ipv4() {
let mut rng = rng();
let v4: Ipv4Addr = "0.0.0.0".parse().unwrap();
let record = NodeRecord {
address: v4.into(),
tcp_port: rng.random(),
udp_port: rng.random(),
id: rng.random(),
};
assert!(!record.clone().convert_ipv4_mapped());
assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4));
}
#[test]
fn test_noderecord_codec_ipv4() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 4];
rng.fill_bytes(&mut ip);
let record = NodeRecord {
address: IpAddr::V4(ip.into()),
tcp_port: rng.random(),
udp_port: rng.random(),
id: rng.random(),
};
let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap();
assert_eq!(record, decoded);
}
}
#[test]
fn test_noderecord_codec_ipv6() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let record = NodeRecord {
address: IpAddr::V6(ip.into()),
tcp_port: rng.random(),
udp_port: rng.random(),
id: rng.random(),
};
let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap();
assert_eq!(record, decoded);
}
}
#[test]
fn test_url_parse() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301";
let node: NodeRecord = url.parse().unwrap();
assert_eq!(node, NodeRecord {
address: IpAddr::V4([10,3,58,6].into()),
tcp_port: 30303,
udp_port: 30301,
id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(),
})
}
#[test]
fn test_node_display() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303";
let node: NodeRecord = url.parse().unwrap();
assert_eq!(url, &format!("{node}"));
}
#[test]
fn test_node_display_discport() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301";
let node: NodeRecord = url.parse().unwrap();
assert_eq!(url, &format!("{node}"));
}
#[test]
fn test_node_serialize() {
let cases = vec![
// IPv4
(
NodeRecord {
address: IpAddr::V4([10, 3, 58, 6].into()),
tcp_port: 30303u16,
udp_port: 30301u16,
id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(),
},
"\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\""
),
// IPv6
(
NodeRecord {
address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(),
tcp_port: 52150u16,
udp_port: 52151u16,
id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(),
},
"\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"",
)
];
for (node, expected) in cases {
let ser = serde_json::to_string::<NodeRecord>(&node).expect("couldn't serialize");
assert_eq!(ser, expected);
}
}
#[test]
fn test_node_deserialize() {
let cases = vec![
// IPv4
(
"\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"",
NodeRecord {
address: IpAddr::V4([10, 3, 58, 6].into()),
tcp_port: 30303u16,
udp_port: 30301u16,
id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(),
}
),
// IPv6
(
"\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"",
NodeRecord {
address: Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12).into(),
tcp_port: 52150u16,
udp_port: 52151u16,
id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(),
}
),
];
for (url, expected) in cases {
let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize");
assert_eq!(node, expected);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/peers/src/lib.rs | crates/net/peers/src/lib.rs | //! Network Types and Utilities.
//!
//! This crate manages and converts Ethereum network entities such as node records, peer IDs, and
//! Ethereum Node Records (ENRs)
//!
//! ## An overview of Node Record types
//!
//! Ethereum uses different types of "node records" to represent peers on the network.
//!
//! The simplest way to identify a peer is by public key. This is the [`PeerId`] type, which usually
//! represents a peer's secp256k1 public key.
//!
//! A more complete representation of a peer is the [`NodeRecord`] type, which includes the peer's
//! IP address, the ports where it is reachable (TCP and UDP), and the peer's public key. This is
//! what is returned from discovery v4 queries.
//!
//! The most comprehensive node record type is the Ethereum Node Record ([`Enr`]), which is a
//! signed, versioned record that includes the information from a [`NodeRecord`] along with
//! additional metadata. This is the data structure returned from discovery v5 queries.
//!
//! When we need to deserialize an identifier that could be any of these three types ([`PeerId`],
//! [`NodeRecord`], and [`Enr`]), we use the [`AnyNode`] type, which is an enum over the three
//! types. [`AnyNode`] is used in reth's `admin_addTrustedPeer` RPC method.
//!
//! The __final__ type is the [`TrustedPeer`] type, which is similar to a [`NodeRecord`] but may
//! include a domain name instead of a direct IP address. It includes a `resolve` method, which can
//! be used to resolve the domain name, producing a [`NodeRecord`]. This is useful for adding
//! trusted peers at startup, whose IP address may not be static each time the node starts. This is
//! common in orchestrated environments like Kubernetes, where there is reliable service discovery,
//! but services do not necessarily have static IPs.
//!
//! In short, the types are as follows:
//! - [`PeerId`]: A simple public key identifier.
//! - [`NodeRecord`]: A more complete representation of a peer, including IP address and ports.
//! - [`Enr`]: An Ethereum Node Record, which is a signed, versioned record that includes additional
//! metadata. Useful when interacting with discovery v5, or when custom metadata is required.
//! - [`AnyNode`]: An enum over [`PeerId`], [`NodeRecord`], and [`Enr`], useful in deserialization
//! when the type of the node record is not known.
//! - [`TrustedPeer`]: A [`NodeRecord`] with an optional domain name, which can be resolved to a
//! [`NodeRecord`]. Useful for adding trusted peers at startup, whose IP address may not be
//! static.
//!
//!
//! ## Feature Flags
//!
//! - `net`: Support for address lookups.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::{
format,
string::{String, ToString},
};
use alloy_primitives::B512;
use core::str::FromStr;
// Re-export PeerId for ease of use.
#[cfg(feature = "secp256k1")]
pub use enr::Enr;
/// Alias for a peer identifier
pub type PeerId = B512;
pub mod node_record;
pub use node_record::{NodeRecord, NodeRecordParseError};
pub mod trusted_peer;
pub use trusted_peer::TrustedPeer;
mod bootnodes;
pub use bootnodes::*;
/// This tag should be set to indicate to libsecp256k1 that the following bytes denote an
/// uncompressed pubkey.
///
/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` = `0x04`
///
/// See: <https://github.com/bitcoin-core/secp256k1/blob/master/include/secp256k1.h#L211>
#[cfg(feature = "secp256k1")]
const SECP256K1_TAG_PUBKEY_UNCOMPRESSED: u8 = 4;
/// Converts a [`secp256k1::PublicKey`] to a [`PeerId`] by stripping the
/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag and storing the rest of the slice in the [`PeerId`].
#[cfg(feature = "secp256k1")]
#[inline]
pub fn pk2id(pk: &secp256k1::PublicKey) -> PeerId {
PeerId::from_slice(&pk.serialize_uncompressed()[1..])
}
/// Converts a [`PeerId`] to a [`secp256k1::PublicKey`] by prepending the [`PeerId`] bytes with the
/// `SECP256K1_TAG_PUBKEY_UNCOMPRESSED` tag.
#[cfg(feature = "secp256k1")]
#[inline]
pub fn id2pk(id: PeerId) -> Result<secp256k1::PublicKey, secp256k1::Error> {
// NOTE: B512 is used as a PeerId because 512 bits is enough to represent an uncompressed
// public key.
let mut s = [0u8; secp256k1::constants::UNCOMPRESSED_PUBLIC_KEY_SIZE];
s[0] = SECP256K1_TAG_PUBKEY_UNCOMPRESSED;
s[1..].copy_from_slice(id.as_slice());
secp256k1::PublicKey::from_slice(&s)
}
/// A peer that can come in ENR or [`NodeRecord`] form.
#[derive(
Debug, Clone, Eq, PartialEq, Hash, serde_with::SerializeDisplay, serde_with::DeserializeFromStr,
)]
pub enum AnyNode {
/// An "enode:" peer with full ip
NodeRecord(NodeRecord),
/// An "enr:" peer
#[cfg(feature = "secp256k1")]
Enr(Enr<secp256k1::SecretKey>),
/// An incomplete "enode" with only a peer id
PeerId(PeerId),
}
impl AnyNode {
/// Returns the peer id of the node.
#[allow(clippy::missing_const_for_fn)]
pub fn peer_id(&self) -> PeerId {
match self {
Self::NodeRecord(record) => record.id,
#[cfg(feature = "secp256k1")]
Self::Enr(enr) => pk2id(&enr.public_key()),
Self::PeerId(peer_id) => *peer_id,
}
}
/// Returns the full node record if available.
#[allow(clippy::missing_const_for_fn)]
pub fn node_record(&self) -> Option<NodeRecord> {
match self {
Self::NodeRecord(record) => Some(*record),
#[cfg(feature = "secp256k1")]
Self::Enr(enr) => {
let node_record = NodeRecord {
address: enr
.ip4()
.map(core::net::IpAddr::from)
.or_else(|| enr.ip6().map(core::net::IpAddr::from))?,
tcp_port: enr.tcp4().or_else(|| enr.tcp6())?,
udp_port: enr.udp4().or_else(|| enr.udp6())?,
id: pk2id(&enr.public_key()),
}
.into_ipv4_mapped();
Some(node_record)
}
_ => None,
}
}
}
impl From<NodeRecord> for AnyNode {
fn from(value: NodeRecord) -> Self {
Self::NodeRecord(value)
}
}
#[cfg(feature = "secp256k1")]
impl From<Enr<secp256k1::SecretKey>> for AnyNode {
fn from(value: Enr<secp256k1::SecretKey>) -> Self {
Self::Enr(value)
}
}
impl FromStr for AnyNode {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Some(rem) = s.strip_prefix("enode://") {
if let Ok(record) = NodeRecord::from_str(s) {
return Ok(Self::NodeRecord(record))
}
// incomplete enode
if let Ok(peer_id) = PeerId::from_str(rem) {
return Ok(Self::PeerId(peer_id))
}
return Err(format!("invalid public key: {rem}"))
}
#[cfg(feature = "secp256k1")]
if s.starts_with("enr:") {
return Enr::from_str(s).map(AnyNode::Enr)
}
Err("missing 'enr:' prefix for base64-encoded record".to_string())
}
}
impl core::fmt::Display for AnyNode {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
Self::NodeRecord(record) => write!(f, "{record}"),
#[cfg(feature = "secp256k1")]
Self::Enr(enr) => write!(f, "{enr}"),
Self::PeerId(peer_id) => {
write!(f, "enode://{}", alloy_primitives::hex::encode(peer_id.as_slice()))
}
}
}
}
/// Generic wrapper with peer id
#[derive(Debug)]
pub struct WithPeerId<T>(PeerId, pub T);
impl<T> From<(PeerId, T)> for WithPeerId<T> {
fn from(value: (PeerId, T)) -> Self {
Self(value.0, value.1)
}
}
impl<T> WithPeerId<T> {
/// Wraps the value with the peerid.
pub const fn new(peer: PeerId, value: T) -> Self {
Self(peer, value)
}
/// Get the peer id
pub const fn peer_id(&self) -> PeerId {
self.0
}
/// Get the underlying data
pub const fn data(&self) -> &T {
&self.1
}
/// Returns ownership of the underlying data.
pub fn into_data(self) -> T {
self.1
}
/// Transform the data
pub fn transform<F: From<T>>(self) -> WithPeerId<F> {
WithPeerId(self.0, self.1.into())
}
/// Split the wrapper into [`PeerId`] and data tuple
pub fn split(self) -> (PeerId, T) {
(self.0, self.1)
}
/// Maps the inner value to a new value using the given function.
pub fn map<U, F: FnOnce(T) -> U>(self, op: F) -> WithPeerId<U> {
WithPeerId(self.0, op(self.1))
}
}
impl<T> WithPeerId<Option<T>> {
/// Returns `None` if the inner value is `None`, otherwise returns `Some(WithPeerId<T>)`.
pub fn transpose(self) -> Option<WithPeerId<T>> {
self.1.map(|v| WithPeerId(self.0, v))
}
/// Returns the contained Some value, consuming the self value.
///
/// See also [`Option::unwrap`]
///
/// # Panics
///
/// Panics if the value is a None
pub fn unwrap(self) -> T {
self.1.unwrap()
}
/// Returns the transposed [`WithPeerId`] type with the contained Some value
///
/// # Panics
///
/// Panics if the value is a None
pub fn unwrapped(self) -> WithPeerId<T> {
self.transpose().unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[cfg(feature = "secp256k1")]
#[test]
fn test_node_record_parse() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301";
let node: AnyNode = url.parse().unwrap();
assert_eq!(node, AnyNode::NodeRecord(NodeRecord {
address: std::net::IpAddr::V4([10,3,58,6].into()),
tcp_port: 30303,
udp_port: 30301,
id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(),
}));
assert_eq!(node.to_string(), url)
}
#[test]
fn test_peer_id_parse() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0";
let node: AnyNode = url.parse().unwrap();
assert_eq!(node, AnyNode::PeerId("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap()));
assert_eq!(node.to_string(), url);
let url = "enode://";
let err = url.parse::<AnyNode>().unwrap_err();
assert_eq!(err, "invalid public key: ");
}
// <https://eips.ethereum.org/EIPS/eip-778>
#[cfg(feature = "secp256k1")]
#[test]
fn test_enr_parse() {
let url = "enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8";
let node: AnyNode = url.parse().unwrap();
assert_eq!(
node.peer_id(),
"0xca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"
.parse::<PeerId>()
.unwrap()
);
assert_eq!(node.to_string(), url);
}
#[test]
#[cfg(feature = "secp256k1")]
fn pk2id2pk() {
let prikey = secp256k1::SecretKey::new(&mut rand_08::thread_rng());
let pubkey = secp256k1::PublicKey::from_secret_key(secp256k1::SECP256K1, &prikey);
assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/peers/src/trusted_peer.rs | crates/net/peers/src/trusted_peer.rs | //! `NodeRecord` type that uses a domain instead of an IP.
use crate::{NodeRecord, PeerId};
use alloc::string::{String, ToString};
use core::{
fmt::{self, Write},
net::IpAddr,
num::ParseIntError,
str::FromStr,
};
use serde_with::{DeserializeFromStr, SerializeDisplay};
use url::Host;
/// Represents the node record of a trusted peer. The only difference between this and a
/// [`NodeRecord`] is that this does not contain the IP address of the peer, but rather a domain
/// __or__ IP address.
///
/// This is useful when specifying nodes which are in internal infrastructure and may only be
/// discoverable reliably using DNS.
///
/// This should NOT be used for any use case other than in trusted peer lists.
#[derive(Clone, Debug, Eq, PartialEq, Hash, SerializeDisplay, DeserializeFromStr)]
pub struct TrustedPeer {
/// The host of a node.
pub host: Host,
/// TCP port of the port that accepts connections.
pub tcp_port: u16,
/// UDP discovery port.
pub udp_port: u16,
/// Public key of the discovery service
pub id: PeerId,
}
impl TrustedPeer {
/// Derive the [`NodeRecord`] from the secret key and addr
#[cfg(feature = "secp256k1")]
pub fn from_secret_key(host: Host, port: u16, sk: &secp256k1::SecretKey) -> Self {
let pk = secp256k1::PublicKey::from_secret_key(secp256k1::SECP256K1, sk);
let id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
Self::new(host, port, id)
}
/// Creates a new record from a socket addr and peer id.
pub const fn new(host: Host, port: u16, id: PeerId) -> Self {
Self { host, tcp_port: port, udp_port: port, id }
}
#[cfg(any(test, feature = "std"))]
const fn to_node_record(&self, ip: IpAddr) -> NodeRecord {
NodeRecord { address: ip, id: self.id, tcp_port: self.tcp_port, udp_port: self.udp_port }
}
/// Tries to resolve directly to a [`NodeRecord`] if the host is an IP address.
#[cfg(any(test, feature = "std"))]
fn try_node_record(&self) -> Result<NodeRecord, &str> {
match &self.host {
Host::Ipv4(ip) => Ok(self.to_node_record((*ip).into())),
Host::Ipv6(ip) => Ok(self.to_node_record((*ip).into())),
Host::Domain(domain) => Err(domain),
}
}
/// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`].
///
/// This use [`ToSocketAddr`](std::net::ToSocketAddrs) to resolve the host to an IP address.
#[cfg(any(test, feature = "std"))]
pub fn resolve_blocking(&self) -> Result<NodeRecord, std::io::Error> {
let domain = match self.try_node_record() {
Ok(record) => return Ok(record),
Err(domain) => domain,
};
// Resolve the domain to an IP address
let mut ips = std::net::ToSocketAddrs::to_socket_addrs(&(domain, 0))?;
let ip = ips.next().ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found")
})?;
Ok(self.to_node_record(ip.ip()))
}
/// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`].
#[cfg(any(test, feature = "net"))]
pub async fn resolve(&self) -> Result<NodeRecord, std::io::Error> {
let domain = match self.try_node_record() {
Ok(record) => return Ok(record),
Err(domain) => domain,
};
// Resolve the domain to an IP address
let mut ips = tokio::net::lookup_host(format!("{domain}:0")).await?;
let ip = ips.next().ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found")
})?;
Ok(self.to_node_record(ip.ip()))
}
}
impl fmt::Display for TrustedPeer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("enode://")?;
alloy_primitives::hex::encode(self.id.as_slice()).fmt(f)?;
f.write_char('@')?;
self.host.fmt(f)?;
f.write_char(':')?;
self.tcp_port.fmt(f)?;
if self.tcp_port != self.udp_port {
f.write_str("?discport=")?;
self.udp_port.fmt(f)?;
}
Ok(())
}
}
/// Possible error types when parsing a [`NodeRecord`]
#[derive(Debug, thiserror::Error)]
pub enum NodeRecordParseError {
/// Invalid url
#[error("Failed to parse url: {0}")]
InvalidUrl(String),
/// Invalid id
#[error("Failed to parse id")]
InvalidId(String),
/// Invalid discport
#[error("Failed to discport query: {0}")]
Discport(ParseIntError),
}
impl FromStr for TrustedPeer {
type Err = NodeRecordParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use url::Url;
// Parse the URL with enode prefix replaced with http.
// The enode prefix causes the parser to use parse_opaque() on
// the host str which only handles domains and ipv6, not ipv4.
let url = Url::parse(s.replace("enode://", "http://").as_str())
.map_err(|e| NodeRecordParseError::InvalidUrl(e.to_string()))?;
let host = url
.host()
.ok_or_else(|| NodeRecordParseError::InvalidUrl("no host specified".to_string()))?
.to_owned();
let port = url
.port()
.ok_or_else(|| NodeRecordParseError::InvalidUrl("no port specified".to_string()))?;
let udp_port = if let Some(discovery_port) = url
.query_pairs()
.find_map(|(maybe_disc, port)| (maybe_disc.as_ref() == "discport").then_some(port))
{
discovery_port.parse::<u16>().map_err(NodeRecordParseError::Discport)?
} else {
port
};
let id = url
.username()
.parse::<PeerId>()
.map_err(|e| NodeRecordParseError::InvalidId(e.to_string()))?;
Ok(Self { host, id, tcp_port: port, udp_port })
}
}
impl From<NodeRecord> for TrustedPeer {
fn from(record: NodeRecord) -> Self {
let host = match record.address {
IpAddr::V4(ip) => Host::Ipv4(ip),
IpAddr::V6(ip) => Host::Ipv6(ip),
};
Self { host, tcp_port: record.tcp_port, udp_port: record.udp_port, id: record.id }
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::net::Ipv6Addr;
#[test]
fn test_url_parse() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301";
let node: TrustedPeer = url.parse().unwrap();
assert_eq!(node, TrustedPeer {
host: Host::Ipv4([10,3,58,6].into()),
tcp_port: 30303,
udp_port: 30301,
id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(),
})
}
#[test]
fn test_node_display() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303";
let node: TrustedPeer = url.parse().unwrap();
assert_eq!(url, &format!("{node}"));
}
#[test]
fn test_node_display_discport() {
let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301";
let node: TrustedPeer = url.parse().unwrap();
assert_eq!(url, &format!("{node}"));
}
#[test]
fn test_node_serialize() {
let cases = vec![
// IPv4
(
TrustedPeer {
host: Host::Ipv4([10, 3, 58, 6].into()),
tcp_port: 30303u16,
udp_port: 30301u16,
id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(),
},
"\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\""
),
// IPv6
(
TrustedPeer {
host: Host::Ipv6(Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12)),
tcp_port: 52150u16,
udp_port: 52151u16,
id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(),
},
"\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\""
),
// URL
(
TrustedPeer {
host: Host::Domain("my-domain".to_string()),
tcp_port: 52150u16,
udp_port: 52151u16,
id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(),
},
"\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@my-domain:52150?discport=52151\""
),
];
for (node, expected) in cases {
let ser = serde_json::to_string::<TrustedPeer>(&node).expect("couldn't serialize");
assert_eq!(ser, expected);
}
}
#[test]
fn test_node_deserialize() {
let cases = vec![
// IPv4
(
"\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"",
TrustedPeer {
host: Host::Ipv4([10, 3, 58, 6].into()),
tcp_port: 30303u16,
udp_port: 30301u16,
id: PeerId::from_str("6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0").unwrap(),
}
),
// IPv6
(
"\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150?discport=52151\"",
TrustedPeer {
host: Host::Ipv6(Ipv6Addr::new(0x2001, 0xdb8, 0x3c4d, 0x15, 0x0, 0x0, 0xabcd, 0xef12)),
tcp_port: 52150u16,
udp_port: 52151u16,
id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(),
}
),
// URL
(
"\"enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@my-domain:52150?discport=52151\"",
TrustedPeer {
host: Host::Domain("my-domain".to_string()),
tcp_port: 52150u16,
udp_port: 52151u16,
id: PeerId::from_str("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439").unwrap(),
}
),
];
for (url, expected) in cases {
let node: TrustedPeer = serde_json::from_str(url).expect("couldn't deserialize");
assert_eq!(node, expected);
}
}
#[tokio::test]
async fn test_resolve_dns_node_record() {
// Set up tests
let tests = vec![("localhost")];
// Run tests
for domain in tests {
// Construct record
let rec =
TrustedPeer::new(url::Host::Domain(domain.to_owned()), 30300, PeerId::random());
// Resolve domain and validate
let ensure = |rec: NodeRecord| match rec.address {
IpAddr::V4(addr) => {
assert_eq!(addr, std::net::Ipv4Addr::new(127, 0, 0, 1))
}
IpAddr::V6(addr) => {
assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))
}
};
ensure(rec.resolve().await.unwrap());
ensure(rec.resolve_blocking().unwrap());
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/peers/src/bootnodes/ethereum.rs | crates/net/peers/src/bootnodes/ethereum.rs | //! Ethereum bootnodes come from <https://github.com/ledgerwatch/erigon/blob/devel/params/bootnodes.go>
/// Ethereum Foundation Go Bootnodes
pub static MAINNET_BOOTNODES: [&str; 4] = [
"enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001
"enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001
"enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303", // bootnode-hetzner-hel
"enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn
];
/// Ethereum Foundation Sepolia Bootnodes
pub static SEPOLIA_BOOTNODES: [&str; 5] = [
"enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3
"enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3
"enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1
"enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1
"enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3
];
/// Ethereum Foundation Holesky Bootnodes
pub static HOLESKY_BOOTNODES: [&str; 2] = [
"enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303",
"enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303",
];
/// Ethereum Foundation Hoodi Bootnodes
/// From: <https://github.com/eth-clients/hoodi/blob/main/metadata/enodes.yaml>
pub static HOODI_BOOTNODES: [&str; 3] = [
"enode://2112dd3839dd752813d4df7f40936f06829fc54c0e051a93967c26e5f5d27d99d886b57b4ffcc3c475e930ec9e79c56ef1dbb7d86ca5ee83a9d2ccf36e5c240c@134.209.138.84:30303",
"enode://60203fcb3524e07c5df60a14ae1c9c5b24023ea5d47463dfae051d2c9f3219f309657537576090ca0ae641f73d419f53d8e8000d7a464319d4784acd7d2abc41@209.38.124.160:30303",
"enode://8ae4a48101b2299597341263da0deb47cc38aa4d3ef4b7430b897d49bfa10eb1ccfe1655679b1ed46928ef177fbf21b86837bd724400196c508427a6f41602cd@134.199.184.23:30303",
];
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/peers/src/bootnodes/optimism.rs | crates/net/peers/src/bootnodes/optimism.rs | //! OP bootnodes come from <https://github.com/ethereum-optimism/op-geth/blob/optimism/params/bootnodes.go>
/// OP stack mainnet boot nodes.
pub static OP_BOOTNODES: &[&str] = &[
// OP Labs
"enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305",
"enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305",
"enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305",
// Base
"enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301",
"enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301",
"enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301",
"enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301",
"enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301",
// Uniswap Labs
"enode://b1a743328188dba3b2ed8c06abbb2688fabe64a3251e43bd77d4e5265bbd5cf03eca8ace4cde8ddb0c49c409b90bf941ebf556094638c6203edd6baa5ef0091b@3.134.214.169:30303",
"enode://ea9eaaf695facbe53090beb7a5b0411a81459bbf6e6caac151e587ee77120a1b07f3b9f3a9550f797d73d69840a643b775fd1e40344dea11e7660b6a483fe80e@52.14.30.39:30303",
"enode://77b6b1e72984d5d50e00ae934ffea982902226fe92fa50da42334c2750d8e405b55a5baabeb988c88125368142a64eda5096d0d4522d3b6eef75d166c7d303a9@3.148.100.173:30303",
];
/// OP stack testnet boot nodes.
///
/// Static `enode://` URLs (`node-pubkey@ip:port`) used to bootstrap peer discovery on
/// OP-stack testnets. Entries are grouped by operator; each string is parsed into a
/// `NodeRecord` at runtime (see `parse_nodes` / `op_testnet_nodes`).
pub static OP_TESTNET_BOOTNODES: &[&str] = &[
    // OP Labs
    "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305",
    "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305",
    "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305",
    // Base
    "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301",
    "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301",
    // Uniswap Labs
    "enode://9e138a8ec4291c4f2fe5851aaee44fc73ae67da87fb26b75e3b94183c7ffc15b2795afc816b0aa084151b95b3a3553f1cd0d1e9dd134dcf059a84d4e0b429afc@3.146.117.118:30303",
    "enode://34d87d649e5c58a17a43c1d59900a2020bd82d5b12ea39467c3366bee2946aaa9c759c77ede61089624691291fb2129eeb2a47687b50e2463188c78e1f738cf2@52.15.54.8:30303",
    "enode://c2405194166fe2c0e6c61ee469745fed1a6802f51c8fc39e1c78c21c9a6a15a7c55304f09ee37e430da9a1ce8117ca085263c6b0f474f6946811e398347611ef@3.146.213.65:30303",
];
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/peers/src/bootnodes/mod.rs | crates/net/peers/src/bootnodes/mod.rs | //! Bootnodes for the network
use crate::NodeRecord;
use alloc::vec::Vec;
mod ethereum;
pub use ethereum::*;
mod optimism;
pub use optimism::*;
/// Returns parsed mainnet nodes
pub fn mainnet_nodes() -> Vec<NodeRecord> {
    parse_nodes(&MAINNET_BOOTNODES[..])
}

/// Returns parsed sepolia nodes
pub fn sepolia_nodes() -> Vec<NodeRecord> {
    parse_nodes(&SEPOLIA_BOOTNODES[..])
}

/// Returns parsed holesky nodes
pub fn holesky_nodes() -> Vec<NodeRecord> {
    parse_nodes(&HOLESKY_BOOTNODES[..])
}

/// Returns parsed hoodi nodes
pub fn hoodi_nodes() -> Vec<NodeRecord> {
    parse_nodes(&HOODI_BOOTNODES[..])
}

/// Returns parsed op-stack mainnet nodes
pub fn op_nodes() -> Vec<NodeRecord> {
    parse_nodes(OP_BOOTNODES)
}

/// Returns parsed op-stack testnet nodes
pub fn op_testnet_nodes() -> Vec<NodeRecord> {
    parse_nodes(OP_TESTNET_BOOTNODES)
}

/// Returns parsed op-stack base mainnet nodes
// NOTE(review): intentionally reuses `OP_BOOTNODES` — the Base entries are presumably part of
// the shared OP-stack set (the testnet list has an explicit "Base" section); confirm upstream.
pub fn base_nodes() -> Vec<NodeRecord> {
    parse_nodes(OP_BOOTNODES)
}

/// Returns parsed op-stack base testnet nodes
// NOTE(review): same reuse as `base_nodes` — Base testnet shares `OP_TESTNET_BOOTNODES`.
pub fn base_testnet_nodes() -> Vec<NodeRecord> {
    parse_nodes(OP_TESTNET_BOOTNODES)
}
/// Parses all the nodes into [`NodeRecord`]s.
///
/// # Panics
///
/// Panics if any entry cannot be parsed as a node record. Since the inputs are
/// compile-time bootnode constants, a failure here indicates a malformed constant,
/// so the panic message includes the offending string for fast diagnosis.
pub fn parse_nodes(nodes: impl IntoIterator<Item = impl AsRef<str>>) -> Vec<NodeRecord> {
    nodes
        .into_iter()
        .map(|s| {
            let s = s.as_ref();
            // `unwrap_or_else` + panic keeps the bad input visible, unlike a bare `unwrap()`.
            s.parse().unwrap_or_else(|err| panic!("invalid node record {s:?}: {err:?}"))
        })
        .collect()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/full_block.rs | crates/net/p2p/src/full_block.rs | use super::headers::client::HeadersRequest;
use crate::{
bodies::client::{BodiesClient, SingleBodyRequest},
download::DownloadClient,
error::PeerRequestResult,
headers::client::{HeadersClient, SingleHeaderRequest},
priority::Priority,
BlockClient,
};
use alloy_consensus::BlockHeader;
use alloy_primitives::{Sealable, B256};
use core::marker::PhantomData;
use reth_consensus::{Consensus, ConsensusError};
use reth_eth_wire_types::{EthNetworkPrimitives, HeadersDirection, NetworkPrimitives};
use reth_network_peers::{PeerId, WithPeerId};
use reth_primitives_traits::{SealedBlock, SealedHeader};
use std::{
cmp::Reverse,
collections::{HashMap, VecDeque},
fmt::Debug,
future::Future,
hash::Hash,
ops::RangeInclusive,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
use tracing::debug;
/// A Client that can fetch full blocks from the network.
#[derive(Debug, Clone)]
pub struct FullBlockClient<Client>
where
    Client: BlockClient,
{
    /// Inner network client used to issue header and body requests.
    client: Client,
    /// Consensus implementation used to validate downloaded headers and bodies.
    consensus: Arc<dyn Consensus<Client::Block, Error = ConsensusError>>,
}
impl<Client> FullBlockClient<Client>
where
    Client: BlockClient,
{
    /// Creates a new instance of `FullBlockClient`.
    pub fn new(
        client: Client,
        consensus: Arc<dyn Consensus<Client::Block, Error = ConsensusError>>,
    ) -> Self {
        Self { client, consensus }
    }

    /// Returns a client with Test consensus
    ///
    /// Only available in tests or with the `test-utils` feature; the test consensus
    /// accepts everything unless explicitly configured to fail.
    #[cfg(any(test, feature = "test-utils"))]
    pub fn test_client(client: Client) -> Self {
        Self::new(client, Arc::new(reth_consensus::test_utils::TestConsensus::default()))
    }
}
impl<Client> FullBlockClient<Client>
where
    Client: BlockClient,
{
    /// Returns a future that fetches the [`SealedBlock`] for the given hash.
    ///
    /// Note: this future is cancel safe
    ///
    /// Caution: This does no validation of body (transactions) response but guarantees that the
    /// [`SealedHeader`] matches the requested hash.
    pub fn get_full_block(&self, hash: B256) -> FetchFullBlockFuture<Client> {
        let client = self.client.clone();
        FetchFullBlockFuture {
            hash,
            consensus: self.consensus.clone(),
            // Header and body requests are started eagerly here and polled concurrently
            // by the returned future.
            request: FullBlockRequest {
                header: Some(client.get_header(hash.into())),
                body: Some(client.get_block_body(hash)),
            },
            client,
            header: None,
            body: None,
        }
    }

    /// Returns a future that fetches [`SealedBlock`]s for the given hash and count.
    ///
    /// Note: this future is cancel safe
    ///
    /// Caution: This does no validation of body (transactions) responses but guarantees that
    /// the starting [`SealedHeader`] matches the requested hash, and that the number of headers and
    /// bodies received matches the requested limit.
    ///
    /// The returned future yields bodies in falling order, i.e. with descending block numbers.
    pub fn get_full_block_range(
        &self,
        hash: B256,
        count: u64,
    ) -> FetchFullBlockRangeFuture<Client> {
        let client = self.client.clone();
        FetchFullBlockRangeFuture {
            start_hash: hash,
            count,
            // Only the headers request is started here; the bodies request is created
            // once a valid headers response arrives.
            request: FullBlockRangeRequest {
                headers: Some(client.get_headers(HeadersRequest::falling(hash.into(), count))),
                bodies: None,
            },
            client,
            headers: None,
            pending_headers: VecDeque::new(),
            bodies: HashMap::default(),
            consensus: Arc::clone(&self.consensus),
        }
    }
}
/// A future that downloads a full block from the network.
///
/// This will attempt to fetch both the header and body for the given block hash at the same time.
/// When both requests succeed, the future will yield the full block.
#[must_use = "futures do nothing unless polled"]
pub struct FetchFullBlockFuture<Client>
where
    Client: BlockClient,
{
    /// Client used to (re-)issue header/body requests on bad responses.
    client: Client,
    /// Consensus used to validate the body against the received header.
    consensus: Arc<dyn Consensus<Client::Block, Error = ConsensusError>>,
    /// Hash of the block being fetched.
    hash: B256,
    /// In-flight header/body requests.
    request: FullBlockRequest<Client>,
    /// Header response, once received and verified to match `hash`.
    header: Option<SealedHeader<Client::Header>>,
    /// Body response; may still be pending validation if it arrived before the header.
    body: Option<BodyResponse<Client::Body>>,
}
impl<Client> FetchFullBlockFuture<Client>
where
    Client: BlockClient<Header: BlockHeader>,
{
    /// Returns the hash of the block being requested.
    pub const fn hash(&self) -> &B256 {
        &self.hash
    }

    /// If the header request is already complete, this returns the block number
    pub fn block_number(&self) -> Option<u64> {
        self.header.as_ref().map(|h| h.number())
    }

    /// Returns the [`SealedBlock`] if the request is complete and valid.
    ///
    /// If the body fails validation against the header, the peer is penalized, the header is
    /// kept, and a fresh body request is scheduled; `None` is returned so polling continues.
    fn take_block(&mut self) -> Option<SealedBlock<Client::Block>> {
        if self.header.is_none() || self.body.is_none() {
            return None
        }

        let header = self.header.take().unwrap();
        let resp = self.body.take().unwrap();
        match resp {
            BodyResponse::Validated(body) => Some(SealedBlock::from_sealed_parts(header, body)),
            BodyResponse::PendingValidation(resp) => {
                // ensure the block is valid, else retry
                if let Err(err) = self.consensus.validate_body_against_header(resp.data(), &header)
                {
                    debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body");
                    self.client.report_bad_message(resp.peer_id());
                    self.header = Some(header);
                    self.request.body = Some(self.client.get_block_body(self.hash));
                    return None
                }
                Some(SealedBlock::from_sealed_parts(header, resp.into_data()))
            }
        }
    }

    /// Handles a body response: validates it immediately if the header is already known,
    /// otherwise stores it as pending validation until the header arrives.
    fn on_block_response(&mut self, resp: WithPeerId<Client::Body>) {
        if let Some(ref header) = self.header {
            if let Err(err) = self.consensus.validate_body_against_header(resp.data(), header) {
                debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body");
                self.client.report_bad_message(resp.peer_id());
                return
            }
            self.body = Some(BodyResponse::Validated(resp.into_data()));
            return
        }
        self.body = Some(BodyResponse::PendingValidation(resp));
    }
}
impl<Client> Future for FetchFullBlockFuture<Client>
where
    Client: BlockClient<Header: BlockHeader + Sealable> + 'static,
{
    type Output = SealedBlock<Client::Block>;

    /// Drives both the header and body requests; retries either request on a bad or
    /// failed response, and resolves once a matching, validated (header, body) pair exists.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();

        // preemptive yield point: bound the number of loop iterations per poll so a
        // chatty peer cannot starve other tasks on this executor
        let mut budget = 4;

        loop {
            match ready!(this.request.poll(cx)) {
                ResponseResult::Header(res) => {
                    match res {
                        Ok(maybe_header) => {
                            let (peer, maybe_header) =
                                maybe_header.map(|h| h.map(SealedHeader::seal_slow)).split();
                            if let Some(header) = maybe_header {
                                if header.hash() == this.hash {
                                    this.header = Some(header);
                                } else {
                                    debug!(target: "downloaders", expected=?this.hash, received=?header.hash(), "Received wrong header");
                                    // received a different header than requested
                                    this.client.report_bad_message(peer)
                                }
                            }
                        }
                        Err(err) => {
                            debug!(target: "downloaders", %err, ?this.hash, "Header download failed");
                        }
                    }

                    if this.header.is_none() {
                        // received bad response; re-issue the header request
                        this.request.header = Some(this.client.get_header(this.hash.into()));
                    }
                }
                ResponseResult::Body(res) => {
                    match res {
                        Ok(maybe_body) => {
                            if let Some(body) = maybe_body.transpose() {
                                this.on_block_response(body);
                            }
                        }
                        Err(err) => {
                            debug!(target: "downloaders", %err, ?this.hash, "Body download failed");
                        }
                    }
                    if this.body.is_none() {
                        // received bad response; re-issue the body request
                        this.request.body = Some(this.client.get_block_body(this.hash));
                    }
                }
            }

            if let Some(res) = this.take_block() {
                return Poll::Ready(res)
            }

            // ensure we still have enough budget for another iteration
            budget -= 1;
            if budget == 0 {
                // make sure we're woken up again
                cx.waker().wake_by_ref();
                return Poll::Pending
            }
        }
    }
}
impl<Client> Debug for FetchFullBlockFuture<Client>
where
    Client: BlockClient<Header: Debug, Body: Debug>,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `client`, `consensus` and the in-flight `request` are intentionally omitted
        // (they are not `Debug`-printable / not useful); `finish_non_exhaustive` appends
        // a trailing `..` to make the omission explicit in the output.
        f.debug_struct("FetchFullBlockFuture")
            .field("hash", &self.hash)
            .field("header", &self.header)
            .field("body", &self.body)
            .finish_non_exhaustive()
    }
}
/// The pair of in-flight header/body requests backing [`FetchFullBlockFuture`].
///
/// Each side is `Some` while a request is outstanding and `None` once its response has
/// been taken (or before a retry is scheduled).
struct FullBlockRequest<Client>
where
    Client: BlockClient,
{
    header: Option<SingleHeaderRequest<<Client as HeadersClient>::Output>>,
    body: Option<SingleBodyRequest<<Client as BodiesClient>::Output>>,
}

impl<Client> FullBlockRequest<Client>
where
    Client: BlockClient,
{
    /// Polls both sub-requests, returning whichever completes first and clearing its slot.
    /// Header responses take priority when both are ready in the same poll.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<ResponseResult<Client::Header, Client::Body>> {
        if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() {
            if let Poll::Ready(res) = fut.poll(cx) {
                self.header = None;
                return Poll::Ready(ResponseResult::Header(res))
            }
        }

        if let Some(fut) = Pin::new(&mut self.body).as_pin_mut() {
            if let Poll::Ready(res) = fut.poll(cx) {
                self.body = None;
                return Poll::Ready(ResponseResult::Body(res))
            }
        }

        Poll::Pending
    }
}
/// The result of a request for a single header or body. This is yielded by the `FullBlockRequest`
/// future.
enum ResponseResult<H, B> {
    /// A header response (possibly empty, possibly from a misbehaving peer).
    Header(PeerRequestResult<Option<H>>),
    /// A body response (possibly empty, possibly from a misbehaving peer).
    Body(PeerRequestResult<Option<B>>),
}

/// The response of a body request.
#[derive(Debug)]
enum BodyResponse<B> {
    /// Already validated against transaction root of header
    Validated(B),
    /// Still needs to be validated against header; keeps the peer id so the sender can be
    /// penalized if validation fails later.
    PendingValidation(WithPeerId<B>),
}
/// A future that downloads a range of full blocks from the network.
///
/// This first fetches the headers for the given range using the inner `Client`. Once the request
/// is complete, it will fetch the bodies for the headers it received.
///
/// Once the bodies request completes, the [`SealedBlock`]s will be assembled and the future will
/// yield the full block range.
///
/// The full block range will be returned with falling block numbers, i.e. in descending order.
///
/// NOTE: this assumes that bodies responses are returned by the client in the same order as the
/// hash array used to request them.
#[must_use = "futures do nothing unless polled"]
#[expect(missing_debug_implementations)]
pub struct FetchFullBlockRangeFuture<Client>
where
    Client: BlockClient,
{
    /// The client used to fetch headers and bodies.
    client: Client,
    /// The consensus instance used to validate the blocks.
    consensus: Arc<dyn Consensus<Client::Block, Error = ConsensusError>>,
    /// The block hash to start fetching from (inclusive).
    start_hash: B256,
    /// How many blocks to fetch: `len([start_hash, ..]) == count`
    count: u64,
    /// Requests for headers and bodies that are in progress.
    request: FullBlockRangeRequest<Client>,
    /// Fetched headers.
    headers: Option<Vec<SealedHeader<Client::Header>>>,
    /// The next headers to request bodies for. This is drained as responses are received.
    pending_headers: VecDeque<SealedHeader<Client::Header>>,
    /// The bodies that have been received so far.
    bodies: HashMap<SealedHeader<Client::Header>, BodyResponse<Client::Body>>,
}
impl<Client> FetchFullBlockRangeFuture<Client>
where
    Client: BlockClient<Header: Debug + BlockHeader + Sealable + Clone + Hash + Eq>,
{
    /// Returns the block hashes for the given range, if they are available.
    pub fn range_block_hashes(&self) -> Option<Vec<B256>> {
        self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect())
    }

    /// Returns whether or not the bodies map is fully populated with requested headers and bodies.
    fn is_bodies_complete(&self) -> bool {
        self.bodies.len() == self.count as usize
    }

    /// Inserts a block body, matching it with the `next_header`.
    ///
    /// Note: this assumes the response matches the next header in the queue.
    fn insert_body(&mut self, body_response: BodyResponse<Client::Body>) {
        if let Some(header) = self.pending_headers.pop_front() {
            self.bodies.insert(header, body_response);
        }
    }

    /// Inserts multiple block bodies.
    fn insert_bodies(&mut self, bodies: impl IntoIterator<Item = BodyResponse<Client::Body>>) {
        for body in bodies {
            self.insert_body(body);
        }
    }

    /// Returns the remaining hashes for the bodies request, based on the headers that still exist
    /// in the `root_map`.
    fn remaining_bodies_hashes(&self) -> Vec<B256> {
        self.pending_headers.iter().map(|h| h.hash()).collect()
    }

    /// Returns the [`SealedBlock`]s if the request is complete and valid.
    ///
    /// The request is complete if the number of blocks requested is equal to the number of blocks
    /// received. The request is valid if the returned bodies match the roots in the headers.
    ///
    /// These are returned in falling order starting with the requested `hash`, i.e. with
    /// descending block numbers.
    fn take_blocks(&mut self) -> Option<Vec<SealedBlock<Client::Block>>> {
        if !self.is_bodies_complete() {
            // not done with bodies yet
            return None
        }

        let headers = self.headers.take()?;
        let mut needs_retry = false;
        let mut valid_responses = Vec::new();

        for header in &headers {
            if let Some(body_resp) = self.bodies.remove(header) {
                // validate body w.r.t. the hashes in the header, only inserting into the response
                let body = match body_resp {
                    BodyResponse::Validated(body) => body,
                    BodyResponse::PendingValidation(resp) => {
                        // ensure the block is valid, else retry
                        if let Err(err) =
                            self.consensus.validate_body_against_header(resp.data(), header)
                        {
                            debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body in range response");
                            self.client.report_bad_message(resp.peer_id());

                            // get body that doesn't match, put back into vecdeque, and retry it
                            self.pending_headers.push_back(header.clone());
                            needs_retry = true;
                            continue
                        }

                        resp.into_data()
                    }
                };

                valid_responses
                    .push(SealedBlock::<Client::Block>::from_sealed_parts(header.clone(), body));
            }
        }

        if needs_retry {
            // put response hashes back into bodies map since we aren't returning them as a
            // response
            for block in valid_responses {
                let (header, body) = block.split_sealed_header_body();
                self.bodies.insert(header, BodyResponse::Validated(body));
            }

            // put headers back since they were `take`n before
            self.headers = Some(headers);

            // create response for failing bodies
            let hashes = self.remaining_bodies_hashes();
            self.request.bodies = Some(self.client.get_block_bodies(hashes));

            return None
        }

        Some(valid_responses)
    }

    /// Processes a headers response: accepts it only if it has exactly `count` entries and
    /// starts at `start_hash`, then kicks off the bodies request for the sealed headers.
    fn on_headers_response(&mut self, headers: WithPeerId<Vec<Client::Header>>) {
        let (peer, mut headers_falling) =
            headers.map(|h| h.into_iter().map(SealedHeader::seal_slow).collect::<Vec<_>>()).split();

        // fill in the response if it's the correct length
        if headers_falling.len() == self.count as usize {
            // sort headers from highest to lowest block number
            headers_falling.sort_unstable_by_key(|h| Reverse(h.number()));

            // check the starting hash
            if headers_falling[0].hash() == self.start_hash {
                let headers_rising = headers_falling.iter().rev().cloned().collect::<Vec<_>>();
                // check if the downloaded headers are valid
                if let Err(err) = self.consensus.validate_header_range(&headers_rising) {
                    debug!(target: "downloaders", %err, ?self.start_hash, "Received bad header response");
                    self.client.report_bad_message(peer);
                }
                // NOTE(review): on range-validation failure the peer is reported but the headers
                // are still accepted below (no early return) — confirm this is intentional.

                // get the bodies request so it can be polled later
                let hashes = headers_falling.iter().map(|h| h.hash()).collect::<Vec<_>>();

                // populate the pending headers
                self.pending_headers = headers_falling.clone().into();

                // set the actual request if it hasn't been started yet
                if !self.has_bodies_request_started() {
                    // request the bodies for the downloaded headers
                    self.request.bodies = Some(self.client.get_block_bodies(hashes));
                }

                // set the headers response
                self.headers = Some(headers_falling);
            } else {
                // received a different header than requested
                self.client.report_bad_message(peer);
            }
        }
    }

    /// Returns whether or not a bodies request has been started, returning false if there is no
    /// pending request.
    const fn has_bodies_request_started(&self) -> bool {
        self.request.bodies.is_some()
    }

    /// Returns the start hash for the request
    pub const fn start_hash(&self) -> B256 {
        self.start_hash
    }

    /// Returns the block count for the request
    pub const fn count(&self) -> u64 {
        self.count
    }
}
impl<Client> Future for FetchFullBlockRangeFuture<Client>
where
    Client: BlockClient<Header: Debug + BlockHeader + Sealable + Clone + Hash + Eq> + 'static,
{
    type Output = Vec<SealedBlock<Client::Block>>;

    /// Drives the headers request to completion, then the bodies request(s), retrying either
    /// on bad responses, and resolves with the full validated range in descending order.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();

        loop {
            match ready!(this.request.poll(cx)) {
                // This branch handles headers responses from peers - it first ensures that the
                // starting hash and number of headers matches what we requested.
                //
                // If these don't match, we penalize the peer and retry the request.
                // If they do match, we sort the headers by block number and start the request for
                // the corresponding block bodies.
                //
                // The next result that should be yielded by `poll` is the bodies response.
                RangeResponseResult::Header(res) => {
                    match res {
                        Ok(headers) => {
                            this.on_headers_response(headers);
                        }
                        Err(err) => {
                            debug!(target: "downloaders", %err, ?this.start_hash, "Header range download failed");
                        }
                    }

                    if this.headers.is_none() {
                        // did not receive a correct response yet, retry
                        this.request.headers = Some(this.client.get_headers(HeadersRequest {
                            start: this.start_hash.into(),
                            limit: this.count,
                            direction: HeadersDirection::Falling,
                        }));
                    }
                }
                // This branch handles block body responses from peers - it first inserts the
                // bodies into the `bodies` map, and then checks if the request is complete.
                //
                // If the request is not complete, and we need to request more bodies, we send
                // a bodies request for the headers we don't yet have bodies for.
                RangeResponseResult::Body(res) => {
                    match res {
                        Ok(bodies_resp) => {
                            let (peer, new_bodies) = bodies_resp.split();

                            // first insert the received bodies
                            this.insert_bodies(
                                new_bodies
                                    .into_iter()
                                    .map(|resp| WithPeerId::new(peer, resp))
                                    .map(BodyResponse::PendingValidation),
                            );

                            if !this.is_bodies_complete() {
                                // get remaining hashes so we can send the next request
                                let req_hashes = this.remaining_bodies_hashes();

                                // set a new request
                                this.request.bodies = Some(this.client.get_block_bodies(req_hashes))
                            }
                        }
                        Err(err) => {
                            debug!(target: "downloaders", %err, ?this.start_hash, "Body range download failed");
                        }
                    }

                    if this.bodies.is_empty() {
                        // received bad response, re-request headers
                        // TODO: convert this into two futures, one which is a headers range
                        // future, and one which is a bodies range future.
                        //
                        // The headers range future should yield the bodies range future.
                        // The bodies range future should not have an Option<Vec<B256>>, it should
                        // have a populated Vec<B256> from the successful headers range future.
                        //
                        // This is optimal because we can not send a bodies request without
                        // first completing the headers request. This way we can get rid of the
                        // following `if let Some`. A bodies request should never be sent before
                        // the headers request completes, so this should always be `Some` anyways.
                        let hashes = this.remaining_bodies_hashes();
                        if !hashes.is_empty() {
                            this.request.bodies = Some(this.client.get_block_bodies(hashes));
                        }
                    }
                }
            }

            if let Some(res) = this.take_blocks() {
                return Poll::Ready(res)
            }
        }
    }
}
/// A request for a range of full blocks. Polling this will poll the inner headers and bodies
/// futures until they return responses. It will return either the header or body result, depending
/// on which future successfully returned.
struct FullBlockRangeRequest<Client>
where
    Client: BlockClient,
{
    /// Outstanding headers request, if any.
    headers: Option<<Client as HeadersClient>::Output>,
    /// Outstanding bodies request, if any (only created after headers arrive).
    bodies: Option<<Client as BodiesClient>::Output>,
}

impl<Client> FullBlockRangeRequest<Client>
where
    Client: BlockClient,
{
    /// Polls both sub-requests, returning whichever completes first and clearing its slot.
    /// Header responses take priority when both are ready in the same poll.
    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<RangeResponseResult<Client::Header, Client::Body>> {
        if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() {
            if let Poll::Ready(res) = fut.poll(cx) {
                self.headers = None;
                return Poll::Ready(RangeResponseResult::Header(res))
            }
        }

        if let Some(fut) = Pin::new(&mut self.bodies).as_pin_mut() {
            if let Poll::Ready(res) = fut.poll(cx) {
                self.bodies = None;
                return Poll::Ready(RangeResponseResult::Body(res))
            }
        }

        Poll::Pending
    }
}
/// The result of a request for headers or block bodies. This is yielded by the
/// `FullBlockRangeRequest` future.
enum RangeResponseResult<H, B> {
    Header(PeerRequestResult<Vec<H>>),
    Body(PeerRequestResult<Vec<B>>),
}
/// A headers+bodies client implementation that does nothing.
///
/// Useful as a placeholder where a [`BlockClient`] is required but no networking is wanted;
/// all requests resolve immediately with empty responses.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct NoopFullBlockClient<Net = EthNetworkPrimitives>(PhantomData<Net>);

/// Implements the `DownloadClient` trait for the `NoopFullBlockClient` struct.
impl<Net> DownloadClient for NoopFullBlockClient<Net>
where
    Net: Debug + Send + Sync,
{
    /// Reports a bad message received from a peer.
    ///
    /// # Arguments
    ///
    /// * `_peer_id` - Identifier for the peer sending the bad message (unused in this
    ///   implementation).
    fn report_bad_message(&self, _peer_id: PeerId) {}

    /// Retrieves the number of connected peers.
    ///
    /// # Returns
    ///
    /// The number of connected peers, which is always zero in this implementation.
    fn num_connected_peers(&self) -> usize {
        0
    }
}
/// Implements the `BodiesClient` trait for the `NoopFullBlockClient` struct.
impl<Net> BodiesClient for NoopFullBlockClient<Net>
where
    Net: NetworkPrimitives,
{
    type Body = Net::BlockBody;
    /// Defines the output type of the function.
    type Output = futures::future::Ready<PeerRequestResult<Vec<Self::Body>>>;

    /// Retrieves block bodies based on provided hashes and priority.
    ///
    /// # Arguments
    ///
    /// * `_hashes` - A vector of block hashes (unused in this implementation).
    /// * `_priority` - Priority level for block body retrieval (unused in this implementation).
    /// * `_range_hint` - Optional hint for the block range (unused in this implementation).
    ///
    /// # Returns
    ///
    /// A future containing an empty vector of block bodies and a randomly generated `PeerId`.
    fn get_block_bodies_with_priority_and_range_hint(
        &self,
        _hashes: Vec<B256>,
        _priority: Priority,
        _range_hint: Option<RangeInclusive<u64>>,
    ) -> Self::Output {
        // Create a future that immediately returns an empty vector of block bodies and a random
        // PeerId.
        futures::future::ready(Ok(WithPeerId::new(PeerId::random(), vec![])))
    }
}
impl<Net> HeadersClient for NoopFullBlockClient<Net>
where
    Net: NetworkPrimitives,
{
    type Header = Net::BlockHeader;
    /// The output type representing a future containing a peer request result with a vector of
    /// headers.
    type Output = futures::future::Ready<PeerRequestResult<Vec<Self::Header>>>;

    /// Retrieves headers with a specified priority level.
    ///
    /// This implementation does nothing and returns an empty vector of headers.
    ///
    /// # Arguments
    ///
    /// * `_request` - A request for headers (unused in this implementation).
    /// * `_priority` - The priority level for the headers request (unused in this implementation).
    ///
    /// # Returns
    ///
    /// Always returns a ready future with an empty vector of headers wrapped in a
    /// `PeerRequestResult`.
    fn get_headers_with_priority(
        &self,
        _request: HeadersRequest,
        _priority: Priority,
    ) -> Self::Output {
        futures::future::ready(Ok(WithPeerId::new(PeerId::random(), vec![])))
    }
}
/// Marks the no-op client as a full [`BlockClient`] over the network's block type.
impl<Net> BlockClient for NoopFullBlockClient<Net>
where
    Net: NetworkPrimitives,
{
    type Block = Net::Block;
}
impl<Net> Default for NoopFullBlockClient<Net> {
    /// Creates a no-op client; `PhantomData` only carries the network-primitives type.
    fn default() -> Self {
        // The turbofish `PhantomData::<Net>` is redundant — the type is inferred from `Self`.
        Self(PhantomData)
    }
}
#[cfg(test)]
mod tests {
use reth_ethereum_primitives::BlockBody;
use super::*;
use crate::test_utils::TestFullBlockClient;
use std::ops::Range;
#[tokio::test]
async fn download_single_full_block() {
let client = TestFullBlockClient::default();
let header: SealedHeader = SealedHeader::default();
let body = BlockBody::default();
client.insert(header.clone(), body.clone());
let client = FullBlockClient::test_client(client);
let received = client.get_full_block(header.hash()).await;
assert_eq!(received, SealedBlock::from_sealed_parts(header, body));
}
#[tokio::test]
async fn download_single_full_block_range() {
let client = TestFullBlockClient::default();
let header: SealedHeader = SealedHeader::default();
let body = BlockBody::default();
client.insert(header.clone(), body.clone());
let client = FullBlockClient::test_client(client);
let received = client.get_full_block_range(header.hash(), 1).await;
let received = received.first().expect("response should include a block");
assert_eq!(*received, SealedBlock::from_sealed_parts(header, body));
}
/// Inserts headers and returns the last header and block body.
fn insert_headers_into_client(
client: &TestFullBlockClient,
range: Range<usize>,
) -> (SealedHeader, BlockBody) {
let mut sealed_header: SealedHeader = SealedHeader::default();
let body = BlockBody::default();
for _ in range {
let (mut header, hash) = sealed_header.split();
// update to the next header
header.parent_hash = hash;
header.number += 1;
sealed_header = SealedHeader::seal_slow(header);
client.insert(sealed_header.clone(), body.clone());
}
(sealed_header, body)
}
#[tokio::test]
async fn download_full_block_range() {
let client = TestFullBlockClient::default();
let (header, body) = insert_headers_into_client(&client, 0..50);
let client = FullBlockClient::test_client(client);
let received = client.get_full_block_range(header.hash(), 1).await;
let received = received.first().expect("response should include a block");
assert_eq!(*received, SealedBlock::from_sealed_parts(header.clone(), body));
let received = client.get_full_block_range(header.hash(), 10).await;
assert_eq!(received.len(), 10);
for (i, block) in received.iter().enumerate() {
let expected_number = header.number - i as u64;
assert_eq!(block.number, expected_number);
}
}
#[tokio::test]
async fn download_full_block_range_over_soft_limit() {
// default soft limit is 20, so we will request 50 blocks
let client = TestFullBlockClient::default();
let (header, body) = insert_headers_into_client(&client, 0..50);
let client = FullBlockClient::test_client(client);
let received = client.get_full_block_range(header.hash(), 1).await;
let received = received.first().expect("response should include a block");
assert_eq!(*received, SealedBlock::from_sealed_parts(header.clone(), body));
let received = client.get_full_block_range(header.hash(), 50).await;
assert_eq!(received.len(), 50);
for (i, block) in received.iter().enumerate() {
let expected_number = header.number - i as u64;
assert_eq!(block.number, expected_number);
}
}
#[tokio::test]
async fn download_full_block_range_with_invalid_header() {
let client = TestFullBlockClient::default();
let range_length: usize = 3;
let (header, _) = insert_headers_into_client(&client, 0..range_length);
let test_consensus = reth_consensus::test_utils::TestConsensus::default();
test_consensus.set_fail_validation(true);
test_consensus.set_fail_body_against_header(false);
let client = FullBlockClient::new(client, Arc::new(test_consensus));
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/lib.rs | crates/net/p2p/src/lib.rs | //! Provides abstractions and commonly used types for p2p.
//!
//! ## Feature Flags
//!
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// Shared abstractions for downloader implementations.
pub mod download;
/// Traits for implementing P2P block body clients.
pub mod bodies;
/// A downloader that combines two different downloaders/client implementations.
pub mod either;
/// An implementation that uses headers and bodies traits to download full blocks
pub mod full_block;
pub use full_block::{FullBlockClient, NoopFullBlockClient};
/// Traits for implementing P2P Header Clients. Also includes implementations
/// of a Linear and a Parallel downloader generic over the [`Consensus`] and
/// [`HeadersClient`].
///
/// [`Consensus`]: reth_consensus::Consensus
/// [`HeadersClient`]: crate::headers::client::HeadersClient
pub mod headers;
/// Error types broadly used by p2p interfaces for any operation which may produce an error when
/// interacting with the network implementation
pub mod error;
/// Priority enum for `BlockHeader` and `BlockBody` requests
pub mod priority;
/// Syncing related traits.
pub mod sync;
/// Snap related traits.
pub mod snap;
/// Common test helpers for mocking out Consensus, Downloaders and Header Clients.
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
pub use bodies::client::BodiesClient;
pub use headers::client::HeadersClient;
use reth_primitives_traits::Block;
/// Helper trait that unifies network behaviour needed for fetching entire blocks.
///
/// Ties the header and body clients together so that both serve parts of the same
/// [`Block`] type; `Unpin + Clone` allow the client to be moved into request futures.
pub trait BlockClient:
    HeadersClient<Header = <Self::Block as Block>::Header>
    + BodiesClient<Body = <Self::Block as Block>::Body>
    + Unpin
    + Clone
{
    /// The Block type that this client fetches.
    type Block: Block;
}
/// The [`BlockClient`] providing Ethereum block parts.
pub trait EthBlockClient: BlockClient<Block = reth_ethereum_primitives::Block> {}

// Blanket impl: any client over the Ethereum block type is automatically an `EthBlockClient`.
impl<T> EthBlockClient for T where T: BlockClient<Block = reth_ethereum_primitives::Block> {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/download.rs | crates/net/p2p/src/download.rs | use reth_network_peers::PeerId;
use std::fmt::Debug;
/// Generic download client for peer penalization
// `auto_impl` derives the trait for references and smart pointers to any implementor.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait DownloadClient: Send + Sync + Debug {
    /// Penalize the peer for responding with a message
    /// that violates validation rules
    fn report_bad_message(&self, peer_id: PeerId);

    /// Returns how many peers the network is currently connected to.
    fn num_connected_peers(&self) -> usize;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/sync.rs | crates/net/p2p/src/sync.rs | //! Traits used when interacting with the sync status of the network.
use alloy_eips::eip2124::Head;
use reth_eth_wire_types::BlockRangeUpdate;
/// A type that provides information about whether the node is currently syncing and the network is
/// currently serving syncing related requests.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait SyncStateProvider: Send + Sync {
    /// Returns `true` if the network is undergoing sync.
    fn is_syncing(&self) -> bool;

    /// Returns `true` if the network is undergoing an initial (pipeline) sync.
    fn is_initially_syncing(&self) -> bool;
}
/// An updater for updating the [SyncState] and status of the network.
///
/// The node is either syncing, or it is idle.
/// While syncing, the node will download data from the network and process it. The processing
/// consists of several stages, like recovering senders, executing the blocks and indexing.
/// Eventually the node reaches the `Finish` stage and will transition to [`SyncState::Idle`], at
/// which point the node is considered fully synced.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait NetworkSyncUpdater: std::fmt::Debug + Send + Sync + 'static {
    /// Notifies about a [`SyncState`] update.
    fn update_sync_state(&self, state: SyncState);
    /// Updates the status of the p2p node.
    fn update_status(&self, head: Head);
    /// Updates the advertised block range.
    fn update_block_range(&self, update: BlockRangeUpdate);
}
/// The state the network is currently in when it comes to synchronization.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum SyncState {
    /// Node sync is complete.
    ///
    /// The network just serves requests to keep up of the chain.
    Idle,
    /// Network is syncing
    Syncing,
}
impl SyncState {
    /// Whether the node is currently syncing.
    ///
    /// Note: this does not include keep-up sync when the state is idle.
    pub const fn is_syncing(&self) -> bool {
        // Exhaustive match so that a newly added variant forces a conscious decision here.
        match self {
            Self::Idle => false,
            Self::Syncing => true,
        }
    }
}
/// A [`NetworkSyncUpdater`] implementation that does nothing.
///
/// Useful for tests or configurations that do not need to track sync state.
#[derive(Clone, Copy, Debug, Default)]
#[non_exhaustive]
pub struct NoopSyncStateUpdater;
impl SyncStateProvider for NoopSyncStateUpdater {
    // Always reports "not syncing".
    fn is_syncing(&self) -> bool {
        false
    }
    fn is_initially_syncing(&self) -> bool {
        false
    }
}
impl NetworkSyncUpdater for NoopSyncStateUpdater {
    // All updates are intentionally discarded.
    fn update_sync_state(&self, _state: SyncState) {}
    fn update_status(&self, _: Head) {}
    fn update_block_range(&self, _update: BlockRangeUpdate) {}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/error.rs | crates/net/p2p/src/error.rs | use std::ops::RangeInclusive;
use super::headers::client::HeadersRequest;
use alloy_consensus::BlockHeader;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{BlockNumber, B256};
use derive_more::{Display, Error};
use reth_consensus::ConsensusError;
use reth_network_peers::WithPeerId;
use reth_network_types::ReputationChangeKind;
use reth_primitives_traits::{GotExpected, GotExpectedBoxed};
use reth_storage_errors::{db::DatabaseError, provider::ProviderError};
use tokio::sync::{mpsc, oneshot};
/// Result alias for result of a request.
pub type RequestResult<T> = Result<T, RequestError>;
/// Result with [`PeerId`][reth_network_peers::PeerId] attached, identifying the responder.
pub type PeerRequestResult<T> = RequestResult<WithPeerId<T>>;
/// Helper trait used to validate responses.
pub trait EthResponseValidator {
    /// Determine whether the response matches what we requested in [`HeadersRequest`]
    fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool;
    /// Return the response reputation impact if any
    fn reputation_change_err(&self) -> Option<ReputationChangeKind>;
}
impl<H: BlockHeader> EthResponseValidator for RequestResult<Vec<H>> {
    fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool {
        // Any request error counts as a bad response.
        let Ok(headers) = self else { return true };
        let received = headers.len() as u64;
        // An empty or single-header response that doesn't match the requested limit is
        // suspicious.
        if received <= 1 && request.limit != received {
            return true
        }
        match request.start {
            // When requesting by number, the first returned header must start there.
            BlockHashOrNumber::Number(requested_number) => headers
                .first()
                .is_some_and(|first| first.number() != requested_number),
            // Verifying a hash-based request would require hashing the header; skip it.
            BlockHashOrNumber::Hash(_) => false,
        }
    }
    /// [`RequestError::ChannelClosed`] is not possible here since these errors are mapped to
    /// `ConnectionDropped`, which will be handled when the dropped connection is cleaned up.
    ///
    /// [`RequestError::ConnectionDropped`] should be ignored here because this is already handled
    /// when the dropped connection is handled.
    ///
    /// [`RequestError::UnsupportedCapability`] is not used yet because we only support active
    /// session for eth protocol.
    fn reputation_change_err(&self) -> Option<ReputationChangeKind> {
        match self {
            Ok(_) => None,
            // Only timeouts are penalized; the remaining variants are handled elsewhere
            // (see the doc comment above).
            Err(RequestError::Timeout) => Some(ReputationChangeKind::Timeout),
            Err(
                RequestError::ChannelClosed |
                RequestError::ConnectionDropped |
                RequestError::UnsupportedCapability |
                RequestError::BadResponse,
            ) => None,
        }
    }
}
/// Error variants that can happen when sending requests to a session.
///
/// Represents errors encountered when sending requests.
#[derive(Clone, Debug, Eq, PartialEq, Display, Error)]
pub enum RequestError {
    /// Closed channel to the peer.
    #[display("closed channel to the peer")]
    ChannelClosed,
    /// Connection to a peer dropped while handling the request.
    #[display("connection to a peer dropped while handling the request")]
    ConnectionDropped,
    /// Capability message is not supported by the remote peer.
    #[display("capability message is not supported by remote peer")]
    UnsupportedCapability,
    /// Request timed out while awaiting response.
    #[display("request timed out while awaiting response")]
    Timeout,
    /// Received bad response.
    #[display("received bad response")]
    BadResponse,
}
// === impl RequestError ===
impl RequestError {
    /// Indicates whether this error is retryable or fatal.
    pub const fn is_retryable(&self) -> bool {
        // Exhaustive on purpose: a new variant must be classified explicitly.
        match self {
            Self::Timeout | Self::ConnectionDropped => true,
            Self::ChannelClosed | Self::UnsupportedCapability | Self::BadResponse => false,
        }
    }
    /// Whether the error happened because the channel was closed.
    pub const fn is_channel_closed(&self) -> bool {
        matches!(self, Self::ChannelClosed)
    }
}
// A failed `mpsc` send means the receiving session is gone.
impl<T> From<mpsc::error::SendError<T>> for RequestError {
    fn from(_: mpsc::error::SendError<T>) -> Self {
        Self::ChannelClosed
    }
}
// A failed `oneshot` receive means the responder dropped its sender.
impl From<oneshot::error::RecvError> for RequestError {
    fn from(_: oneshot::error::RecvError) -> Self {
        Self::ChannelClosed
    }
}
/// The download result type
pub type DownloadResult<T> = Result<T, DownloadError>;
/// The downloader error type
#[derive(Debug, Clone, Display, Error)]
pub enum DownloadError {
    /* ==================== HEADER ERRORS ==================== */
    /// Header validation failed.
    #[display("failed to validate header {hash}, block number {number}: {error}")]
    HeaderValidation {
        /// Hash of header failing validation
        hash: B256,
        /// Number of header failing validation
        number: u64,
        /// The details of validation failure
        // Boxed to keep this variant (and the whole enum) small.
        #[error(source)]
        error: Box<ConsensusError>,
    },
    /// Received an invalid tip.
    #[display("received invalid tip: {_0}")]
    InvalidTip(GotExpectedBoxed<B256>),
    /// Received a tip with an invalid tip number.
    #[display("received invalid tip number: {_0}")]
    InvalidTipNumber(GotExpected<u64>),
    /// Received a response to a request with unexpected start block
    #[display("headers response starts at unexpected block: {_0}")]
    HeadersResponseStartBlockMismatch(GotExpected<u64>),
    /// Received headers with less than expected items.
    #[display("received less headers than expected: {_0}")]
    HeadersResponseTooShort(GotExpected<u64>),
    /* ==================== BODIES ERRORS ==================== */
    /// Block validation failed
    #[display("failed to validate body for header {hash}, block number {number}: {error}")]
    BodyValidation {
        /// Hash of the block failing validation
        hash: B256,
        /// Number of the block failing validation
        number: u64,
        /// The details of validation failure
        error: Box<ConsensusError>,
    },
    /// Received more bodies than requested.
    #[display("received more bodies than requested: {_0}")]
    TooManyBodies(GotExpected<usize>),
    /// Headers missing from the database.
    #[display("header missing from the database: {block_number}")]
    MissingHeader {
        /// Missing header block number.
        block_number: BlockNumber,
    },
    /// Body range invalid
    #[display("requested body range is invalid: {range:?}")]
    InvalidBodyRange {
        /// Invalid block number range.
        range: RangeInclusive<BlockNumber>,
    },
    /* ==================== COMMON ERRORS ==================== */
    /// Timed out while waiting for request id response.
    #[display("timed out while waiting for response")]
    Timeout,
    /// Received empty response while expecting non empty
    #[display("received empty response")]
    EmptyResponse,
    /// Error while executing the request.
    RequestError(RequestError),
    /// Provider error.
    Provider(ProviderError),
}
// Database errors surface through the provider error variant.
impl From<DatabaseError> for DownloadError {
    fn from(error: DatabaseError) -> Self {
        Self::Provider(ProviderError::Database(error))
    }
}
impl From<RequestError> for DownloadError {
    fn from(error: RequestError) -> Self {
        Self::RequestError(error)
    }
}
impl From<ProviderError> for DownloadError {
    fn from(error: ProviderError) -> Self {
        Self::Provider(error)
    }
}
#[cfg(test)]
mod tests {
    use alloy_consensus::Header;
    use super::*;
    #[test]
    fn test_is_likely_bad_headers_response() {
        // Empty response to a zero-limit request: lengths agree, not suspicious.
        let request =
            HeadersRequest { start: 0u64.into(), limit: 0, direction: Default::default() };
        let headers: Vec<Header> = vec![];
        assert!(!Ok(headers).is_likely_bad_headers_response(&request));
        // Empty response although one header was requested: likely a bad response.
        let request =
            HeadersRequest { start: 0u64.into(), limit: 1, direction: Default::default() };
        let headers: Vec<Header> = vec![];
        assert!(Ok(headers).is_likely_bad_headers_response(&request));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/either.rs | crates/net/p2p/src/either.rs | //! Support for different download types.
use std::ops::RangeInclusive;
use crate::{
bodies::client::BodiesClient,
download::DownloadClient,
headers::client::{HeadersClient, HeadersRequest},
priority::Priority,
};
use alloy_primitives::B256;
pub use futures::future::Either;
// Delegates every `DownloadClient` call to whichever side of the `Either` is active.
impl<A, B> DownloadClient for Either<A, B>
where
    A: DownloadClient,
    B: DownloadClient,
{
    fn report_bad_message(&self, peer_id: reth_network_peers::PeerId) {
        match self {
            Self::Left(client) => client.report_bad_message(peer_id),
            Self::Right(client) => client.report_bad_message(peer_id),
        }
    }
    fn num_connected_peers(&self) -> usize {
        match self {
            Self::Left(client) => client.num_connected_peers(),
            Self::Right(client) => client.num_connected_peers(),
        }
    }
}
// Both sides must fetch the same body type; the returned future mirrors the active side.
impl<A, B> BodiesClient for Either<A, B>
where
    A: BodiesClient,
    B: BodiesClient<Body = A::Body>,
{
    type Body = A::Body;
    type Output = Either<A::Output, B::Output>;
    fn get_block_bodies_with_priority_and_range_hint(
        &self,
        hashes: Vec<B256>,
        priority: Priority,
        range_hint: Option<RangeInclusive<u64>>,
    ) -> Self::Output {
        match self {
            Self::Left(a) => Either::Left(
                a.get_block_bodies_with_priority_and_range_hint(hashes, priority, range_hint),
            ),
            Self::Right(b) => Either::Right(
                b.get_block_bodies_with_priority_and_range_hint(hashes, priority, range_hint),
            ),
        }
    }
}
// Both sides must fetch the same header type; the returned future mirrors the active side.
impl<A, B> HeadersClient for Either<A, B>
where
    A: HeadersClient,
    B: HeadersClient<Header = A::Header>,
{
    type Header = A::Header;
    type Output = Either<A::Output, B::Output>;
    fn get_headers_with_priority(
        &self,
        request: HeadersRequest,
        priority: Priority,
    ) -> Self::Output {
        match self {
            Self::Left(a) => Either::Left(a.get_headers_with_priority(request, priority)),
            Self::Right(b) => Either::Right(b.get_headers_with_priority(request, priority)),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/priority.rs | crates/net/p2p/src/priority.rs | /// `BlockHeader` and `BodyHeader` `DownloadRequest` priority
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Priority {
    /// Queued from the back for download requests.
    #[default]
    Normal,
    /// Queued from the front for download requests.
    High,
}
impl Priority {
    /// Returns `true` if this is [`Priority::High`]
    pub const fn is_high(&self) -> bool {
        match self {
            Self::High => true,
            Self::Normal => false,
        }
    }
    /// Returns `true` if this is [`Priority::Normal`]
    pub const fn is_normal(&self) -> bool {
        // With only two variants, "normal" is exactly "not high".
        !self.is_high()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/bodies/response.rs | crates/net/p2p/src/bodies/response.rs | use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockNumber, U256};
use reth_primitives_traits::{Block, InMemorySize, SealedBlock, SealedHeader};
/// The block response
///
/// Distinguishes a full block from an "empty" block whose body is implied by its
/// header, so that no body has to be carried around for the latter.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum BlockResponse<B: Block> {
    /// Full block response (with transactions or ommers)
    Full(SealedBlock<B>),
    /// The empty block response
    Empty(SealedHeader<B::Header>),
}
impl<B> BlockResponse<B>
where
    B: Block,
{
    /// Return the block number
    pub fn block_number(&self) -> BlockNumber {
        match self {
            Self::Full(block) => block.number(),
            Self::Empty(header) => header.number(),
        }
    }
    /// Return the difficulty of the response header
    pub fn difficulty(&self) -> U256 {
        match self {
            Self::Full(block) => block.difficulty(),
            Self::Empty(header) => header.difficulty(),
        }
    }
    /// Consume the response and return its body, if any.
    ///
    /// Returns `None` for [`BlockResponse::Empty`], which carries no body.
    pub fn into_body(self) -> Option<B::Body> {
        match self {
            Self::Full(block) => Some(block.into_body()),
            Self::Empty(_) => None,
        }
    }
}
impl<B: Block> InMemorySize for BlockResponse<B> {
    #[inline]
    fn size(&self) -> usize {
        match self {
            Self::Full(block) => SealedBlock::size(block),
            Self::Empty(header) => SealedHeader::size(header),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/bodies/client.rs | crates/net/p2p/src/bodies/client.rs | use std::{
ops::RangeInclusive,
pin::Pin,
task::{ready, Context, Poll},
};
use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority};
use alloy_primitives::B256;
use futures::{Future, FutureExt};
use reth_primitives_traits::BlockBody;
/// The bodies future type
pub type BodiesFut<B = reth_ethereum_primitives::BlockBody> =
    Pin<Box<dyn Future<Output = PeerRequestResult<Vec<B>>> + Send + Sync>>;
/// A client capable of downloading block bodies.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait BodiesClient: DownloadClient {
    /// The body type this client fetches.
    type Body: BlockBody;
    /// The output of the request future for querying block bodies.
    type Output: Future<Output = PeerRequestResult<Vec<Self::Body>>> + Sync + Send + Unpin;
    /// Fetches the block body for the requested block.
    ///
    /// Convenience wrapper that defaults to [`Priority::Normal`].
    fn get_block_bodies(&self, hashes: Vec<B256>) -> Self::Output {
        self.get_block_bodies_with_priority(hashes, Priority::Normal)
    }
    /// Fetches the block body for the requested block with priority
    ///
    /// Convenience wrapper that passes no range hint.
    fn get_block_bodies_with_priority(
        &self,
        hashes: Vec<B256>,
        priority: Priority,
    ) -> Self::Output {
        self.get_block_bodies_with_priority_and_range_hint(hashes, priority, None)
    }
    /// Fetches the block body for the requested block with priority and a range hint for the
    /// requested blocks.
    ///
    /// The range hint is not required, but can be used to optimize the routing of the request if
    /// the hashes are continuous or close together and the range hint is `[earliest, latest]` for
    /// the requested blocks.
    fn get_block_bodies_with_priority_and_range_hint(
        &self,
        hashes: Vec<B256>,
        priority: Priority,
        range_hint: Option<RangeInclusive<u64>>,
    ) -> Self::Output;
    /// Fetches a single block body for the requested hash.
    fn get_block_body(&self, hash: B256) -> SingleBodyRequest<Self::Output> {
        self.get_block_body_with_priority(hash, Priority::Normal)
    }
    /// Fetches a single block body for the requested hash with priority
    fn get_block_body_with_priority(
        &self,
        hash: B256,
        priority: Priority,
    ) -> SingleBodyRequest<Self::Output> {
        // Reuse the batched request path with a single-element batch.
        let fut = self.get_block_bodies_with_priority(vec![hash], priority);
        SingleBodyRequest { fut }
    }
}
/// A Future that resolves to a single block body.
///
/// Wraps a batched bodies request and yields only the first returned body,
/// or `None` if the peer responded with an empty list.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct SingleBodyRequest<Fut> {
    fut: Fut,
}
impl<Fut, B> Future for SingleBodyRequest<Fut>
where
    Fut: Future<Output = PeerRequestResult<Vec<B>>> + Sync + Send + Unpin,
{
    type Output = PeerRequestResult<Option<B>>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Wait for the batched response, then keep only its first element.
        let batched = ready!(self.get_mut().fut.poll_unpin(cx));
        let single = batched.map(|peer_resp| peer_resp.map(|bodies| bodies.into_iter().next()));
        Poll::Ready(single)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/bodies/downloader.rs | crates/net/p2p/src/bodies/downloader.rs | use super::response::BlockResponse;
use crate::error::DownloadResult;
use alloy_primitives::BlockNumber;
use futures::Stream;
use reth_primitives_traits::Block;
use std::ops::RangeInclusive;
/// Body downloader return type.
pub type BodyDownloaderResult<B> = DownloadResult<Vec<BlockResponse<B>>>;
/// A downloader capable of fetching and yielding block bodies from block headers.
///
/// A downloader represents a distinct strategy for submitting requests to download block bodies,
/// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of
/// fulfilling these requests.
pub trait BodyDownloader:
    Send + Sync + Stream<Item = BodyDownloaderResult<Self::Block>> + Unpin
{
    /// The Block type this downloader supports
    type Block: Block + 'static;
    /// Method for setting the download range.
    fn set_download_range(&mut self, range: RangeInclusive<BlockNumber>) -> DownloadResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/bodies/mod.rs | crates/net/p2p/src/bodies/mod.rs | /// Traits and types for block body clients.
pub mod client;
/// Block body downloaders.
pub mod downloader;
/// Block response
pub mod response;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/snap/client.rs | crates/net/p2p/src/snap/client.rs | use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority};
use futures::Future;
use reth_eth_wire_types::snap::{
AccountRangeMessage, GetAccountRangeMessage, GetByteCodesMessage, GetStorageRangesMessage,
GetTrieNodesMessage,
};
/// The snap sync downloader client
///
/// Exposes the four `snap/1` request types (account ranges, storage ranges, byte
/// codes, trie nodes), each with a plain and a priority-aware variant. The plain
/// variants default to [`Priority::Normal`] and delegate to the priority-aware
/// ones, so implementors only need to provide the `_with_priority` methods.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait SnapClient: DownloadClient {
    /// The output future type for account range requests
    ///
    /// NOTE(review): this single associated future resolves to
    /// [`AccountRangeMessage`] and is reused by all request methods below —
    /// confirm whether the non-account requests are intended to share it.
    type Output: Future<Output = PeerRequestResult<AccountRangeMessage>> + Send + Sync + Unpin;
    /// Sends the account range request to the p2p network and returns the account range
    /// response received from a peer.
    fn get_account_range(&self, request: GetAccountRangeMessage) -> Self::Output {
        self.get_account_range_with_priority(request, Priority::Normal)
    }
    /// Sends the account range request to the p2p network with priority set and returns
    /// the account range response received from a peer.
    fn get_account_range_with_priority(
        &self,
        request: GetAccountRangeMessage,
        priority: Priority,
    ) -> Self::Output;
    /// Sends the storage ranges request to the p2p network and returns the storage ranges
    /// response received from a peer.
    fn get_storage_ranges(&self, request: GetStorageRangesMessage) -> Self::Output {
        self.get_storage_ranges_with_priority(request, Priority::Normal)
    }
    /// Sends the storage ranges request to the p2p network with priority set and returns
    /// the storage ranges response received from a peer.
    fn get_storage_ranges_with_priority(
        &self,
        request: GetStorageRangesMessage,
        priority: Priority,
    ) -> Self::Output;
    /// Sends the byte codes request to the p2p network and returns the byte codes
    /// response received from a peer.
    fn get_byte_codes(&self, request: GetByteCodesMessage) -> Self::Output {
        self.get_byte_codes_with_priority(request, Priority::Normal)
    }
    /// Sends the byte codes request to the p2p network with priority set and returns
    /// the byte codes response received from a peer.
    fn get_byte_codes_with_priority(
        &self,
        request: GetByteCodesMessage,
        priority: Priority,
    ) -> Self::Output;
    /// Sends the trie nodes request to the p2p network and returns the trie nodes
    /// response received from a peer.
    fn get_trie_nodes(&self, request: GetTrieNodesMessage) -> Self::Output {
        self.get_trie_nodes_with_priority(request, Priority::Normal)
    }
    /// Sends the trie nodes request to the p2p network with priority set and returns
    /// the trie nodes response received from a peer.
    fn get_trie_nodes_with_priority(
        &self,
        request: GetTrieNodesMessage,
        priority: Priority,
    ) -> Self::Output;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/snap/mod.rs | crates/net/p2p/src/snap/mod.rs | /// SNAP related traits.
pub mod client;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/headers/client.rs | crates/net/p2p/src/headers/client.rs | use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority};
use alloy_consensus::Header;
use alloy_eips::BlockHashOrNumber;
use futures::{Future, FutureExt};
pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection};
use reth_primitives_traits::BlockHeader;
use std::{
fmt::Debug,
pin::Pin,
task::{ready, Context, Poll},
};
/// The header request struct to be sent to connected peers, which
/// will proceed to ask them to stream the requested headers to us.
#[derive(Clone, Debug)]
pub struct HeadersRequest {
    /// The starting block
    pub start: BlockHashOrNumber,
    /// The response max size
    pub limit: u64,
    /// The direction in which headers should be returned.
    pub direction: HeadersDirection,
}
impl HeadersRequest {
    /// Creates a request for a single header (direction doesn't matter).
    ///
    /// # Arguments
    /// * `start` - The block hash or number to start from
    pub const fn one(start: BlockHashOrNumber) -> Self {
        Self { direction: HeadersDirection::Rising, limit: 1, start }
    }
    /// Creates a request for headers in rising direction (ascending block numbers).
    ///
    /// # Arguments
    /// * `start` - The block hash or number to start from
    /// * `limit` - Maximum number of headers to retrieve
    pub const fn rising(start: BlockHashOrNumber, limit: u64) -> Self {
        Self { direction: HeadersDirection::Rising, limit, start }
    }
    /// Creates a request for headers in falling direction (descending block numbers).
    ///
    /// # Arguments
    /// * `start` - The block hash or number to start from
    /// * `limit` - Maximum number of headers to retrieve
    pub const fn falling(start: BlockHashOrNumber, limit: u64) -> Self {
        Self { direction: HeadersDirection::Falling, limit, start }
    }
}
/// The headers future type
pub type HeadersFut<H = Header> =
    Pin<Box<dyn Future<Output = PeerRequestResult<Vec<H>>> + Send + Sync>>;
/// The block headers downloader client
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait HeadersClient: DownloadClient {
    /// The header type this client fetches.
    type Header: BlockHeader;
    /// The headers future type
    type Output: Future<Output = PeerRequestResult<Vec<Self::Header>>> + Sync + Send + Unpin;
    /// Sends the header request to the p2p network and returns the header response received from a
    /// peer.
    ///
    /// Convenience wrapper that defaults to [`Priority::Normal`].
    fn get_headers(&self, request: HeadersRequest) -> Self::Output {
        self.get_headers_with_priority(request, Priority::Normal)
    }
    /// Sends the header request to the p2p network with priority set and returns the header
    /// response received from a peer.
    fn get_headers_with_priority(
        &self,
        request: HeadersRequest,
        priority: Priority,
    ) -> Self::Output;
    /// Fetches a single header for the requested number or hash.
    fn get_header(&self, start: BlockHashOrNumber) -> SingleHeaderRequest<Self::Output> {
        self.get_header_with_priority(start, Priority::Normal)
    }
    /// Fetches a single header for the requested number or hash with priority
    fn get_header_with_priority(
        &self,
        start: BlockHashOrNumber,
        priority: Priority,
    ) -> SingleHeaderRequest<Self::Output> {
        // Reuse the batched request path with a limit-1 request.
        let req = HeadersRequest::one(start);
        let fut = self.get_headers_with_priority(req, priority);
        SingleHeaderRequest { fut }
    }
}
/// A Future that resolves to a single header.
///
/// Returns `None` if the peer responded with an empty header response.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct SingleHeaderRequest<Fut> {
    fut: Fut,
}
impl<Fut, H> Future for SingleHeaderRequest<Fut>
where
    Fut: Future<Output = PeerRequestResult<Vec<H>>> + Sync + Send + Unpin,
{
    type Output = PeerRequestResult<Option<H>>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Poll the batched request and keep only the first returned header.
        let resp = ready!(self.get_mut().fut.poll_unpin(cx));
        let resp = resp.map(|res| res.map(|headers| headers.into_iter().next()));
        Poll::Ready(resp)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/headers/downloader.rs | crates/net/p2p/src/headers/downloader.rs | use super::error::HeadersDownloaderResult;
use crate::error::{DownloadError, DownloadResult};
use alloy_eips::{eip1898::BlockWithParent, BlockHashOrNumber};
use alloy_primitives::{Sealable, B256};
use futures::Stream;
use reth_consensus::HeaderValidator;
use reth_primitives_traits::{BlockHeader, Header, SealedHeader};
use std::fmt::Debug;
/// A downloader capable of fetching and yielding block headers.
///
/// A downloader represents a distinct strategy for submitting requests to download block headers,
/// while a [`HeadersClient`][crate::headers::client::HeadersClient] represents a client capable
/// of fulfilling these requests.
///
/// A [`HeaderDownloader`] is a [Stream] that returns batches of headers.
pub trait HeaderDownloader:
    Send
    + Sync
    + Stream<Item = HeadersDownloaderResult<Vec<SealedHeader<Self::Header>>, Self::Header>>
    + Unpin
{
    /// The header type being downloaded.
    type Header: Sealable + Debug + Send + Sync + Unpin + 'static;
    /// Updates the gap to sync which ranges from local head to the sync target.
    ///
    /// See also [`HeaderDownloader::update_sync_target`] and
    /// [`HeaderDownloader::update_local_head`]
    fn update_sync_gap(&mut self, head: SealedHeader<Self::Header>, target: SyncTarget) {
        self.update_local_head(head);
        self.update_sync_target(target);
    }
    /// Updates the block number of the local database
    fn update_local_head(&mut self, head: SealedHeader<Self::Header>);
    /// Updates the target we want to sync to.
    fn update_sync_target(&mut self, target: SyncTarget);
    /// Sets the headers batch size that the Stream should return.
    fn set_batch_size(&mut self, limit: usize);
}
/// Specifies the target to sync for [`HeaderDownloader::update_sync_target`]
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum SyncTarget {
    /// This represents a range missing headers in the form of `(head,..`
    ///
    /// Sync _inclusively_ to the given block hash.
    ///
    /// This target specifies the upper end of the sync gap `(head...tip]`
    Tip(B256),
    /// This represents a gap missing headers bounded by the given header `h` in the form of
    /// `(head,..h),h+1,h+2...`
    ///
    /// Sync _exclusively_ to the given header's parent which is: `(head..h-1]`
    ///
    /// The benefit of this variant is, that this already provides the block number of the highest
    /// missing block.
    Gap(BlockWithParent),
    /// This represents a tip by block number
    TipNum(u64),
}
// === impl SyncTarget ===
impl SyncTarget {
    /// Returns the tip to sync to _inclusively_
    ///
    /// This returns the hash if the target is [`SyncTarget::Tip`] or the `parent_hash` of the given
    /// header in [`SyncTarget::Gap`]
    pub fn tip(&self) -> BlockHashOrNumber {
        match self {
            Self::Tip(tip) => (*tip).into(),
            // The gap target is exclusive, so the inclusive tip is the bounding header's parent.
            Self::Gap(gap) => gap.parent.into(),
            Self::TipNum(num) => (*num).into(),
        }
    }
}
/// Represents a gap to sync: from `local_head` to `target`
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct HeaderSyncGap<H: Sealable = Header> {
    /// The local head block. Represents lower bound of sync range.
    pub local_head: SealedHeader<H>,
    /// The sync target. Represents upper bound of sync range.
    pub target: SyncTarget,
}
impl<H: BlockHeader + Sealable> HeaderSyncGap<H> {
    /// Returns `true` if the gap from the head to the target was closed
    #[inline]
    pub fn is_closed(&self) -> bool {
        // The gap is closed once the local head matches the inclusive tip,
        // compared by hash or by number depending on how the target is specified.
        match self.target.tip() {
            BlockHashOrNumber::Hash(hash) => self.local_head.hash() == hash,
            BlockHashOrNumber::Number(num) => self.local_head.number() == num,
        }
    }
}
/// Validate whether the header is valid in relation to its parent
///
/// Validates the header against its parent first, then standalone.
/// Returns `Ok(())` if both checks pass, otherwise a
/// [`DownloadError::HeaderValidation`] describing the failure.
pub fn validate_header_download<H: BlockHeader>(
    consensus: &dyn HeaderValidator<H>,
    header: &SealedHeader<H>,
    parent: &SealedHeader<H>,
) -> DownloadResult<()> {
    // validate header against parent
    consensus.validate_header_against_parent(header, parent).map_err(|error| {
        DownloadError::HeaderValidation {
            hash: header.hash(),
            number: header.number(),
            error: Box::new(error),
        }
    })?;
    // validate header standalone
    consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation {
        hash: header.hash(),
        number: header.number(),
        error: Box::new(error),
    })?;
    Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/headers/error.rs | crates/net/p2p/src/headers/error.rs | use alloy_primitives::Sealable;
use derive_more::{Display, Error};
use reth_consensus::ConsensusError;
use reth_primitives_traits::SealedHeader;
/// Header downloader result
pub type HeadersDownloaderResult<T, H> = Result<T, HeadersDownloaderError<H>>;
/// Error variants that can happen when sending requests to a session.
#[derive(Debug, Clone, Eq, PartialEq, Display, Error)]
pub enum HeadersDownloaderError<H: Sealable> {
/// The downloaded header cannot be attached to the local head,
/// but is valid otherwise.
#[display("valid downloaded header cannot be attached to the local head: {error}")]
DetachedHead {
/// The local head we attempted to attach to.
local_head: Box<SealedHeader<H>>,
/// The header we attempted to attach.
header: Box<SealedHeader<H>>,
/// The error that occurred when attempting to attach the header.
#[error(source)]
error: Box<ConsensusError>,
},
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/headers/mod.rs | crates/net/p2p/src/headers/mod.rs | /// Trait definition for [`HeadersClient`]
///
/// [`HeadersClient`]: client::HeadersClient
pub mod client;
/// A downloader that receives and verifies block headers, is generic
/// over the Consensus and the `HeadersClient` being used.
///
/// [`Consensus`]: reth_consensus::Consensus
/// [`HeadersClient`]: client::HeadersClient
pub mod downloader;
/// Header downloader error.
pub mod error;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/test_utils/headers.rs | crates/net/p2p/src/test_utils/headers.rs | //! Testing support for headers related interfaces.
use crate::{
download::DownloadClient,
error::{DownloadError, DownloadResult, PeerRequestResult, RequestError},
headers::{
client::{HeadersClient, HeadersRequest},
downloader::{HeaderDownloader, SyncTarget},
error::HeadersDownloaderResult,
},
priority::Priority,
};
use alloy_consensus::Header;
use futures::{Future, FutureExt, Stream, StreamExt};
use reth_eth_wire_types::HeadersDirection;
use reth_network_peers::{PeerId, WithPeerId};
use reth_primitives_traits::SealedHeader;
use std::{
fmt,
pin::Pin,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
task::{ready, Context, Poll},
};
use tokio::sync::Mutex;
/// A test downloader which just returns the values that have been pushed to it.
#[derive(Debug)]
pub struct TestHeaderDownloader {
client: TestHeadersClient,
limit: u64,
download: Option<TestDownload>,
queued_headers: Vec<SealedHeader>,
batch_size: usize,
}
impl TestHeaderDownloader {
/// Instantiates the downloader with the mock responses
pub const fn new(client: TestHeadersClient, limit: u64, batch_size: usize) -> Self {
Self { client, limit, download: None, batch_size, queued_headers: Vec::new() }
}
fn create_download(&self) -> TestDownload {
TestDownload {
client: self.client.clone(),
limit: self.limit,
fut: None,
buffer: vec![],
done: false,
}
}
}
impl HeaderDownloader for TestHeaderDownloader {
type Header = Header;
fn update_local_head(&mut self, _head: SealedHeader) {}
fn update_sync_target(&mut self, _target: SyncTarget) {}
fn set_batch_size(&mut self, limit: usize) {
self.batch_size = limit;
}
}
impl Stream for TestHeaderDownloader {
type Item = HeadersDownloaderResult<Vec<SealedHeader>, Header>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
if this.queued_headers.len() == this.batch_size {
return Poll::Ready(Some(Ok(std::mem::take(&mut this.queued_headers))))
}
if this.download.is_none() {
this.download = Some(this.create_download());
}
match ready!(this.download.as_mut().unwrap().poll_next_unpin(cx)) {
None => return Poll::Ready(Some(Ok(std::mem::take(&mut this.queued_headers)))),
Some(header) => this.queued_headers.push(header.unwrap()),
}
}
}
}
type TestHeadersFut = Pin<Box<dyn Future<Output = PeerRequestResult<Vec<Header>>> + Sync + Send>>;
struct TestDownload {
client: TestHeadersClient,
limit: u64,
fut: Option<TestHeadersFut>,
buffer: Vec<SealedHeader>,
done: bool,
}
impl TestDownload {
fn get_or_init_fut(&mut self) -> &mut TestHeadersFut {
if self.fut.is_none() {
let request = HeadersRequest {
limit: self.limit,
direction: HeadersDirection::Rising,
start: 0u64.into(), // ignored
};
let client = self.client.clone();
self.fut = Some(Box::pin(client.get_headers(request)));
}
self.fut.as_mut().unwrap()
}
}
impl fmt::Debug for TestDownload {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TestDownload")
.field("client", &self.client)
.field("limit", &self.limit)
.field("buffer", &self.buffer)
.field("done", &self.done)
.finish_non_exhaustive()
}
}
impl Stream for TestDownload {
type Item = DownloadResult<SealedHeader>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
loop {
if let Some(header) = this.buffer.pop() {
return Poll::Ready(Some(Ok(header)))
} else if this.done {
return Poll::Ready(None)
}
match ready!(this.get_or_init_fut().poll_unpin(cx)) {
Ok(resp) => {
// Skip head and seal headers
let mut headers =
resp.1.into_iter().skip(1).map(SealedHeader::seal_slow).collect::<Vec<_>>();
headers.sort_unstable_by_key(|h| h.number);
for h in headers {
this.buffer.push(h);
}
this.done = true;
}
Err(err) => {
this.done = true;
return Poll::Ready(Some(Err(match err {
RequestError::Timeout => DownloadError::Timeout,
_ => DownloadError::RequestError(err),
})))
}
}
}
}
}
/// A test client for fetching headers
#[derive(Debug, Default, Clone)]
pub struct TestHeadersClient {
responses: Arc<Mutex<Vec<Header>>>,
error: Arc<Mutex<Option<RequestError>>>,
request_attempts: Arc<AtomicU64>,
}
impl TestHeadersClient {
/// Return the number of times client was polled
pub fn request_attempts(&self) -> u64 {
self.request_attempts.load(Ordering::SeqCst)
}
/// Adds headers to the set.
pub async fn extend(&self, headers: impl IntoIterator<Item = Header>) {
let mut lock = self.responses.lock().await;
lock.extend(headers);
}
/// Clears the set.
pub async fn clear(&self) {
let mut lock = self.responses.lock().await;
lock.clear();
}
/// Set response error
pub async fn set_error(&self, err: RequestError) {
let mut lock = self.error.lock().await;
lock.replace(err);
}
}
impl DownloadClient for TestHeadersClient {
fn report_bad_message(&self, _peer_id: PeerId) {
// noop
}
fn num_connected_peers(&self) -> usize {
0
}
}
impl HeadersClient for TestHeadersClient {
type Header = Header;
type Output = TestHeadersFut;
fn get_headers_with_priority(
&self,
request: HeadersRequest,
_priority: Priority,
) -> Self::Output {
let responses = self.responses.clone();
let error = self.error.clone();
self.request_attempts.fetch_add(1, Ordering::SeqCst);
Box::pin(async move {
if let Some(err) = &mut *error.lock().await {
return Err(err.clone())
}
let mut lock = responses.lock().await;
let len = lock.len().min(request.limit as usize);
let resp = lock.drain(..len).collect();
let with_peer_id = WithPeerId::from((PeerId::default(), resp));
Ok(with_peer_id)
})
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/test_utils/full_block.rs | crates/net/p2p/src/test_utils/full_block.rs | use crate::{
bodies::client::BodiesClient,
download::DownloadClient,
error::PeerRequestResult,
headers::client::{HeadersClient, HeadersRequest},
priority::Priority,
BlockClient,
};
use alloy_consensus::Header;
use alloy_eips::{BlockHashOrNumber, BlockNumHash};
use alloy_primitives::B256;
use parking_lot::Mutex;
use reth_eth_wire_types::HeadersDirection;
use reth_ethereum_primitives::{Block, BlockBody};
use reth_network_peers::{PeerId, WithPeerId};
use reth_primitives_traits::{SealedBlock, SealedHeader};
use std::{collections::HashMap, ops::RangeInclusive, sync::Arc};
/// A headers+bodies client that stores the headers and bodies in memory, with an artificial soft
/// bodies response limit that is set to 20 by default.
///
/// This full block client can be [Clone]d and shared between multiple tasks.
#[derive(Clone, Debug)]
pub struct TestFullBlockClient {
headers: Arc<Mutex<HashMap<B256, Header>>>,
bodies: Arc<Mutex<HashMap<B256, BlockBody>>>,
// soft response limit, max number of bodies to respond with
soft_limit: usize,
}
impl Default for TestFullBlockClient {
fn default() -> Self {
Self {
headers: Arc::new(Mutex::new(HashMap::default())),
bodies: Arc::new(Mutex::new(HashMap::default())),
soft_limit: 20,
}
}
}
impl TestFullBlockClient {
/// Insert a header and body into the client maps.
pub fn insert(&self, header: SealedHeader, body: BlockBody) {
let hash = header.hash();
self.headers.lock().insert(hash, header.unseal());
self.bodies.lock().insert(hash, body);
}
/// Set the soft response limit.
pub const fn set_soft_limit(&mut self, limit: usize) {
self.soft_limit = limit;
}
/// Get the block with the highest block number.
pub fn highest_block(&self) -> Option<SealedBlock<Block>> {
self.headers.lock().iter().max_by_key(|(_, header)| header.number).and_then(
|(hash, header)| {
self.bodies.lock().get(hash).map(|body| {
SealedBlock::from_parts_unchecked(header.clone(), body.clone(), *hash)
})
},
)
}
}
impl DownloadClient for TestFullBlockClient {
/// Reports a bad message from a specific peer.
fn report_bad_message(&self, _peer_id: PeerId) {}
/// Retrieves the number of connected peers.
///
/// Returns the number of connected peers in the test scenario (1).
fn num_connected_peers(&self) -> usize {
1
}
}
/// Implements the `HeadersClient` trait for the `TestFullBlockClient` struct.
impl HeadersClient for TestFullBlockClient {
type Header = Header;
/// Specifies the associated output type.
type Output = futures::future::Ready<PeerRequestResult<Vec<Header>>>;
/// Retrieves headers with a given priority level.
///
/// # Arguments
///
/// * `request` - A `HeadersRequest` indicating the headers to retrieve.
/// * `_priority` - A `Priority` level for the request.
///
/// # Returns
///
/// A `Ready` future containing a `PeerRequestResult` with a vector of retrieved headers.
fn get_headers_with_priority(
&self,
request: HeadersRequest,
_priority: Priority,
) -> Self::Output {
let headers = self.headers.lock();
// Initializes the block hash or number.
let mut block: BlockHashOrNumber = match request.start {
BlockHashOrNumber::Hash(hash) => headers.get(&hash).cloned(),
BlockHashOrNumber::Number(num) => headers.values().find(|h| h.number == num).cloned(),
}
.map(|h| h.number.into())
.unwrap();
// Retrieves headers based on the provided limit and request direction.
let resp = (0..request.limit)
.filter_map(|_| {
headers.iter().find_map(|(hash, header)| {
// Checks if the header matches the specified block or number.
BlockNumHash::new(header.number, *hash).matches_block_or_num(&block).then(
|| {
match request.direction {
HeadersDirection::Falling => block = header.parent_hash.into(),
HeadersDirection::Rising => block = (header.number + 1).into(),
}
header.clone()
},
)
})
})
.collect::<Vec<_>>();
// Returns a future containing the retrieved headers with a random peer ID.
futures::future::ready(Ok(WithPeerId::new(PeerId::random(), resp)))
}
}
/// Implements the `BodiesClient` trait for the `TestFullBlockClient` struct.
impl BodiesClient for TestFullBlockClient {
type Body = BlockBody;
/// Defines the output type of the function.
type Output = futures::future::Ready<PeerRequestResult<Vec<BlockBody>>>;
/// Retrieves block bodies corresponding to provided hashes with a given priority.
///
/// # Arguments
///
/// * `hashes` - A vector of block hashes to retrieve bodies for.
/// * `_priority` - Priority level for block body retrieval (unused in this implementation).
///
/// # Returns
///
/// A future containing the result of the block body retrieval operation.
fn get_block_bodies_with_priority_and_range_hint(
&self,
hashes: Vec<B256>,
_priority: Priority,
_range_hint: Option<RangeInclusive<u64>>,
) -> Self::Output {
// Acquire a lock on the bodies.
let bodies = self.bodies.lock();
// Create a future that immediately returns the result of the block body retrieval
// operation.
futures::future::ready(Ok(WithPeerId::new(
PeerId::random(),
hashes
.iter()
.filter_map(|hash| bodies.get(hash).cloned())
.take(self.soft_limit)
.collect(),
)))
}
}
impl BlockClient for TestFullBlockClient {
type Block = reth_ethereum_primitives::Block;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/test_utils/bodies.rs | crates/net/p2p/src/test_utils/bodies.rs | use crate::{
bodies::client::{BodiesClient, BodiesFut},
download::DownloadClient,
error::PeerRequestResult,
priority::Priority,
};
use alloy_primitives::B256;
use futures::FutureExt;
use reth_ethereum_primitives::BlockBody;
use reth_network_peers::PeerId;
use std::{
fmt::{Debug, Formatter},
ops::RangeInclusive,
};
use tokio::sync::oneshot;
/// A test client for fetching bodies
pub struct TestBodiesClient<F> {
/// The function that is called on each body request.
pub responder: F,
}
impl<F> Debug for TestBodiesClient<F> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TestBodiesClient").finish_non_exhaustive()
}
}
impl<F: Sync + Send> DownloadClient for TestBodiesClient<F> {
fn report_bad_message(&self, _peer_id: PeerId) {
// noop
}
fn num_connected_peers(&self) -> usize {
0
}
}
impl<F> BodiesClient for TestBodiesClient<F>
where
F: Fn(Vec<B256>) -> PeerRequestResult<Vec<BlockBody>> + Send + Sync,
{
type Body = BlockBody;
type Output = BodiesFut;
fn get_block_bodies_with_priority_and_range_hint(
&self,
hashes: Vec<B256>,
_priority: Priority,
_range_hint: Option<RangeInclusive<u64>>,
) -> Self::Output {
let (tx, rx) = oneshot::channel();
let _ = tx.send((self.responder)(hashes));
Box::pin(rx.map(|x| match x {
Ok(value) => value,
Err(err) => Err(err.into()),
}))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/p2p/src/test_utils/mod.rs | crates/net/p2p/src/test_utils/mod.rs | mod bodies;
mod full_block;
mod headers;
pub use bodies::*;
pub use full_block::*;
pub use headers::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/config.rs | crates/net/discv5/src/config.rs | //! Wrapper around [`discv5::Config`].
use std::{
collections::HashSet,
fmt::Debug,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
};
use alloy_primitives::Bytes;
use derive_more::Display;
use discv5::{
multiaddr::{Multiaddr, Protocol},
ListenConfig,
};
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_peers::NodeRecord;
use tracing::debug;
use crate::{enr::discv4_id_to_multiaddr_id, filter::MustNotIncludeKeys, NetworkStackId};
/// The default address for discv5 via UDP is IPv4.
///
/// Default is 0.0.0.0, all interfaces. See [`discv5::ListenConfig`] default.
pub const DEFAULT_DISCOVERY_V5_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
/// The default IPv6 address for discv5 via UDP.
///
/// Default is ::, all interfaces.
pub const DEFAULT_DISCOVERY_V5_ADDR_IPV6: Ipv6Addr = Ipv6Addr::UNSPECIFIED;
/// The default port for discv5 via UDP.
///
/// Default is port 9200.
pub const DEFAULT_DISCOVERY_V5_PORT: u16 = 9200;
/// The default [`discv5::ListenConfig`].
///
/// This is different from the upstream default.
pub const DEFAULT_DISCOVERY_V5_LISTEN_CONFIG: ListenConfig =
ListenConfig::Ipv4 { ip: DEFAULT_DISCOVERY_V5_ADDR, port: DEFAULT_DISCOVERY_V5_PORT };
/// Default interval in seconds at which to run a lookup up query.
///
/// Default is 20 seconds.
pub const DEFAULT_SECONDS_LOOKUP_INTERVAL: u64 = 20;
/// Default number of times to do pulse lookup queries, at bootstrap (pulse intervals, defaulting
/// to 5 seconds).
///
/// Default is 200 counts.
pub const DEFAULT_COUNT_BOOTSTRAP_LOOKUPS: u64 = 200;
/// Default duration of the pulse lookup interval at bootstrap.
///
/// Default is 5 seconds.
pub const DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL: u64 = 5;
/// Builds a [`Config`].
#[derive(Debug)]
pub struct ConfigBuilder {
/// Config used by [`discv5::Discv5`]. Contains the discovery listen socket.
discv5_config: Option<discv5::Config>,
/// Nodes to boot from.
bootstrap_nodes: HashSet<BootNode>,
/// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node
/// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", ForkId)`.
///
/// Defaults to L1 mainnet if not set.
fork: Option<(&'static [u8], ForkId)>,
/// `RLPx` TCP socket to advertise.
///
/// NOTE: IP address of `RLPx` socket overwrites IP address of same IP version in
/// [`discv5::ListenConfig`].
tcp_socket: SocketAddr,
/// List of `(key, rlp-encoded-value)` tuples that should be advertised in local node record
/// (in addition to tcp port, udp port and fork).
other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>,
/// Interval in seconds at which to run a lookup up query to populate kbuckets.
lookup_interval: Option<u64>,
/// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket
/// population.
bootstrap_lookup_interval: Option<u64>,
/// Number of times to run boost lookup queries at start up.
bootstrap_lookup_countdown: Option<u64>,
/// Custom filter rules to apply to a discovered peer in order to determine if it should be
/// passed up to rlpx or dropped.
discovered_peer_filter: Option<MustNotIncludeKeys>,
}
impl ConfigBuilder {
/// Returns a new builder, with all fields set like given instance.
pub fn new_from(discv5_config: Config) -> Self {
let Config {
discv5_config,
bootstrap_nodes,
fork,
tcp_socket,
other_enr_kv_pairs,
lookup_interval,
bootstrap_lookup_interval,
bootstrap_lookup_countdown,
discovered_peer_filter,
} = discv5_config;
Self {
discv5_config: Some(discv5_config),
bootstrap_nodes,
fork: fork.map(|(key, fork_id)| (key, fork_id.fork_id)),
tcp_socket,
other_enr_kv_pairs,
lookup_interval: Some(lookup_interval),
bootstrap_lookup_interval: Some(bootstrap_lookup_interval),
bootstrap_lookup_countdown: Some(bootstrap_lookup_countdown),
discovered_peer_filter: Some(discovered_peer_filter),
}
}
/// Set [`discv5::Config`], which contains the [`discv5::Discv5`] listen socket.
pub fn discv5_config(mut self, discv5_config: discv5::Config) -> Self {
self.discv5_config = Some(discv5_config);
self
}
/// Adds multiple boot nodes from a list of [`Enr`](discv5::Enr)s.
pub fn add_signed_boot_nodes(mut self, nodes: impl IntoIterator<Item = discv5::Enr>) -> Self {
self.bootstrap_nodes.extend(nodes.into_iter().map(BootNode::Enr));
self
}
/// Parses a comma-separated list of serialized [`Enr`](discv5::Enr)s, signed node records, and
/// adds any successfully deserialized records to boot nodes. Note: this type is serialized in
/// CL format since [`discv5`] is originally a CL library.
pub fn add_cl_serialized_signed_boot_nodes(mut self, enrs: &str) -> Self {
let bootstrap_nodes = &mut self.bootstrap_nodes;
for node in enrs.split(&[',']).flat_map(|record| record.trim().parse::<discv5::Enr>()) {
bootstrap_nodes.insert(BootNode::Enr(node));
}
self
}
/// Adds boot nodes in the form a list of [`NodeRecord`]s, parsed enodes.
pub fn add_unsigned_boot_nodes(mut self, enodes: impl IntoIterator<Item = NodeRecord>) -> Self {
for node in enodes {
if let Ok(node) = BootNode::from_unsigned(node) {
self.bootstrap_nodes.insert(node);
}
}
self
}
/// Adds a comma-separated list of enodes, serialized unsigned node records, to boot nodes.
pub fn add_serialized_unsigned_boot_nodes(mut self, enodes: &[&str]) -> Self {
for node in enodes {
if let Ok(node) = node.parse() {
if let Ok(node) = BootNode::from_unsigned(node) {
self.bootstrap_nodes.insert(node);
}
}
}
self
}
/// Set fork ID kv-pair to set in local [`Enr`](discv5::enr::Enr). This lets peers on discovery
/// network know which chain this node belongs to.
pub const fn fork(mut self, fork_key: &'static [u8], fork_id: ForkId) -> Self {
self.fork = Some((fork_key, fork_id));
self
}
/// Sets the tcp socket to advertise in the local [`Enr`](discv5::enr::Enr). The IP address of
/// this socket will overwrite the discovery address of the same IP version, if one is
/// configured.
pub const fn tcp_socket(mut self, socket: SocketAddr) -> Self {
self.tcp_socket = socket;
self
}
/// Adds an additional kv-pair to include in the local [`Enr`](discv5::enr::Enr). Takes the key
/// to use for the kv-pair and the rlp encoded value.
pub fn add_enr_kv_pair(mut self, key: &'static [u8], value: Bytes) -> Self {
self.other_enr_kv_pairs.push((key, value));
self
}
/// Sets the interval at which to run lookup queries, in order to fill kbuckets. Lookup queries
/// are done periodically at the given interval for the whole run of the program.
pub const fn lookup_interval(mut self, seconds: u64) -> Self {
self.lookup_interval = Some(seconds);
self
}
/// Sets the interval at which to run boost lookup queries at start up. Queries will be started
/// at this interval for the configured number of times after start up.
pub const fn bootstrap_lookup_interval(mut self, seconds: u64) -> Self {
self.bootstrap_lookup_interval = Some(seconds);
self
}
/// Sets the number of times at which to run boost lookup queries to bootstrap the node.
pub const fn bootstrap_lookup_countdown(mut self, counts: u64) -> Self {
self.bootstrap_lookup_countdown = Some(counts);
self
}
/// Adds keys to disallow when filtering a discovered peer, to determine whether or not it
/// should be passed to rlpx. The discovered node record is scanned for any kv-pairs where the
/// key matches the disallowed keys. If not explicitly set, b"eth2" key will be disallowed.
pub fn must_not_include_keys(mut self, not_keys: &[&'static [u8]]) -> Self {
let mut filter = self.discovered_peer_filter.unwrap_or_default();
filter.add_disallowed_keys(not_keys);
self.discovered_peer_filter = Some(filter);
self
}
/// Returns a new [`Config`].
pub fn build(self) -> Config {
let Self {
discv5_config,
bootstrap_nodes,
fork,
tcp_socket,
other_enr_kv_pairs,
lookup_interval,
bootstrap_lookup_interval,
bootstrap_lookup_countdown,
discovered_peer_filter,
} = self;
let mut discv5_config = discv5_config.unwrap_or_else(|| {
discv5::ConfigBuilder::new(DEFAULT_DISCOVERY_V5_LISTEN_CONFIG).build()
});
discv5_config.listen_config =
amend_listen_config_wrt_rlpx(&discv5_config.listen_config, tcp_socket.ip());
let fork = fork.map(|(key, fork_id)| (key, fork_id.into()));
let lookup_interval = lookup_interval.unwrap_or(DEFAULT_SECONDS_LOOKUP_INTERVAL);
let bootstrap_lookup_interval =
bootstrap_lookup_interval.unwrap_or(DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL);
let bootstrap_lookup_countdown =
bootstrap_lookup_countdown.unwrap_or(DEFAULT_COUNT_BOOTSTRAP_LOOKUPS);
let discovered_peer_filter = discovered_peer_filter
.unwrap_or_else(|| MustNotIncludeKeys::new(&[NetworkStackId::ETH2]));
Config {
discv5_config,
bootstrap_nodes,
fork,
tcp_socket,
other_enr_kv_pairs,
lookup_interval,
bootstrap_lookup_interval,
bootstrap_lookup_countdown,
discovered_peer_filter,
}
}
}
/// Config used to bootstrap [`discv5::Discv5`].
#[derive(Clone, Debug)]
pub struct Config {
/// Config used by [`discv5::Discv5`]. Contains the [`ListenConfig`], with the discovery listen
/// socket.
pub(super) discv5_config: discv5::Config,
/// Nodes to boot from.
pub(super) bootstrap_nodes: HashSet<BootNode>,
/// Fork kv-pair to set in local node record. Identifies which network/chain/fork the node
/// belongs, e.g. `(b"opstack", ChainId)` or `(b"eth", [ForkId])`.
pub(super) fork: Option<(&'static [u8], EnrForkIdEntry)>,
/// `RLPx` TCP socket to advertise.
///
/// NOTE: IP address of `RLPx` socket overwrites IP address of same IP version in
/// [`discv5::ListenConfig`].
pub(super) tcp_socket: SocketAddr,
/// Additional kv-pairs (besides tcp port, udp port and fork) that should be advertised to
/// peers by including in local node record.
pub(super) other_enr_kv_pairs: Vec<(&'static [u8], Bytes)>,
/// Interval in seconds at which to run a lookup up query with to populate kbuckets.
pub(super) lookup_interval: u64,
/// Interval in seconds at which to run pulse lookup queries at bootstrap to boost kbucket
/// population.
pub(super) bootstrap_lookup_interval: u64,
/// Number of times to run boost lookup queries at start up.
pub(super) bootstrap_lookup_countdown: u64,
/// Custom filter rules to apply to a discovered peer in order to determine if it should be
/// passed up to rlpx or dropped.
pub(super) discovered_peer_filter: MustNotIncludeKeys,
}
impl Config {
/// Returns a new [`ConfigBuilder`], with the `RLPx` TCP port and IP version configured w.r.t.
/// the given socket.
pub fn builder(rlpx_tcp_socket: SocketAddr) -> ConfigBuilder {
ConfigBuilder {
discv5_config: None,
bootstrap_nodes: HashSet::default(),
fork: None,
tcp_socket: rlpx_tcp_socket,
other_enr_kv_pairs: Vec::new(),
lookup_interval: None,
bootstrap_lookup_interval: None,
bootstrap_lookup_countdown: None,
discovered_peer_filter: None,
}
}
/// Inserts a new boot node to the list of boot nodes.
pub fn insert_boot_node(&mut self, boot_node: BootNode) {
self.bootstrap_nodes.insert(boot_node);
}
/// Inserts a new unsigned enode boot node to the list of boot nodes if it can be parsed, see
/// also [`BootNode::from_unsigned`].
pub fn insert_unsigned_boot_node(&mut self, node_record: NodeRecord) {
let _ = BootNode::from_unsigned(node_record).map(|node| self.insert_boot_node(node));
}
/// Extends the list of boot nodes with a list of enode boot nodes if they can be parsed.
pub fn extend_unsigned_boot_nodes(
&mut self,
node_records: impl IntoIterator<Item = NodeRecord>,
) {
for node_record in node_records {
self.insert_unsigned_boot_node(node_record);
}
}
/// Returns the discovery (UDP) socket contained in the [`discv5::Config`]. Returns the IPv6
/// socket, if both IPv4 and v6 are configured. This socket will be advertised to peers in the
/// local [`Enr`](discv5::enr::Enr).
pub fn discovery_socket(&self) -> SocketAddr {
match self.discv5_config.listen_config {
ListenConfig::Ipv4 { ip, port } => (ip, port).into(),
ListenConfig::Ipv6 { ip, port } => (ip, port).into(),
ListenConfig::DualStack { ipv6, ipv6_port, .. } => (ipv6, ipv6_port).into(),
}
}
/// Returns the `RLPx` (TCP) socket contained in the [`discv5::Config`]. This socket will be
/// advertised to peers in the local [`Enr`](discv5::enr::Enr).
pub const fn rlpx_socket(&self) -> &SocketAddr {
&self.tcp_socket
}
}
/// Returns the IPv4 discovery socket if one is configured.
pub const fn ipv4(listen_config: &ListenConfig) -> Option<SocketAddrV4> {
match listen_config {
ListenConfig::Ipv4 { ip, port } |
ListenConfig::DualStack { ipv4: ip, ipv4_port: port, .. } => {
Some(SocketAddrV4::new(*ip, *port))
}
ListenConfig::Ipv6 { .. } => None,
}
}
/// Returns the IPv6 discovery socket if one is configured.
pub const fn ipv6(listen_config: &ListenConfig) -> Option<SocketAddrV6> {
match listen_config {
ListenConfig::Ipv4 { .. } => None,
ListenConfig::Ipv6 { ip, port } |
ListenConfig::DualStack { ipv6: ip, ipv6_port: port, .. } => {
Some(SocketAddrV6::new(*ip, *port, 0, 0))
}
}
}
/// Returns the amended [`discv5::ListenConfig`] based on the `RLPx` IP address. The ENR is limited
/// to one IP address per IP version (atm, may become spec'd how to advertise different addresses).
/// The `RLPx` address overwrites the discv5 address w.r.t. IP version.
pub fn amend_listen_config_wrt_rlpx(
listen_config: &ListenConfig,
rlpx_addr: IpAddr,
) -> ListenConfig {
let discv5_socket_ipv4 = ipv4(listen_config);
let discv5_socket_ipv6 = ipv6(listen_config);
let discv5_port_ipv4 =
discv5_socket_ipv4.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT);
let discv5_addr_ipv4 = discv5_socket_ipv4.map(|socket| *socket.ip());
let discv5_port_ipv6 =
discv5_socket_ipv6.map(|socket| socket.port()).unwrap_or(DEFAULT_DISCOVERY_V5_PORT);
let discv5_addr_ipv6 = discv5_socket_ipv6.map(|socket| *socket.ip());
let (discv5_socket_ipv4, discv5_socket_ipv6) = discv5_sockets_wrt_rlpx_addr(
rlpx_addr,
discv5_addr_ipv4,
discv5_port_ipv4,
discv5_addr_ipv6,
discv5_port_ipv6,
);
ListenConfig::from_two_sockets(discv5_socket_ipv4, discv5_socket_ipv6)
}
/// Returns the sockets that can be used for discv5 with respect to the `RLPx` address. ENR specs
/// only acknowledge one address per IP version.
pub fn discv5_sockets_wrt_rlpx_addr(
rlpx_addr: IpAddr,
discv5_addr_ipv4: Option<Ipv4Addr>,
discv5_port_ipv4: u16,
discv5_addr_ipv6: Option<Ipv6Addr>,
discv5_port_ipv6: u16,
) -> (Option<SocketAddrV4>, Option<SocketAddrV6>) {
match rlpx_addr {
IpAddr::V4(rlpx_addr) => {
let discv5_socket_ipv6 =
discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0));
if let Some(discv5_addr) = discv5_addr_ipv4 {
if discv5_addr != rlpx_addr {
debug!(target: "net::discv5",
%discv5_addr,
%rlpx_addr,
"Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version"
);
}
}
// overwrite discv5 ipv4 addr with RLPx address. this is since there is no
// spec'd way to advertise a different address for rlpx and discovery in the
// ENR.
(Some(SocketAddrV4::new(rlpx_addr, discv5_port_ipv4)), discv5_socket_ipv6)
}
IpAddr::V6(rlpx_addr) => {
let discv5_socket_ipv4 =
discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4));
if let Some(discv5_addr) = discv5_addr_ipv6 {
if discv5_addr != rlpx_addr {
debug!(target: "net::discv5",
%discv5_addr,
%rlpx_addr,
"Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version"
);
}
}
// overwrite discv5 ipv6 addr with RLPx address. this is since there is no
// spec'd way to advertise a different address for rlpx and discovery in the
// ENR.
(discv5_socket_ipv4, Some(SocketAddrV6::new(rlpx_addr, discv5_port_ipv6, 0, 0)))
}
}
}
/// A boot node can be added either as a string in either 'enode' URL scheme or serialized from
/// [`Enr`](discv5::Enr) type.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Display)]
pub enum BootNode {
/// An unsigned node record.
#[display("{_0}")]
Enode(Multiaddr),
/// A signed node record.
#[display("{_0:?}")]
Enr(discv5::Enr),
}
impl BootNode {
/// Parses a [`NodeRecord`] and serializes according to CL format. Note: [`discv5`] is
/// originally a CL library hence needs this format to add the node.
pub fn from_unsigned(node_record: NodeRecord) -> Result<Self, secp256k1::Error> {
let NodeRecord { address, udp_port, id, .. } = node_record;
let mut multi_address = Multiaddr::empty();
match address {
IpAddr::V4(ip) => multi_address.push(Protocol::Ip4(ip)),
IpAddr::V6(ip) => multi_address.push(Protocol::Ip6(ip)),
}
multi_address.push(Protocol::Udp(udp_port));
let id = discv4_id_to_multiaddr_id(id)?;
multi_address.push(Protocol::P2p(id));
Ok(Self::Enode(multi_address))
}
}
#[cfg(test)]
mod test {
use super::*;
use alloy_primitives::hex;
use std::net::SocketAddrV4;
const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB";
const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[
"enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305",
"enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305",
"enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305",
"enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301",
"enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301",
"enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301",
"enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301",
"enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301",
];
#[test]
fn parse_boot_nodes() {
const OP_SEPOLIA_CL_BOOTNODES: &str = "enr:-J64QBwRIWAco7lv6jImSOjPU_W266lHXzpAS5YOh7WmgTyBZkgLgOwo_mxKJq3wz2XRbsoBItbv1dCyjIoNq67mFguGAYrTxM42gmlkgnY0gmlwhBLSsHKHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDmoWSi8hcsRpQf2eJsNUx-sqv6fH4btmo2HsAzZFAKnKDdGNwgiQGg3VkcIIkBg,enr:-J64QFa3qMsONLGphfjEkeYyF6Jkil_jCuJmm7_a42ckZeUQGLVzrzstZNb1dgBp1GGx9bzImq5VxJLP-BaptZThGiWGAYrTytOvgmlkgnY0gmlwhGsV-zeHb3BzdGFja4S0lAUAiXNlY3AyNTZrMaEDahfSECTIS_cXyZ8IyNf4leANlZnrsMEWTkEYxf4GMCmDdGNwgiQGg3VkcIIkBg";
let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into())
.add_cl_serialized_signed_boot_nodes(OP_SEPOLIA_CL_BOOTNODES)
.build();
let socket_1 = "18.210.176.114:9222".parse::<SocketAddrV4>().unwrap();
let socket_2 = "107.21.251.55:9222".parse::<SocketAddrV4>().unwrap();
for node in config.bootstrap_nodes {
let BootNode::Enr(node) = node else { panic!() };
assert!(
socket_1 == node.udp4_socket().unwrap() && socket_1 == node.tcp4_socket().unwrap() ||
socket_2 == node.udp4_socket().unwrap() &&
socket_2 == node.tcp4_socket().unwrap()
);
assert_eq!("84b4940500", hex::encode(node.get_raw_rlp("opstack").unwrap()));
}
}
#[test]
fn parse_enodes() {
let config = Config::builder((Ipv4Addr::UNSPECIFIED, 30303).into())
.add_serialized_unsigned_boot_nodes(BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET)
.build();
let bootstrap_nodes =
config.bootstrap_nodes.into_iter().map(|node| format!("{node}")).collect::<Vec<_>>();
for node in MULTI_ADDRESSES.split(&[',']) {
assert!(bootstrap_nodes.contains(&node.to_string()));
}
}
#[test]
fn overwrite_ipv4_addr() {
let rlpx_addr: Ipv4Addr = "192.168.0.1".parse().unwrap();
let listen_config = DEFAULT_DISCOVERY_V5_LISTEN_CONFIG;
let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into());
let config_socket_ipv4 = ipv4(&amended_config).unwrap();
assert_eq!(*config_socket_ipv4.ip(), rlpx_addr);
assert_eq!(config_socket_ipv4.port(), DEFAULT_DISCOVERY_V5_PORT);
assert_eq!(ipv6(&amended_config), ipv6(&listen_config));
}
#[test]
fn overwrite_ipv6_addr() {
let rlpx_addr: Ipv6Addr = "fe80::1".parse().unwrap();
let listen_config = DEFAULT_DISCOVERY_V5_LISTEN_CONFIG;
let amended_config = amend_listen_config_wrt_rlpx(&listen_config, rlpx_addr.into());
let config_socket_ipv6 = ipv6(&amended_config).unwrap();
assert_eq!(*config_socket_ipv6.ip(), rlpx_addr);
assert_eq!(config_socket_ipv6.port(), DEFAULT_DISCOVERY_V5_PORT);
assert_eq!(ipv4(&amended_config), ipv4(&listen_config));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/enr.rs | crates/net/discv5/src/enr.rs | //! Interface between node identification on protocol version 5 and 4. Specifically, between types
//! [`discv5::enr::NodeId`] and [`PeerId`].
use discv5::enr::{CombinedPublicKey, EnrPublicKey, NodeId};
use enr::Enr;
use reth_network_peers::{id2pk, pk2id, PeerId};
use secp256k1::{PublicKey, SecretKey};
/// Extracts a [`CombinedPublicKey::Secp256k1`] from a [`discv5::Enr`] and converts it to a
/// [`PeerId`]. Note: conversion from discv5 ID to discv4 ID is not possible.
pub fn enr_to_discv4_id(enr: &discv5::Enr) -> Option<PeerId> {
let pk = enr.public_key();
if !matches!(pk, CombinedPublicKey::Secp256k1(_)) {
return None
}
let pk = PublicKey::from_slice(&pk.encode()).unwrap();
Some(pk2id(&pk))
}
/// Converts a [`PeerId`] to a [`discv5::enr::NodeId`].
pub fn discv4_id_to_discv5_id(peer_id: PeerId) -> Result<NodeId, secp256k1::Error> {
Ok(id2pk(peer_id)?.into())
}
/// Converts a [`PeerId`] to a [`reth_network_peers::PeerId`].
pub fn discv4_id_to_multiaddr_id(
peer_id: PeerId,
) -> Result<discv5::libp2p_identity::PeerId, secp256k1::Error> {
let pk = id2pk(peer_id)?.encode();
let pk: discv5::libp2p_identity::PublicKey =
discv5::libp2p_identity::secp256k1::PublicKey::try_from_bytes(&pk).unwrap().into();
Ok(pk.to_peer_id())
}
/// Wrapper around [`discv5::Enr`] ([`Enr<CombinedKey>`]).
#[derive(Debug, Clone)]
pub struct EnrCombinedKeyWrapper(pub discv5::Enr);
impl From<Enr<SecretKey>> for EnrCombinedKeyWrapper {
fn from(value: Enr<SecretKey>) -> Self {
let encoded_enr = alloy_rlp::encode(&value);
Self(alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap())
}
}
impl From<EnrCombinedKeyWrapper> for Enr<SecretKey> {
fn from(val: EnrCombinedKeyWrapper) -> Self {
let encoded_enr = alloy_rlp::encode(&val.0);
alloy_rlp::Decodable::decode(&mut &encoded_enr[..]).unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_rlp::Encodable;
use discv5::enr::{CombinedKey, EnrKey};
use reth_chainspec::{EthereumHardfork, MAINNET};
use reth_network_peers::NodeRecord;
#[test]
fn discv5_discv4_id_conversion() {
let discv5_pk = CombinedKey::generate_secp256k1().public();
let discv5_peer_id = NodeId::from(discv5_pk.clone());
// convert to discv4 id
let pk = secp256k1::PublicKey::from_slice(&discv5_pk.encode()).unwrap();
let discv4_peer_id = pk2id(&pk);
// convert back to discv5 id
let discv5_peer_id_from_discv4_peer_id = discv4_id_to_discv5_id(discv4_peer_id).unwrap();
assert_eq!(discv5_peer_id, discv5_peer_id_from_discv4_peer_id)
}
#[test]
fn conversion_to_node_record_from_enr() {
const IP: &str = "::";
const TCP_PORT: u16 = 30303;
const UDP_PORT: u16 = 9000;
let key = CombinedKey::generate_secp256k1();
let mut buf = Vec::new();
let fork_id = MAINNET.hardfork_fork_id(EthereumHardfork::Frontier);
fork_id.unwrap().encode(&mut buf);
let enr = Enr::builder()
.ip6(IP.parse().unwrap())
.udp6(UDP_PORT)
.tcp6(TCP_PORT)
.build(&key)
.unwrap();
let enr = EnrCombinedKeyWrapper(enr).into();
let node_record = NodeRecord::try_from(&enr).unwrap();
assert_eq!(
NodeRecord {
address: IP.parse().unwrap(),
tcp_port: TCP_PORT,
udp_port: UDP_PORT,
id: pk2id(&enr.public_key())
},
node_record
)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/lib.rs | crates/net/discv5/src/lib.rs | //! Wrapper around [`discv5::Discv5`].
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use std::{
collections::HashSet,
fmt,
net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr},
sync::Arc,
time::Duration,
};
use ::enr::Enr;
use alloy_primitives::bytes::Bytes;
use discv5::ListenConfig;
use enr::{discv4_id_to_discv5_id, EnrCombinedKeyWrapper};
use futures::future::join_all;
use itertools::Itertools;
use rand::{Rng, RngCore};
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_peers::{NodeRecord, PeerId};
use secp256k1::SecretKey;
use tokio::{sync::mpsc, task};
use tracing::{debug, error, trace};
pub mod config;
pub mod enr;
pub mod error;
pub mod filter;
pub mod metrics;
pub mod network_stack_id;
pub use discv5::{self, IpMode};
pub use config::{
BootNode, Config, ConfigBuilder, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_ADDR,
DEFAULT_DISCOVERY_V5_ADDR_IPV6, DEFAULT_DISCOVERY_V5_LISTEN_CONFIG, DEFAULT_DISCOVERY_V5_PORT,
DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL,
};
pub use enr::enr_to_discv4_id;
pub use error::Error;
pub use filter::{FilterOutcome, MustNotIncludeKeys};
pub use network_stack_id::NetworkStackId;
use metrics::{DiscoveredPeersMetrics, Discv5Metrics};
/// Max kbucket index is 255.
///
/// This is the max log2distance for 32 byte [`NodeId`](discv5::enr::NodeId) - 1. See <https://github.com/sigp/discv5/blob/e9e0d4f93ec35591832a9a8d937b4161127da87b/src/kbucket.rs#L586-L587>.
pub const MAX_KBUCKET_INDEX: usize = 255;
/// Default lowest kbucket index to attempt filling, in periodic look up query to populate kbuckets.
///
/// The peer at the 0th kbucket index is at log2distance 1 from the local node ID. See <https://github.com/sigp/discv5/blob/e9e0d4f93ec35591832a9a8d937b4161127da87b/src/kbucket.rs#L586-L587>.
///
/// Default is 0th index.
pub const DEFAULT_MIN_TARGET_KBUCKET_INDEX: usize = 0;
/// Transparent wrapper around [`discv5::Discv5`].
#[derive(Clone)]
pub struct Discv5 {
/// sigp/discv5 node.
discv5: Arc<discv5::Discv5>,
/// [`IpMode`] of the `RLPx` network.
rlpx_ip_mode: IpMode,
/// Key used in kv-pair to ID chain, e.g. 'opstack' or 'eth'.
fork_key: Option<&'static [u8]>,
/// Filter applied to a discovered peers before passing it up to app.
discovered_peer_filter: MustNotIncludeKeys,
/// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers.
metrics: Discv5Metrics,
}
impl Discv5 {
////////////////////////////////////////////////////////////////////////////////////////////////
// Minimal interface with `reth_network::discovery`
////////////////////////////////////////////////////////////////////////////////////////////////
/// Adds the node to the table, if it is not already present.
#[expect(clippy::result_large_err)]
pub fn add_node(&self, node_record: Enr<SecretKey>) -> Result<(), Error> {
let EnrCombinedKeyWrapper(enr) = node_record.into();
self.discv5.add_enr(enr).map_err(Error::AddNodeFailed)
}
/// Sets the pair in the EIP-868 [`Enr`] of the node.
///
/// If the key already exists, this will update it.
///
/// CAUTION: The value **must** be rlp encoded
pub fn set_eip868_in_local_enr(&self, key: Vec<u8>, rlp: Bytes) {
let Ok(key_str) = std::str::from_utf8(&key) else {
error!(target: "net::discv5",
err="key not utf-8",
"failed to update local enr"
);
return
};
if let Err(err) = self.discv5.enr_insert(key_str, &rlp) {
error!(target: "net::discv5",
%err,
"failed to update local enr"
);
}
}
/// Sets the pair in the EIP-868 [`Enr`] of the node.
///
/// If the key already exists, this will update it.
pub fn encode_and_set_eip868_in_local_enr(
&self,
key: Vec<u8>,
value: impl alloy_rlp::Encodable,
) {
let mut buf = Vec::new();
value.encode(&mut buf);
self.set_eip868_in_local_enr(key, buf.into())
}
/// Adds the peer and id to the ban list.
///
/// This will prevent any future inclusion in the table
pub fn ban(&self, peer_id: PeerId, ip: IpAddr) {
match discv4_id_to_discv5_id(peer_id) {
Ok(node_id) => {
self.discv5.ban_node(&node_id, None);
self.ban_ip(ip);
}
Err(err) => error!(target: "net::discv5",
%err,
"failed to ban peer"
),
}
}
/// Adds the ip to the ban list.
///
/// This will prevent any future inclusion in the table
pub fn ban_ip(&self, ip: IpAddr) {
self.discv5.ban_ip(ip, None);
}
/// Returns the [`NodeRecord`] of the local node.
///
/// This includes the currently tracked external IP address of the node.
///
/// Returns `None` if the local ENR does not contain the required fields.
pub fn node_record(&self) -> Option<NodeRecord> {
let enr: Enr<_> = EnrCombinedKeyWrapper(self.discv5.local_enr()).into();
enr.try_into().ok()
}
/// Spawns [`discv5::Discv5`]. Returns [`discv5::Discv5`] handle in reth compatible wrapper type
/// [`Discv5`], a receiver of [`discv5::Event`]s from the underlying node, and the local
/// [`Enr`](discv5::Enr) converted into the reth compatible [`NodeRecord`] type.
pub async fn start(
sk: &SecretKey,
discv5_config: Config,
) -> Result<(Self, mpsc::Receiver<discv5::Event>, NodeRecord), Error> {
//
// 1. make local enr from listen config
//
let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config);
trace!(target: "net::discv5",
?enr,
"local ENR"
);
//
// 2. start discv5
//
let Config {
discv5_config,
bootstrap_nodes,
lookup_interval,
bootstrap_lookup_interval,
bootstrap_lookup_countdown,
discovered_peer_filter,
..
} = discv5_config;
let EnrCombinedKeyWrapper(enr) = enr.into();
let sk = discv5::enr::CombinedKey::secp256k1_from_bytes(&mut sk.secret_bytes()).unwrap();
let mut discv5 = match discv5::Discv5::new(enr, sk, discv5_config) {
Ok(discv5) => discv5,
Err(err) => return Err(Error::InitFailure(err)),
};
discv5.start().await.map_err(Error::Discv5Error)?;
// start discv5 updates stream
let discv5_updates = discv5.event_stream().await.map_err(Error::Discv5Error)?;
let discv5 = Arc::new(discv5);
//
// 3. add boot nodes
//
bootstrap(bootstrap_nodes, &discv5).await?;
let metrics = Discv5Metrics::default();
//
// 4. start bg kbuckets maintenance
//
spawn_populate_kbuckets_bg(
lookup_interval,
bootstrap_lookup_interval,
bootstrap_lookup_countdown,
metrics.clone(),
discv5.clone(),
);
Ok((
Self { discv5, rlpx_ip_mode, fork_key, discovered_peer_filter, metrics },
discv5_updates,
bc_enr,
))
}
/// Process an event from the underlying [`discv5::Discv5`] node.
pub fn on_discv5_update(&self, update: discv5::Event) -> Option<DiscoveredPeer> {
#[expect(clippy::match_same_arms)]
match update {
discv5::Event::SocketUpdated(_) | discv5::Event::TalkRequest(_) |
// `Discovered` not unique discovered peers
discv5::Event::Discovered(_) => None,
discv5::Event::NodeInserted { replaced: _, .. } => {
// node has been inserted into kbuckets
// `replaced` partly covers `reth_discv4::DiscoveryUpdate::Removed(_)`
self.metrics.discovered_peers.increment_kbucket_insertions(1);
None
}
discv5::Event::SessionEstablished(enr, remote_socket) => {
// this branch is semantically similar to branches of
// `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and
// `DiscoveryUpdate::DiscoveredAtCapacity(_)
// peer has been discovered as part of query, or, by incoming session (peer has
// discovered us)
self.metrics.discovered_peers.increment_established_sessions_raw(1);
self.on_discovered_peer(&enr, remote_socket)
}
discv5::Event::UnverifiableEnr {
enr,
socket,
node_id: _,
} => {
// this branch is semantically similar to branches of
// `reth_discv4::DiscoveryUpdate`: `DiscoveryUpdate::Added(_)` and
// `DiscoveryUpdate::DiscoveredAtCapacity(_)
// peer has been discovered as part of query, or, by an outgoing session (but peer
// is behind NAT and responds from a different socket)
// NOTE: `discv5::Discv5` won't initiate a session with any peer with an
// unverifiable node record, for example one that advertises a reserved LAN IP
// address on a WAN network. This is in order to prevent DoS attacks, where some
// malicious peers may advertise a victim's socket. We will still try and connect
// to them over RLPx, to be compatible with EL discv5 implementations that don't
// enforce this security measure.
trace!(target: "net::discv5",
?enr,
%socket,
"discovered unverifiable enr, source socket doesn't match socket advertised in ENR"
);
self.metrics.discovered_peers.increment_unverifiable_enrs_raw_total(1);
self.on_discovered_peer(&enr, socket)
}
_ => None
}
}
/// Processes a discovered peer. Returns `true` if peer is added to
pub fn on_discovered_peer(
&self,
enr: &discv5::Enr,
socket: SocketAddr,
) -> Option<DiscoveredPeer> {
self.metrics.discovered_peers_advertised_networks.increment_once_by_network_type(enr);
let node_record = match self.try_into_reachable(enr, socket) {
Ok(enr_bc) => enr_bc,
Err(err) => {
trace!(target: "net::discv5",
%err,
?enr,
"discovered peer is unreachable"
);
self.metrics.discovered_peers.increment_established_sessions_unreachable_enr(1);
return None
}
};
if let FilterOutcome::Ignore { reason } = self.filter_discovered_peer(enr) {
trace!(target: "net::discv5",
?enr,
reason,
"filtered out discovered peer"
);
self.metrics.discovered_peers.increment_established_sessions_filtered(1);
return None
}
// todo: extend for all network stacks in reth-network rlpx logic
let fork_id = (self.fork_key == Some(NetworkStackId::ETH))
.then(|| self.get_fork_id(enr).ok())
.flatten();
trace!(target: "net::discv5",
?fork_id,
?enr,
"discovered peer"
);
Some(DiscoveredPeer { node_record, fork_id })
}
/// Tries to recover an unreachable [`Enr`](discv5::Enr) received via
/// [`discv5::Event::UnverifiableEnr`], into a [`NodeRecord`] usable by `RLPx`.
///
/// NOTE: Fallback solution to be compatible with Geth which includes peers into the discv5
/// WAN topology which, for example, advertise in their ENR that localhost is their UDP IP
/// address. These peers are only discovered if they initiate a connection attempt, and we by
/// such means learn their reachable IP address. If we receive their ENR from any other peer
/// as part of a lookup query, we won't find a reachable IP address on which to dial them by
/// reading their ENR.
pub fn try_into_reachable(
&self,
enr: &discv5::Enr,
socket: SocketAddr,
) -> Result<NodeRecord, Error> {
// ignore UDP socket advertised in ENR, use sender socket instead
let address = socket.ip();
let udp_port = socket.port();
let id = enr_to_discv4_id(enr).ok_or(Error::IncompatibleKeyType)?;
let tcp_port = (match self.rlpx_ip_mode {
IpMode::Ip4 => enr.tcp4(),
IpMode::Ip6 => enr.tcp6(),
IpMode::DualStack => unimplemented!("dual-stack support not implemented for rlpx"),
})
.unwrap_or(
// tcp socket is missing from ENR, or is wrong IP version.
//
// by default geth runs discv5 and discv4 behind the same udp port (the discv4 default
// port 30303), so rlpx has a chance of successfully dialing the peer on its discv5
// udp port if its running geth's p2p code.
udp_port,
);
Ok(NodeRecord { address, tcp_port, udp_port, id })
}
/// Applies filtering rules on an ENR. Returns [`Ok`](FilterOutcome::Ok) if peer should be
/// passed up to app, and [`Ignore`](FilterOutcome::Ignore) if peer should instead be dropped.
pub fn filter_discovered_peer(&self, enr: &discv5::Enr) -> FilterOutcome {
self.discovered_peer_filter.filter(enr)
}
/// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network
/// stack, if field is set.
#[expect(clippy::result_large_err)]
pub fn get_fork_id<K: discv5::enr::EnrKey>(
&self,
enr: &discv5::enr::Enr<K>,
) -> Result<ForkId, Error> {
let Some(key) = self.fork_key else { return Err(Error::NetworkStackIdNotConfigured) };
let fork_id = enr
.get_decodable::<EnrForkIdEntry>(key)
.ok_or(Error::ForkMissing(key))?
.map(Into::into)?;
Ok(fork_id)
}
////////////////////////////////////////////////////////////////////////////////////////////////
// Interface with sigp/discv5
////////////////////////////////////////////////////////////////////////////////////////////////
/// Exposes API of [`discv5::Discv5`].
pub fn with_discv5<F, R>(&self, f: F) -> R
where
F: FnOnce(&discv5::Discv5) -> R,
{
f(&self.discv5)
}
////////////////////////////////////////////////////////////////////////////////////////////////
// Complementary
////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the `RLPx` [`IpMode`] of the local node.
pub const fn ip_mode(&self) -> IpMode {
self.rlpx_ip_mode
}
/// Returns the key to use to identify the [`ForkId`] kv-pair on the [`Enr`](discv5::Enr).
pub const fn fork_key(&self) -> Option<&[u8]> {
self.fork_key
}
}
impl fmt::Debug for Discv5 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"{ .. }".fmt(f)
}
}
/// Result of successfully processing a peer discovered by [`discv5::Discv5`].
#[derive(Debug)]
pub struct DiscoveredPeer {
/// A discovery v4 backwards compatible ENR.
pub node_record: NodeRecord,
/// [`ForkId`] extracted from ENR w.r.t. configured
pub fork_id: Option<ForkId>,
}
/// Builds the local ENR with the supplied key.
pub fn build_local_enr(
sk: &SecretKey,
config: &Config,
) -> (Enr<SecretKey>, NodeRecord, Option<&'static [u8]>, IpMode) {
let mut builder = discv5::enr::Enr::builder();
let Config { discv5_config, fork, tcp_socket, other_enr_kv_pairs, .. } = config;
let socket = match discv5_config.listen_config {
ListenConfig::Ipv4 { ip, port } => {
if ip != Ipv4Addr::UNSPECIFIED {
builder.ip4(ip);
}
builder.udp4(port);
builder.tcp4(tcp_socket.port());
(ip, port).into()
}
ListenConfig::Ipv6 { ip, port } => {
if ip != Ipv6Addr::UNSPECIFIED {
builder.ip6(ip);
}
builder.udp6(port);
builder.tcp6(tcp_socket.port());
(ip, port).into()
}
ListenConfig::DualStack { ipv4, ipv4_port, ipv6, ipv6_port } => {
if ipv4 != Ipv4Addr::UNSPECIFIED {
builder.ip4(ipv4);
}
builder.udp4(ipv4_port);
builder.tcp4(tcp_socket.port());
if ipv6 != Ipv6Addr::UNSPECIFIED {
builder.ip6(ipv6);
}
builder.udp6(ipv6_port);
(ipv6, ipv6_port).into()
}
};
let rlpx_ip_mode = if tcp_socket.is_ipv4() { IpMode::Ip4 } else { IpMode::Ip6 };
// identifies which network node is on
let network_stack_id = fork.as_ref().map(|(network_stack_id, fork_value)| {
builder.add_value_rlp(network_stack_id, alloy_rlp::encode(fork_value).into());
*network_stack_id
});
// add other data
for (key, value) in other_enr_kv_pairs {
builder.add_value_rlp(key, value.clone().into());
}
// enr v4 not to get confused with discv4, independent versioning enr and
// discovery
let enr = builder.build(sk).expect("should build enr v4");
// backwards compatible enr
let bc_enr = NodeRecord::from_secret_key(socket, sk);
(enr, bc_enr, network_stack_id, rlpx_ip_mode)
}
/// Bootstraps underlying [`discv5::Discv5`] node with configured peers.
pub async fn bootstrap(
bootstrap_nodes: HashSet<BootNode>,
discv5: &Arc<discv5::Discv5>,
) -> Result<(), Error> {
trace!(target: "net::discv5",
?bootstrap_nodes,
"adding bootstrap nodes .."
);
let mut enr_requests = vec![];
for node in bootstrap_nodes {
match node {
BootNode::Enr(node) => {
if let Err(err) = discv5.add_enr(node) {
return Err(Error::AddNodeFailed(err))
}
}
BootNode::Enode(enode) => {
let discv5 = discv5.clone();
enr_requests.push(async move {
if let Err(err) = discv5.request_enr(enode.to_string()).await {
debug!(target: "net::discv5",
?enode,
%err,
"failed adding boot node"
);
}
})
}
}
}
// If a session is established, the ENR is added straight away to discv5 kbuckets
Ok(_ = join_all(enr_requests).await)
}
/// Backgrounds regular look up queries, in order to keep kbuckets populated.
pub fn spawn_populate_kbuckets_bg(
lookup_interval: u64,
bootstrap_lookup_interval: u64,
bootstrap_lookup_countdown: u64,
metrics: Discv5Metrics,
discv5: Arc<discv5::Discv5>,
) {
let local_node_id = discv5.local_enr().node_id();
let lookup_interval = Duration::from_secs(lookup_interval);
let metrics = metrics.discovered_peers;
let mut kbucket_index = MAX_KBUCKET_INDEX;
let pulse_lookup_interval = Duration::from_secs(bootstrap_lookup_interval);
task::spawn(Box::pin(async move {
// make many fast lookup queries at bootstrap, trying to fill kbuckets at furthest
// log2distance from local node
for i in (0..bootstrap_lookup_countdown).rev() {
let target = discv5::enr::NodeId::random();
trace!(target: "net::discv5",
%target,
bootstrap_boost_runs_countdown=i,
lookup_interval=format!("{:#?}", pulse_lookup_interval),
"starting bootstrap boost lookup query"
);
lookup(target, &discv5, &metrics).await;
tokio::time::sleep(pulse_lookup_interval).await;
}
// initiate regular lookups to populate kbuckets
loop {
// make sure node is connected to each subtree in the network by target
// selection (ref kademlia)
let target = get_lookup_target(kbucket_index, local_node_id);
trace!(target: "net::discv5",
%target,
lookup_interval=format!("{:#?}", lookup_interval),
"starting periodic lookup query"
);
lookup(target, &discv5, &metrics).await;
if kbucket_index > DEFAULT_MIN_TARGET_KBUCKET_INDEX {
// try to populate bucket one step closer
kbucket_index -= 1
} else {
// start over with bucket furthest away
kbucket_index = MAX_KBUCKET_INDEX
}
tokio::time::sleep(lookup_interval).await;
}
}));
}
/// Gets the next lookup target, based on which bucket is currently being targeted.
pub fn get_lookup_target(
kbucket_index: usize,
local_node_id: discv5::enr::NodeId,
) -> discv5::enr::NodeId {
// init target
let mut target = local_node_id.raw();
// make sure target has a 'log2distance'-long suffix that differs from local node id
let bit_offset = MAX_KBUCKET_INDEX.saturating_sub(kbucket_index);
let (byte, bit) = (bit_offset / 8, bit_offset % 8);
// Flip the target bit.
target[byte] ^= 1 << (7 - bit);
// Randomize the bits after the target.
let mut rng = rand::rng();
// Randomize remaining bits in the byte we modified.
if bit < 7 {
// Compute the mask of the bits that need to be randomized.
let bits_to_randomize = 0xff >> (bit + 1);
// Clear.
target[byte] &= !bits_to_randomize;
// Randomize.
target[byte] |= rng.random::<u8>() & bits_to_randomize;
}
// Randomize remaining bytes.
rng.fill_bytes(&mut target[byte + 1..]);
target.into()
}
/// Runs a [`discv5::Discv5`] lookup query.
pub async fn lookup(
target: discv5::enr::NodeId,
discv5: &discv5::Discv5,
metrics: &DiscoveredPeersMetrics,
) {
metrics.set_total_sessions(discv5.metrics().active_sessions);
metrics.set_total_kbucket_peers(
discv5.with_kbuckets(|kbuckets| kbuckets.read().iter_ref().count()),
);
match discv5.find_node(target).await {
Err(err) => trace!(target: "net::discv5",
%err,
"lookup query failed"
),
Ok(peers) => trace!(target: "net::discv5",
target=format!("{:#?}", target),
peers_count=peers.len(),
peers=format!("[{:#}]", peers.iter()
.map(|enr| enr.node_id()
).format(", ")),
"peers returned by lookup query"
),
}
// `Discv5::connected_peers` can be subset of sessions, not all peers make it
// into kbuckets, e.g. incoming sessions from peers with
// unreachable enrs
debug!(target: "net::discv5",
connected_peers=discv5.connected_peers(),
"connected peers in routing table"
);
}
#[cfg(test)]
mod test {
use super::*;
use ::enr::{CombinedKey, EnrKey};
use rand_08::thread_rng;
use reth_chainspec::MAINNET;
use tracing::trace;
fn discv5_noop() -> Discv5 {
let sk = CombinedKey::generate_secp256k1();
Discv5 {
discv5: Arc::new(
discv5::Discv5::new(
Enr::empty(&sk).unwrap(),
sk,
discv5::ConfigBuilder::new(DEFAULT_DISCOVERY_V5_LISTEN_CONFIG).build(),
)
.unwrap(),
),
rlpx_ip_mode: IpMode::Ip4,
fork_key: None,
discovered_peer_filter: MustNotIncludeKeys::default(),
metrics: Discv5Metrics::default(),
}
}
async fn start_discovery_node(
udp_port_discv5: u16,
) -> (Discv5, mpsc::Receiver<discv5::Event>, NodeRecord) {
let secret_key = SecretKey::new(&mut thread_rng());
let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap();
let rlpx_addr: SocketAddr = "127.0.0.1:30303".parse().unwrap();
let discv5_listen_config = ListenConfig::from(discv5_addr);
let discv5_config = Config::builder(rlpx_addr)
.discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build())
.build();
Discv5::start(&secret_key, discv5_config).await.expect("should build discv5")
}
#[tokio::test(flavor = "multi_thread")]
async fn discv5() {
reth_tracing::init_test_tracing();
// rig test
// rig node_1
let (node_1, mut stream_1, _) = start_discovery_node(30344).await;
let node_1_enr = node_1.with_discv5(|discv5| discv5.local_enr());
// rig node_2
let (node_2, mut stream_2, _) = start_discovery_node(30355).await;
let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr());
trace!(target: "net::discv5::test",
node_1_node_id=format!("{:#}", node_1_enr.node_id()),
node_2_node_id=format!("{:#}", node_2_enr.node_id()),
"started nodes"
);
// test
// add node_2 to discovery handle of node_1 (should add node to discv5 kbuckets)
let node_2_enr_reth_compatible_ty: Enr<SecretKey> =
EnrCombinedKeyWrapper(node_2_enr.clone()).into();
node_1.add_node(node_2_enr_reth_compatible_ty).unwrap();
// verify node_2 is in KBuckets of node_1:discv5
assert!(
node_1.with_discv5(|discv5| discv5.table_entries_id().contains(&node_2_enr.node_id()))
);
// manually trigger connection from node_1 to node_2
node_1.with_discv5(|discv5| discv5.send_ping(node_2_enr.clone())).await.unwrap();
// verify node_1:discv5 is connected to node_2:discv5 and vv
let event_1_v5 = stream_1.recv().await.unwrap();
assert!(matches!(
event_1_v5,
discv5::Event::SessionEstablished(node, socket) if node == node_2_enr && socket == node_2_enr.udp4_socket().unwrap().into()
));
// verify node_1 is in KBuckets of node_2:discv5
let event_2_v5 = stream_2.recv().await.unwrap();
assert!(matches!(
event_2_v5,
discv5::Event::NodeInserted { node_id, replaced } if node_id == node_1_enr.node_id() && replaced.is_none()
));
}
#[test]
fn discovered_enr_disc_socket_missing() {
reth_tracing::init_test_tracing();
// rig test
const REMOTE_RLPX_PORT: u16 = 30303;
let remote_socket = "104.28.44.25:9000".parse().unwrap();
let remote_key = CombinedKey::generate_secp256k1();
let remote_enr = Enr::builder().tcp4(REMOTE_RLPX_PORT).build(&remote_key).unwrap();
let discv5 = discv5_noop();
// test
let filtered_peer = discv5.on_discovered_peer(&remote_enr, remote_socket);
assert_eq!(
NodeRecord {
address: remote_socket.ip(),
udp_port: remote_socket.port(),
tcp_port: REMOTE_RLPX_PORT,
id: enr_to_discv4_id(&remote_enr).unwrap(),
},
filtered_peer.unwrap().node_record
)
}
// Copied from sigp/discv5 with slight modification (U256 type)
// <https://github.com/sigp/discv5/blob/master/src/kbucket/key.rs#L89-L101>
#[expect(unreachable_pub)]
#[expect(unused)]
mod sigp {
use alloy_primitives::U256;
use enr::{
k256::sha2::digest::generic_array::{typenum::U32, GenericArray},
NodeId,
};
/// A `Key` is a cryptographic hash, identifying both the nodes participating in
/// the Kademlia DHT, as well as records stored in the DHT.
///
/// The set of all `Key`s defines the Kademlia keyspace.
///
/// `Key`s have an XOR metric as defined in the Kademlia paper, i.e. the bitwise XOR of
/// the hash digests, interpreted as an integer. See [`Key::distance`].
///
/// A `Key` preserves the preimage of type `T` of the hash function. See [`Key::preimage`].
#[derive(Clone, Debug)]
pub struct Key<T> {
preimage: T,
hash: GenericArray<u8, U32>,
}
impl<T> PartialEq for Key<T> {
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash
}
}
impl<T> Eq for Key<T> {}
impl<TPeerId> AsRef<Self> for Key<TPeerId> {
fn as_ref(&self) -> &Self {
self
}
}
impl<T> Key<T> {
/// Construct a new `Key` by providing the raw 32 byte hash.
pub const fn new_raw(preimage: T, hash: GenericArray<u8, U32>) -> Self {
Self { preimage, hash }
}
/// Borrows the preimage of the key.
pub const fn preimage(&self) -> &T {
&self.preimage
}
/// Converts the key into its preimage.
pub fn into_preimage(self) -> T {
self.preimage
}
/// Computes the distance of the keys according to the XOR metric.
pub fn distance<U>(&self, other: &Key<U>) -> Distance {
let a = U256::from_be_slice(self.hash.as_slice());
let b = U256::from_be_slice(other.hash.as_slice());
Distance(a ^ b)
}
// Used in the FINDNODE query outside of the k-bucket implementation.
/// Computes the integer log-2 distance between two keys, assuming a 256-bit
/// key. The output returns None if the key's are identical. The range is 1-256.
pub fn log2_distance<U>(&self, other: &Key<U>) -> Option<u64> {
let xor_dist = self.distance(other);
let log_dist = (256 - xor_dist.0.leading_zeros() as u64);
(log_dist != 0).then_some(log_dist)
}
}
impl From<NodeId> for Key<NodeId> {
fn from(node_id: NodeId) -> Self {
Self { preimage: node_id, hash: *GenericArray::from_slice(&node_id.raw()) }
}
}
/// A distance between two `Key`s.
#[derive(Copy, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Debug)]
pub struct Distance(pub(super) U256);
}
#[test]
fn select_lookup_target() {
for bucket_index in 0..=MAX_KBUCKET_INDEX {
let sk = CombinedKey::generate_secp256k1();
let local_node_id = discv5::enr::NodeId::from(sk.public());
let target = get_lookup_target(bucket_index, local_node_id);
let local_node_id = sigp::Key::from(local_node_id);
let target = sigp::Key::from(target);
assert_eq!(local_node_id.log2_distance(&target), Some(bucket_index as u64 + 1));
}
}
#[test]
fn build_enr_from_config() {
const TCP_PORT: u16 = 30303;
let fork_id = MAINNET.latest_fork_id();
let config = Config::builder((Ipv4Addr::UNSPECIFIED, TCP_PORT).into())
.fork(NetworkStackId::ETH, fork_id)
.build();
let sk = SecretKey::new(&mut thread_rng());
let (enr, _, _, _) = build_local_enr(&sk, &config);
let decoded_fork_id = enr
.get_decodable::<EnrForkIdEntry>(NetworkStackId::ETH)
.unwrap()
.map(Into::into)
.unwrap();
assert_eq!(fork_id, decoded_fork_id);
assert_eq!(TCP_PORT, enr.tcp4().unwrap()); // listen config is defaulting to ip mode ipv4
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/filter.rs | crates/net/discv5/src/filter.rs | //! Predicates to constrain peer lookups.
use std::collections::HashSet;
use derive_more::Constructor;
use itertools::Itertools;
/// Outcome of applying filtering rules on node record.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FilterOutcome {
/// ENR passes filter rules.
Ok,
/// ENR doesn't pass filter rules, for the given reason.
Ignore {
/// Reason for filtering out node record.
reason: String,
},
}
impl FilterOutcome {
/// Returns `true` for [`FilterOutcome::Ok`].
pub const fn is_ok(&self) -> bool {
matches!(self, Self::Ok)
}
}
/// Filter requiring that peers advertise that they belong to some fork of a certain key.
#[derive(Debug, Constructor, Clone, Copy, PartialEq, Eq, Hash)]
pub struct MustIncludeKey {
/// Kv-pair key which node record must advertise.
key: &'static [u8],
}
impl MustIncludeKey {
/// Returns [`FilterOutcome::Ok`] if [`Enr`](discv5::Enr) contains the configured kv-pair key.
pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome {
if enr.get_raw_rlp(self.key).is_none() {
return FilterOutcome::Ignore {
reason: format!("{} fork required", String::from_utf8_lossy(self.key)),
}
}
FilterOutcome::Ok
}
}
/// Filter requiring that peers not advertise kv-pairs using certain keys, e.g. b"eth2".
#[derive(Debug, Clone, Default)]
pub struct MustNotIncludeKeys {
keys: HashSet<MustIncludeKey>,
}
impl MustNotIncludeKeys {
/// Returns a new instance that disallows node records with a kv-pair that has any of the given
/// keys.
pub fn new(disallow_keys: &[&'static [u8]]) -> Self {
let mut keys = HashSet::with_capacity(disallow_keys.len());
for key in disallow_keys {
_ = keys.insert(MustIncludeKey::new(key));
}
Self { keys }
}
}
impl MustNotIncludeKeys {
/// Returns `true` if [`Enr`](discv5::Enr) passes filtering rules.
pub fn filter(&self, enr: &discv5::Enr) -> FilterOutcome {
for key in &self.keys {
if matches!(key.filter(enr), FilterOutcome::Ok) {
return FilterOutcome::Ignore {
reason: format!(
"{} forks not allowed",
self.keys.iter().map(|key| String::from_utf8_lossy(key.key)).format(",")
),
}
}
}
FilterOutcome::Ok
}
/// Adds a key that must not be present for any kv-pair in a node record.
pub fn add_disallowed_keys(&mut self, keys: &[&'static [u8]]) {
for key in keys {
self.keys.insert(MustIncludeKey::new(key));
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::NetworkStackId;
use alloy_rlp::Bytes;
use discv5::enr::{CombinedKey, Enr};
#[test]
fn must_not_include_key_filter() {
// rig test
let filter = MustNotIncludeKeys::new(&[NetworkStackId::ETH, NetworkStackId::ETH2]);
// enr_1 advertises a fork from one of the keys configured in filter
let sk = CombinedKey::generate_secp256k1();
let enr_1 = Enr::builder()
.add_value_rlp(NetworkStackId::ETH as &[u8], Bytes::from("cancun"))
.build(&sk)
.unwrap();
// enr_2 advertises a fork from one the other key configured in filter
let sk = CombinedKey::generate_secp256k1();
let enr_2 = Enr::builder()
.add_value_rlp(NetworkStackId::ETH2, Bytes::from("deneb"))
.build(&sk)
.unwrap();
// test
assert!(matches!(filter.filter(&enr_1), FilterOutcome::Ignore { .. }));
assert!(matches!(filter.filter(&enr_2), FilterOutcome::Ignore { .. }));
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/network_stack_id.rs | crates/net/discv5/src/network_stack_id.rs | //! Keys of ENR [`ForkId`](reth_ethereum_forks::ForkId) kv-pair. Identifies which network stack a
//! node belongs to.
use reth_chainspec::EthChainSpec;
/// Identifies which Ethereum network stack a node belongs to, on the discovery network.
#[derive(Debug)]
pub struct NetworkStackId;
impl NetworkStackId {
/// ENR fork ID kv-pair key, for an Ethereum L1 EL node.
pub const ETH: &'static [u8] = b"eth";
/// ENR fork ID kv-pair key, for an Ethereum L1 CL node.
pub const ETH2: &'static [u8] = b"eth2";
/// ENR fork ID kv-pair key, for an Optimism EL node.
pub const OPEL: &'static [u8] = b"opel";
/// ENR fork ID kv-pair key, for an Optimism CL node.
pub const OPSTACK: &'static [u8] = b"opstack";
/// Returns the [`NetworkStackId`] that matches the given chain spec.
pub fn id(chain: impl EthChainSpec) -> Option<&'static [u8]> {
if chain.is_optimism() {
return Some(Self::OPEL)
} else if chain.is_ethereum() {
return Some(Self::ETH)
}
None
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/error.rs | crates/net/discv5/src/error.rs | //! Errors interfacing with [`discv5::Discv5`].
/// Errors interfacing with [`discv5::Discv5`].
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// Failure adding node to [`discv5::Discv5`].
#[error("failed adding node to discv5, {0}")]
AddNodeFailed(&'static str),
/// Node record has incompatible key type.
#[error("incompatible key type (not secp256k1)")]
IncompatibleKeyType,
/// No key used to identify rlpx network is configured.
#[error("network stack identifier is not configured")]
NetworkStackIdNotConfigured,
/// Missing key used to identify rlpx network.
#[error("fork missing on enr, key missing")]
ForkMissing(&'static [u8]),
/// Failed to decode [`ForkId`](reth_ethereum_forks::ForkId) rlp value.
#[error("failed to decode fork id, 'eth': {0:?}")]
ForkIdDecodeError(#[from] alloy_rlp::Error),
/// Peer is unreachable over discovery.
#[error("discovery socket missing")]
UnreachableDiscovery,
/// Failed to initialize [`discv5::Discv5`].
#[error("init failed, {0}")]
InitFailure(&'static str),
/// An error from underlying [`discv5::Discv5`] node.
#[error("sigp/discv5 error, {0}")]
Discv5Error(discv5::Error),
/// The [`ListenConfig`](discv5::ListenConfig) has been misconfigured.
#[error("misconfigured listen config, RLPx TCP address must also be supported by discv5")]
ListenConfigMisconfigured,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv5/src/metrics.rs | crates/net/discv5/src/metrics.rs | //! Tracks peer discovery for [`Discv5`](crate::Discv5).
use metrics::{Counter, Gauge};
use reth_metrics::Metrics;
use crate::NetworkStackId;
/// Information tracked by [`Discv5`](crate::Discv5).
#[derive(Debug, Default, Clone)]
pub struct Discv5Metrics {
/// Frequency of networks advertised in discovered peers' node records.
pub discovered_peers_advertised_networks: AdvertisedChainMetrics,
/// Tracks discovered peers.
pub discovered_peers: DiscoveredPeersMetrics,
}
/// Tracks discovered peers.
#[derive(Metrics, Clone)]
#[metrics(scope = "discv5")]
pub struct DiscoveredPeersMetrics {
////////////////////////////////////////////////////////////////////////////////////////////////
// Kbuckets
////////////////////////////////////////////////////////////////////////////////////////////////
/// Total peers currently in [`discv5::Discv5`]'s kbuckets.
kbucket_peers_raw_total: Gauge,
/// Total discovered peers that are inserted into [`discv5::Discv5`]'s kbuckets.
///
/// This is a subset of the total established sessions, in which all peers advertise a udp
/// socket in their node record which is reachable from the local node. Only these peers make
/// it into [`discv5::Discv5`]'s kbuckets and will hence be included in queries.
///
/// Note: the definition of 'discovered' is not exactly synonymous in `reth_discv4::Discv4`.
inserted_kbucket_peers_raw_total: Counter,
////////////////////////////////////////////////////////////////////////////////////////////////
// Sessions
////////////////////////////////////////////////////////////////////////////////////////////////
/// Total peers currently connected to [`discv5::Discv5`].
sessions_raw_total: Gauge,
/// Total number of sessions established by [`discv5::Discv5`].
established_sessions_raw_total: Counter,
/// Total number of sessions established by [`discv5::Discv5`], with peers that don't advertise
/// a socket which is reachable from the local node in their node record.
///
/// These peers can't make it into [`discv5::Discv5`]'s kbuckets, and hence won't be part of
/// queries (neither shared with peers in NODES responses, nor queried for peers with FINDNODE
/// requests).
established_sessions_unreachable_enr_total: Counter,
/// Total number of sessions established by [`discv5::Discv5`], that pass configured
/// [`filter`](crate::filter) rules.
established_sessions_custom_filtered_total: Counter,
/// Total number of unverifiable ENRs discovered by [`discv5::Discv5`].
///
/// These are peers that fail [`discv5::Discv5`] session establishment, because the UDP socket
/// they're making a connection from doesn't match the UDP socket advertised in their ENR.
/// These peers will be denied a session (and hence can't make it into kbuckets) until they
/// have update their ENR, to reflect their actual UDP socket.
unverifiable_enrs_raw_total: Counter,
}
impl DiscoveredPeersMetrics {
/// Sets current total number of peers in [`discv5::Discv5`]'s kbuckets.
pub fn set_total_kbucket_peers(&self, num: usize) {
self.kbucket_peers_raw_total.set(num as f64)
}
/// Increments the number of kbucket insertions in [`discv5::Discv5`].
pub fn increment_kbucket_insertions(&self, num: u64) {
self.inserted_kbucket_peers_raw_total.increment(num)
}
/// Sets current total number of peers connected to [`discv5::Discv5`].
pub fn set_total_sessions(&self, num: usize) {
self.sessions_raw_total.set(num as f64)
}
/// Increments number of sessions established by [`discv5::Discv5`].
pub fn increment_established_sessions_raw(&self, num: u64) {
self.established_sessions_raw_total.increment(num)
}
/// Increments number of sessions established by [`discv5::Discv5`], with peers that don't have
/// a reachable node record.
pub fn increment_established_sessions_unreachable_enr(&self, num: u64) {
self.established_sessions_unreachable_enr_total.increment(num)
}
/// Increments number of sessions established by [`discv5::Discv5`], that pass configured
/// [`filter`](crate::filter) rules.
pub fn increment_established_sessions_filtered(&self, num: u64) {
self.established_sessions_custom_filtered_total.increment(num)
}
/// Increments number of unverifiable ENRs discovered by [`discv5::Discv5`]. These are peers
/// that fail session establishment because their advertised UDP socket doesn't match the
/// socket they are making the connection from.
pub fn increment_unverifiable_enrs_raw_total(&self, num: u64) {
self.unverifiable_enrs_raw_total.increment(num)
}
}
/// Tracks frequency of networks that are advertised by discovered peers.
///
/// Peers advertise the chain they belong to as a kv-pair in their node record, using the network
/// as key.
#[derive(Metrics, Clone)]
#[metrics(scope = "discv5")]
pub struct AdvertisedChainMetrics {
/// Frequency of node records with a kv-pair with [`OPEL`](NetworkStackId::OPEL) as
/// key.
opel: Counter,
/// Frequency of node records with a kv-pair with [`OPSTACK`](NetworkStackId::OPSTACK) as
/// key.
opstack: Counter,
/// Frequency of node records with a kv-pair with [`ETH`](NetworkStackId::ETH) as key.
eth: Counter,
/// Frequency of node records with a kv-pair with [`ETH2`](NetworkStackId::ETH2) as key.
eth2: Counter,
}
impl AdvertisedChainMetrics {
/// Counts each recognised network stack type that is advertised on node record, once.
pub fn increment_once_by_network_type(&self, enr: &discv5::Enr) {
if enr.get_raw_rlp(NetworkStackId::OPEL).is_some() {
self.opel.increment(1u64)
}
if enr.get_raw_rlp(NetworkStackId::OPSTACK).is_some() {
self.opstack.increment(1u64)
}
if enr.get_raw_rlp(NetworkStackId::ETH).is_some() {
self.eth.increment(1u64)
}
if enr.get_raw_rlp(NetworkStackId::ETH2).is_some() {
self.eth2.increment(1u64)
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/nat/src/lib.rs | crates/net/nat/src/lib.rs | //! Helpers for resolving the external IP.
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod net_if;
pub use net_if::{NetInterfaceError, DEFAULT_NET_IF_NAME};
use std::{
fmt,
future::{poll_fn, Future},
net::{AddrParseError, IpAddr},
pin::Pin,
str::FromStr,
task::{Context, Poll},
time::Duration,
};
use tracing::debug;
use crate::net_if::resolve_net_if_ip;
#[cfg(feature = "serde")]
use serde_with::{DeserializeFromStr, SerializeDisplay};
/// URLs to `GET` the external IP address.
///
/// Taken from: <https://stackoverflow.com/questions/3253701/get-public-external-ip-address>
const EXTERNAL_IP_APIS: &[&str] =
&["https://ipinfo.io/ip", "https://icanhazip.com", "https://ifconfig.me"];
/// All builtin resolvers.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Hash)]
#[cfg_attr(feature = "serde", derive(SerializeDisplay, DeserializeFromStr))]
pub enum NatResolver {
/// Resolve with any available resolver.
#[default]
Any,
/// Resolve external IP via `UPnP`.
Upnp,
/// Resolve external IP via a network request.
PublicIp,
/// Use the given [`IpAddr`]
ExternalIp(IpAddr),
/// Resolve external IP via the network interface.
NetIf,
/// Resolve nothing
None,
}
impl NatResolver {
/// Attempts to produce an IP address (best effort).
pub async fn external_addr(self) -> Option<IpAddr> {
external_addr_with(self).await
}
/// Returns the external ip, if it is [`NatResolver::ExternalIp`]
pub const fn as_external_ip(self) -> Option<IpAddr> {
match self {
Self::ExternalIp(ip) => Some(ip),
_ => None,
}
}
}
impl fmt::Display for NatResolver {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Any => f.write_str("any"),
Self::Upnp => f.write_str("upnp"),
Self::PublicIp => f.write_str("publicip"),
Self::ExternalIp(ip) => write!(f, "extip:{ip}"),
Self::NetIf => f.write_str("netif"),
Self::None => f.write_str("none"),
}
}
}
/// Error when parsing a [`NatResolver`]
#[derive(Debug, thiserror::Error)]
pub enum ParseNatResolverError {
/// Failed to parse provided IP
#[error(transparent)]
AddrParseError(#[from] AddrParseError),
/// Failed to parse due to unknown variant
#[error("Unknown Nat Resolver variant: {0}")]
UnknownVariant(String),
}
impl FromStr for NatResolver {
type Err = ParseNatResolverError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let r = match s {
"any" => Self::Any,
"upnp" => Self::Upnp,
"none" => Self::None,
"publicip" | "public-ip" => Self::PublicIp,
"netif" => Self::NetIf,
s => {
let Some(ip) = s.strip_prefix("extip:") else {
return Err(ParseNatResolverError::UnknownVariant(format!(
"Unknown Nat Resolver: {s}"
)))
};
Self::ExternalIp(ip.parse()?)
}
};
Ok(r)
}
}
/// With this type you can resolve the external public IP address on an interval basis.
#[must_use = "Does nothing unless polled"]
pub struct ResolveNatInterval {
resolver: NatResolver,
future: Option<Pin<Box<dyn Future<Output = Option<IpAddr>> + Send>>>,
interval: tokio::time::Interval,
}
impl fmt::Debug for ResolveNatInterval {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ResolveNatInterval")
.field("resolver", &self.resolver)
.field("future", &self.future.as_ref().map(drop))
.field("interval", &self.interval)
.finish()
}
}
impl ResolveNatInterval {
fn with_interval(resolver: NatResolver, interval: tokio::time::Interval) -> Self {
Self { resolver, future: None, interval }
}
/// Creates a new [`ResolveNatInterval`] that attempts to resolve the public IP with interval of
/// period. See also [`tokio::time::interval`]
#[track_caller]
pub fn interval(resolver: NatResolver, period: Duration) -> Self {
let interval = tokio::time::interval(period);
Self::with_interval(resolver, interval)
}
/// Creates a new [`ResolveNatInterval`] that attempts to resolve the public IP with interval of
/// period with the first attempt starting at `start`. See also [`tokio::time::interval_at`]
#[track_caller]
pub fn interval_at(
resolver: NatResolver,
start: tokio::time::Instant,
period: Duration,
) -> Self {
let interval = tokio::time::interval_at(start, period);
Self::with_interval(resolver, interval)
}
/// Returns the resolver used by this interval
pub const fn resolver(&self) -> &NatResolver {
&self.resolver
}
/// Completes when the next [`IpAddr`] in the interval has been reached.
pub async fn tick(&mut self) -> Option<IpAddr> {
poll_fn(|cx| self.poll_tick(cx)).await
}
/// Polls for the next resolved [`IpAddr`] in the interval to be reached.
///
/// This method can return the following values:
///
/// * `Poll::Pending` if the next [`IpAddr`] has not yet been resolved.
/// * `Poll::Ready(Option<IpAddr>)` if the next [`IpAddr`] has been resolved. This returns
/// `None` if the attempt was unsuccessful.
pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll<Option<IpAddr>> {
if self.interval.poll_tick(cx).is_ready() {
self.future = Some(Box::pin(self.resolver.external_addr()));
}
if let Some(mut fut) = self.future.take() {
match fut.as_mut().poll(cx) {
Poll::Ready(ip) => return Poll::Ready(ip),
Poll::Pending => self.future = Some(fut),
}
}
Poll::Pending
}
}
/// Attempts to produce an IP address with all builtin resolvers (best effort).
pub async fn external_ip() -> Option<IpAddr> {
external_addr_with(NatResolver::Any).await
}
/// Given a [`NatResolver`] attempts to produce an IP address (best effort).
pub async fn external_addr_with(resolver: NatResolver) -> Option<IpAddr> {
match resolver {
NatResolver::Any | NatResolver::Upnp | NatResolver::PublicIp => resolve_external_ip().await,
NatResolver::ExternalIp(ip) => Some(ip),
NatResolver::NetIf => resolve_net_if_ip(DEFAULT_NET_IF_NAME)
.inspect_err(|err| {
debug!(target: "net::nat",
%err,
"Failed to resolve network interface IP"
);
})
.ok(),
NatResolver::None => None,
}
}
async fn resolve_external_ip() -> Option<IpAddr> {
let futures = EXTERNAL_IP_APIS.iter().copied().map(resolve_external_ip_url_res).map(Box::pin);
futures_util::future::select_ok(futures)
.await
.inspect_err(|err| {
debug!(target: "net::nat",
?err,
external_ip_apis=?EXTERNAL_IP_APIS,
"Failed to resolve external IP from any API");
})
.ok()
.map(|(ip, _)| ip)
}
async fn resolve_external_ip_url_res(url: &str) -> Result<IpAddr, ()> {
resolve_external_ip_url(url).await.ok_or(())
}
async fn resolve_external_ip_url(url: &str) -> Option<IpAddr> {
let client = reqwest::Client::builder().timeout(Duration::from_secs(10)).build().ok()?;
let response = client.get(url).send().await.ok()?;
let response = response.error_for_status().ok()?;
let text = response.text().await.ok()?;
text.trim().parse().ok()
}
#[cfg(test)]
mod tests {
use super::*;
use std::net::Ipv4Addr;
#[tokio::test]
#[ignore]
async fn get_external_ip() {
reth_tracing::init_test_tracing();
let ip = external_ip().await;
dbg!(ip);
}
#[tokio::test]
#[ignore]
async fn get_external_ip_interval() {
reth_tracing::init_test_tracing();
let mut interval = ResolveNatInterval::interval(Default::default(), Duration::from_secs(5));
let ip = interval.tick().await;
dbg!(ip);
let ip = interval.tick().await;
dbg!(ip);
}
#[test]
fn test_from_str() {
assert_eq!(NatResolver::Any, "any".parse().unwrap());
assert_eq!(NatResolver::None, "none".parse().unwrap());
let ip = NatResolver::ExternalIp(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
let s = "extip:0.0.0.0";
assert_eq!(ip, s.parse().unwrap());
assert_eq!(ip.to_string().as_str(), s);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/nat/src/net_if.rs | crates/net/nat/src/net_if.rs | //! IP resolution on non-host Docker network.
use std::{io, net::IpAddr};
/// The 'eth0' interface tends to be the default interface that docker containers use to
/// communicate with each other.
pub const DEFAULT_NET_IF_NAME: &str = "eth0";
/// Errors resolving network interface IP.
#[derive(Debug, thiserror::Error)]
pub enum NetInterfaceError {
/// Error reading OS interfaces.
#[error("failed to read OS interfaces: {0}")]
Io(io::Error),
/// No interface found with given name.
#[error("interface not found: {0}, found other interfaces: {1:?}")]
IFNotFound(String, Vec<String>),
}
/// Reads IP of OS interface with given name, if exists.
pub fn resolve_net_if_ip(if_name: &str) -> Result<IpAddr, NetInterfaceError> {
match if_addrs::get_if_addrs() {
Ok(ifs) => {
let ip = ifs.iter().find(|i| i.name == if_name).map(|i| i.ip());
match ip {
Some(ip) => Ok(ip),
None => {
let ifs = ifs.into_iter().map(|i| i.name.as_str().into()).collect();
Err(NetInterfaceError::IFNotFound(if_name.into(), ifs))
}
}
}
Err(err) => Err(NetInterfaceError::Io(err)),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::net::Ipv4Addr;
#[test]
fn read_docker_if_addr() {
const LOCALHOST_IF: [&str; 2] = ["lo0", "lo"];
let ip = resolve_net_if_ip(LOCALHOST_IF[0])
.unwrap_or_else(|_| resolve_net_if_ip(LOCALHOST_IF[1]).unwrap());
assert_eq!(ip, Ipv4Addr::LOCALHOST);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/etl/src/lib.rs | crates/etl/src/lib.rs | //! ETL data collector.
//!
//! This crate is useful for dumping unsorted data into temporary files and iterating on their
//! sorted representation later on.
//!
//! This has multiple uses, such as optimizing database inserts (for Btree based databases) and
//! memory management (as it moves the buffer to disk instead of memory).
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use std::{
cmp::Reverse,
collections::BinaryHeap,
io::{self, BufReader, BufWriter, Read, Seek, SeekFrom, Write},
path::{Path, PathBuf},
};
/// Key len and Value len encode use [`usize::to_be_bytes()`] the length is 8.
const KV_LEN: usize = 8;
use rayon::prelude::*;
use reth_db_api::table::{Compress, Encode, Key, Value};
use tempfile::{NamedTempFile, TempDir};
/// An ETL (extract, transform, load) data collector.
///
/// Data is pushed (extract) to the collector which internally flushes the data in a sorted
/// (transform) manner to files of some specified capacity. the data can later be iterated over
/// (load) in a sorted manner.
///
/// Used mainly to insert data into `MDBX` in a sorted manner. This is important because performance
/// and storage space degrades greatly if the data is inserted unsorted (eg. tables with hashes as
/// keys.) as opposed to append & sorted insert. Some benchmarks can be found [here](https://github.com/paradigmxyz/reth/pull/1130#issuecomment-1418642755).
#[derive(Debug)]
pub struct Collector<K, V>
where
K: Encode + Ord,
V: Compress,
{
/// Parent directory where to create ETL files
parent_dir: Option<PathBuf>,
/// Directory for temporary file storage
dir: Option<TempDir>,
/// Collection of temporary ETL files
files: Vec<EtlFile>,
/// Current buffer size in bytes
buffer_size_bytes: usize,
/// Maximum buffer capacity in bytes, triggers flush when reached
buffer_capacity_bytes: usize,
/// In-memory buffer storing encoded and compressed key-value pairs
buffer: Vec<(<K as Encode>::Encoded, <V as Compress>::Compressed)>,
/// Total number of elements in the collector, including all files
len: usize,
}
impl<K, V> Collector<K, V>
where
K: Key,
V: Value,
{
/// Create a new collector with some capacity.
///
/// Once the capacity (in bytes) is reached, the data is sorted and flushed to disk.
pub const fn new(buffer_capacity_bytes: usize, parent_dir: Option<PathBuf>) -> Self {
Self {
parent_dir,
dir: None,
buffer_size_bytes: 0,
files: Vec::new(),
buffer_capacity_bytes,
buffer: Vec::new(),
len: 0,
}
}
/// Returns number of elements currently in the collector.
pub const fn len(&self) -> usize {
self.len
}
/// Returns `true` if there are currently no elements in the collector.
pub const fn is_empty(&self) -> bool {
self.len == 0
}
/// Clears the collector, removing all data, including the temporary directory.
pub fn clear(&mut self) {
self.dir = None;
// Clear vectors and free the allocated memory
self.files = Vec::new();
self.buffer = Vec::new();
self.buffer_size_bytes = 0;
self.len = 0;
}
/// Insert an entry into the collector.
pub fn insert(&mut self, key: K, value: V) -> io::Result<()> {
let key = key.encode();
let value = value.compress();
self.buffer_size_bytes += key.as_ref().len() + value.as_ref().len();
self.buffer.push((key, value));
if self.buffer_size_bytes > self.buffer_capacity_bytes {
self.flush()?;
}
self.len += 1;
Ok(())
}
/// Returns a reference to the temporary directory used by the collector. If the directory
/// doesn't exist, it will be created.
fn dir(&mut self) -> io::Result<&TempDir> {
if self.dir.is_none() {
self.dir = match &self.parent_dir {
Some(dir) => {
if !dir.exists() {
std::fs::create_dir_all(dir)?;
}
Some(TempDir::new_in(dir)?)
}
None => Some(TempDir::new()?),
};
}
Ok(self.dir.as_ref().unwrap())
}
fn flush(&mut self) -> io::Result<()> {
self.buffer_size_bytes = 0;
self.buffer.par_sort_unstable_by(|a, b| a.0.cmp(&b.0));
let mut buf = Vec::with_capacity(self.buffer.len());
std::mem::swap(&mut buf, &mut self.buffer);
let path = self.dir()?.path().to_path_buf();
self.files.push(EtlFile::new(path.as_path(), buf)?);
Ok(())
}
/// Returns an iterator over the collector data.
///
/// The items of the iterator are sorted across all underlying files.
///
/// # Note
///
/// The keys and values have been pre-encoded, meaning they *SHOULD NOT* be encoded or
/// compressed again.
pub fn iter(&mut self) -> std::io::Result<EtlIter<'_>> {
// Flush the remaining items to disk
if self.buffer_size_bytes > 0 {
self.flush()?;
}
let mut heap = BinaryHeap::new();
for (current_id, file) in self.files.iter_mut().enumerate() {
if let Some((current_key, current_value)) = file.read_next()? {
heap.push((Reverse((current_key, current_value)), current_id));
}
}
Ok(EtlIter { heap, files: &mut self.files })
}
}
/// Type alias for the items stored in the heap of [`EtlIter`].
///
/// Each item in the heap is a tuple containing:
/// - A `Reverse` tuple of a key-value pair (`Vec<u8>, Vec<u8>`), used to maintain the heap in
/// ascending order of keys.
/// - An index (`usize`) representing the source file from which the key-value pair was read.
type HeapItem = (Reverse<(Vec<u8>, Vec<u8>)>, usize);
/// `EtlIter` is an iterator for traversing through sorted key-value pairs in a collection of ETL
/// files. These files are created using the [`Collector`] and contain data where keys are encoded
/// and values are compressed.
///
/// This iterator returns each key-value pair in ascending order based on the key.
/// It is particularly designed to efficiently handle large datasets by employing a binary heap for
/// managing the iteration order.
#[derive(Debug)]
pub struct EtlIter<'a> {
/// Heap managing the next items to be iterated.
heap: BinaryHeap<HeapItem>,
/// Reference to the vector of ETL files being iterated over.
files: &'a mut Vec<EtlFile>,
}
impl EtlIter<'_> {
/// Peeks into the next element
pub fn peek(&self) -> Option<&(Vec<u8>, Vec<u8>)> {
self.heap.peek().map(|(Reverse(entry), _)| entry)
}
}
impl Iterator for EtlIter<'_> {
type Item = std::io::Result<(Vec<u8>, Vec<u8>)>;
fn next(&mut self) -> Option<Self::Item> {
// Get the next sorted entry from the heap
let (Reverse(entry), id) = self.heap.pop()?;
// Populate the heap with the next entry from the same file
match self.files[id].read_next() {
Ok(Some((key, value))) => {
self.heap.push((Reverse((key, value)), id));
Some(Ok(entry))
}
Ok(None) => Some(Ok(entry)),
err => err.transpose(),
}
}
}
/// A temporary ETL file.
#[derive(Debug)]
struct EtlFile {
file: BufReader<NamedTempFile>,
len: usize,
}
impl EtlFile {
/// Create a new file with the given data (which should be pre-sorted) at the given path.
///
/// The file will be a temporary file.
pub(crate) fn new<K, V>(dir: &Path, buffer: Vec<(K, V)>) -> std::io::Result<Self>
where
Self: Sized,
K: AsRef<[u8]>,
V: AsRef<[u8]>,
{
let file = NamedTempFile::new_in(dir)?;
let mut w = BufWriter::new(file);
for entry in &buffer {
let k = entry.0.as_ref();
let v = entry.1.as_ref();
w.write_all(&k.len().to_be_bytes())?;
w.write_all(&v.len().to_be_bytes())?;
w.write_all(k)?;
w.write_all(v)?;
}
let mut file = BufReader::new(w.into_inner()?);
file.seek(SeekFrom::Start(0))?;
let len = buffer.len();
Ok(Self { file, len })
}
/// Read the next entry in the file.
///
/// Can return error if it reaches EOF before filling the internal buffers.
pub(crate) fn read_next(&mut self) -> std::io::Result<Option<(Vec<u8>, Vec<u8>)>> {
if self.len == 0 {
return Ok(None)
}
let mut buffer_key_length = [0; KV_LEN];
let mut buffer_value_length = [0; KV_LEN];
self.file.read_exact(&mut buffer_key_length)?;
self.file.read_exact(&mut buffer_value_length)?;
let key_length = usize::from_be_bytes(buffer_key_length);
let value_length = usize::from_be_bytes(buffer_value_length);
let mut key = vec![0; key_length];
let mut value = vec![0; value_length];
self.file.read_exact(&mut key)?;
self.file.read_exact(&mut value)?;
self.len -= 1;
Ok(Some((key, value)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::{TxHash, TxNumber};
#[test]
fn etl_hashes() {
let mut entries: Vec<_> =
(0..10_000).map(|id| (TxHash::random(), id as TxNumber)).collect();
let mut collector = Collector::new(1024, None);
assert!(collector.dir.is_none());
for (k, v) in entries.clone() {
collector.insert(k, v).unwrap();
}
entries.sort_unstable_by_key(|entry| entry.0);
for (id, entry) in collector.iter().unwrap().enumerate() {
let expected = entries[id];
assert_eq!(
entry.unwrap(),
(expected.0.encode().to_vec(), expected.1.compress().clone())
);
}
let temp_dir_path = collector.dir.as_ref().unwrap().path().to_path_buf();
collector.clear();
assert!(collector.dir.is_none());
assert!(collector.files.is_empty());
assert_eq!(collector.buffer_size_bytes, 0);
assert!(collector.buffer.is_empty());
assert_eq!(collector.len, 0);
assert!(collector.is_empty());
assert!(!temp_dir_path.exists());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/provider/src/lib.rs | crates/ress/provider/src/lib.rs | //! Reth implementation of [`reth_ress_protocol::RessProtocolProvider`].
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use alloy_consensus::BlockHeader as _;
use alloy_primitives::{Bytes, B256};
use parking_lot::Mutex;
use reth_chain_state::{
ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider,
};
use reth_errors::{ProviderError, ProviderResult};
use reth_ethereum_primitives::{Block, BlockBody, EthPrimitives};
use reth_evm::{execute::Executor, ConfigureEvm};
use reth_primitives_traits::{Block as _, Header, RecoveredBlock};
use reth_ress_protocol::RessProtocolProvider;
use reth_revm::{database::StateProviderDatabase, db::State, witness::ExecutionWitnessRecord};
use reth_tasks::TaskSpawner;
use reth_trie::{MultiProofTargets, Nibbles, TrieInput};
use schnellru::{ByLength, LruMap};
use std::{sync::Arc, time::Instant};
use tokio::sync::{oneshot, Semaphore};
use tracing::*;
mod recorder;
use recorder::StateWitnessRecorderDatabase;
mod pending_state;
pub use pending_state::*;
use reth_storage_api::{BlockReader, BlockSource, StateProviderFactory};
/// Reth provider implementing [`RessProtocolProvider`].
///
/// Serves headers, block bodies, bytecode and execution witnesses to stateless
/// `ress` peers, backed by a regular reth storage provider plus a locally
/// maintained [`PendingState`] for non-canonical and invalid blocks.
#[expect(missing_debug_implementations)]
#[derive(Clone)]
pub struct RethRessProtocolProvider<P, E> {
    /// Underlying storage provider for canonical chain data.
    provider: P,
    /// EVM configuration used to re-execute blocks when recording witnesses.
    evm_config: E,
    /// Spawner used to off-load blocking witness generation.
    task_spawner: Box<dyn TaskSpawner>,
    /// Maximum distance (in blocks) behind the best block for which a witness
    /// is still generated.
    max_witness_window: u64,
    /// Limits the number of concurrently computed witnesses.
    witness_semaphore: Arc<Semaphore>,
    /// LRU cache of already computed witnesses, keyed by block hash.
    witness_cache: Arc<Mutex<LruMap<B256, Arc<Vec<Bytes>>>>>,
    /// Locally tracked pending (valid and invalid) blocks.
    pending_state: PendingState<EthPrimitives>,
}
impl<P, E> RethRessProtocolProvider<P, E>
where
    P: BlockReader<Block = Block> + StateProviderFactory,
    E: ConfigureEvm<Primitives = EthPrimitives> + 'static,
{
    /// Create new ress protocol provider.
    ///
    /// * `max_witness_window` — max distance behind the best block for which
    ///   witnesses are still served.
    /// * `witness_max_parallel` — max number of concurrently generated witnesses.
    /// * `cache_size` — number of entries kept in the witness LRU cache.
    pub fn new(
        provider: P,
        evm_config: E,
        task_spawner: Box<dyn TaskSpawner>,
        max_witness_window: u64,
        witness_max_parallel: usize,
        cache_size: u32,
        pending_state: PendingState<EthPrimitives>,
    ) -> eyre::Result<Self> {
        Ok(Self {
            provider,
            evm_config,
            task_spawner,
            max_witness_window,
            witness_semaphore: Arc::new(Semaphore::new(witness_max_parallel)),
            witness_cache: Arc::new(Mutex::new(LruMap::new(ByLength::new(cache_size)))),
            pending_state,
        })
    }

    /// Retrieve a valid or invalid block by block hash.
    ///
    /// Lookup order: locally tracked pending blocks, then the storage provider,
    /// then locally tracked invalid blocks.
    pub fn block_by_hash(
        &self,
        block_hash: B256,
    ) -> ProviderResult<Option<Arc<RecoveredBlock<Block>>>> {
        // NOTE: we keep track of the pending state locally because reth does not provide a way
        // to access non-canonical or invalid blocks via the provider.
        let maybe_block = if let Some(block) = self.pending_state.recovered_block(&block_hash) {
            Some(block)
        } else if let Some(block) =
            self.provider.find_block_by_hash(block_hash, BlockSource::Any)?
        {
            // Blocks from the provider are not recovered yet — recover signers here.
            let signers = block.recover_signers()?;
            Some(Arc::new(block.into_recovered_with_signers(signers)))
        } else {
            // we attempt to look up invalid block last
            self.pending_state.invalid_recovered_block(&block_hash)
        };
        Ok(maybe_block)
    }

    /// Generate state witness
    ///
    /// Re-executes the target block on top of its ancestors' in-memory state to
    /// record all accessed accounts and storage slots, then asks the historical
    /// state provider for the corresponding trie witness. Results are memoized
    /// in the LRU cache keyed by block hash.
    ///
    /// # Errors
    ///
    /// Returns an error if the block is unknown, older than `max_witness_window`
    /// relative to the best block, or if no state can be found for an ancestor.
    pub fn generate_witness(&self, block_hash: B256) -> ProviderResult<Vec<Bytes>> {
        // Fast path: the witness was computed before.
        if let Some(witness) = self.witness_cache.lock().get(&block_hash).cloned() {
            return Ok(witness.as_ref().clone())
        }

        let block =
            self.block_by_hash(block_hash)?.ok_or(ProviderError::BlockHashNotFound(block_hash))?;

        // Refuse to serve witnesses for blocks too far behind the tip.
        let best_block_number = self.provider.best_block_number()?;
        if best_block_number.saturating_sub(block.number()) > self.max_witness_window {
            return Err(ProviderError::TrieWitnessError(
                "witness target block exceeds maximum witness window".to_owned(),
            ))
        }

        // Walk ancestors backwards until a hash with a database state provider
        // is found, collecting in-memory (pending) ancestors along the way.
        let mut executed_ancestors = Vec::new();
        let mut ancestor_hash = block.parent_hash();
        let historical = 'sp: loop {
            match self.provider.state_by_block_hash(ancestor_hash) {
                Ok(state_provider) => break 'sp state_provider,
                Err(_) => {
                    // Attempt to retrieve a valid executed block first.
                    let mut executed = self.pending_state.executed_block(&ancestor_hash);
                    // If it's not present, attempt to lookup invalid block.
                    if executed.is_none() {
                        if let Some(invalid) =
                            self.pending_state.invalid_recovered_block(&ancestor_hash)
                        {
                            trace!(target: "reth::ress_provider", %block_hash, %ancestor_hash, "Using invalid ancestor block for witness construction");
                            // Invalid blocks carry no execution output or trie
                            // updates, so empty defaults are used.
                            executed = Some(ExecutedBlockWithTrieUpdates {
                                block: ExecutedBlock {
                                    recovered_block: invalid,
                                    ..Default::default()
                                },
                                trie: ExecutedTrieUpdates::empty(),
                            });
                        }
                    }
                    let Some(executed) = executed else {
                        return Err(ProviderError::StateForHashNotFound(ancestor_hash))
                    };
                    ancestor_hash = executed.sealed_block().parent_hash();
                    executed_ancestors.push(executed);
                }
            };
        };

        // Execute all gathered blocks to gather accesses state.
        let mut db = StateWitnessRecorderDatabase::new(StateProviderDatabase::new(
            MemoryOverlayStateProvider::new(historical, executed_ancestors.clone()),
        ));
        let mut record = ExecutionWitnessRecord::default();
        // We allow block execution to fail, since we still want to record all accessed state by
        // invalid blocks.
        if let Err(error) = self.evm_config.batch_executor(&mut db).execute_with_state_closure(
            &block,
            |state: &State<_>| {
                record.record_executed_state(state);
            },
        ) {
            debug!(target: "reth::ress_provider", %block_hash, %error, "Error executing the block");
        }

        // NOTE: there might be a race condition where target ancestor hash gets evicted from the
        // database.
        let witness_state_provider = self.provider.state_by_block_hash(ancestor_hash)?;
        let mut trie_input = TrieInput::default();
        // `executed_ancestors` was collected newest-first; reverse so ancestors
        // are appended oldest-first.
        for block in executed_ancestors.into_iter().rev() {
            trie_input.append_cached_ref(block.trie.as_ref().unwrap(), &block.hashed_state);
        }
        // Combine accesses recorded by the database wrapper and the executor.
        let mut hashed_state = db.into_state();
        hashed_state.extend(record.hashed_state);

        // Gather the state witness.
        let witness = if hashed_state.is_empty() {
            // If no state was accessed, at least the root node must be present.
            let multiproof = witness_state_provider.multiproof(
                trie_input,
                MultiProofTargets::from_iter([(B256::ZERO, Default::default())]),
            )?;
            let mut witness = Vec::new();
            if let Some(root_node) =
                multiproof.account_subtree.into_inner().remove(&Nibbles::default())
            {
                witness.push(root_node);
            }
            witness
        } else {
            witness_state_provider.witness(trie_input, hashed_state)?
        };

        // Insert witness into the cache.
        let cached_witness = Arc::new(witness.clone());
        self.witness_cache.lock().insert(block_hash, cached_witness);

        Ok(witness)
    }
}
impl<P, E> RessProtocolProvider for RethRessProtocolProvider<P, E>
where
    P: BlockReader<Block = Block> + StateProviderFactory + Clone + 'static,
    E: ConfigureEvm<Primitives = EthPrimitives> + 'static,
{
    /// Serve a header by block hash, consulting pending and invalid blocks too.
    fn header(&self, block_hash: B256) -> ProviderResult<Option<Header>> {
        trace!(target: "reth::ress_provider", %block_hash, "Serving header");
        let maybe_block = self.block_by_hash(block_hash)?;
        Ok(maybe_block.map(|block| block.header().clone()))
    }

    /// Serve a block body by block hash, consulting pending and invalid blocks too.
    fn block_body(&self, block_hash: B256) -> ProviderResult<Option<BlockBody>> {
        trace!(target: "reth::ress_provider", %block_hash, "Serving block body");
        let maybe_block = self.block_by_hash(block_hash)?;
        Ok(maybe_block.map(|block| block.body().clone()))
    }

    /// Serve contract bytecode by code hash: pending state first, then the
    /// latest database state.
    fn bytecode(&self, code_hash: B256) -> ProviderResult<Option<Bytes>> {
        trace!(target: "reth::ress_provider", %code_hash, "Serving bytecode");
        let maybe_bytecode = match self.pending_state.find_bytecode(code_hash) {
            Some(bytecode) => Some(bytecode),
            None => self.provider.latest()?.bytecode_by_hash(&code_hash)?,
        };
        Ok(maybe_bytecode.map(|bytecode| bytecode.original_bytes()))
    }

    /// Serve an execution witness, bounded by the witness semaphore and
    /// computed on a blocking task.
    async fn witness(&self, block_hash: B256) -> ProviderResult<Vec<Bytes>> {
        trace!(target: "reth::ress_provider", %block_hash, "Serving witness");
        let started_at = Instant::now();
        // Bound the number of in-flight witness computations.
        let _permit = self.witness_semaphore.acquire().await.map_err(ProviderError::other)?;
        let (tx, rx) = oneshot::channel();
        let this = self.clone();
        // Witness generation is CPU/IO heavy — run it off the async runtime.
        self.task_spawner.spawn_blocking(Box::pin(async move {
            let _ = tx.send(this.generate_witness(block_hash));
        }));
        match rx.await {
            Ok(Ok(witness)) => {
                trace!(target: "reth::ress_provider", %block_hash, elapsed = ?started_at.elapsed(), "Computed witness");
                Ok(witness)
            }
            Ok(Err(error)) => Err(error),
            // The worker dropped the sender without responding.
            Err(_) => Err(ProviderError::TrieWitnessError("dropped".to_owned())),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/provider/src/pending_state.rs | crates/ress/provider/src/pending_state.rs | use alloy_consensus::BlockHeader as _;
use alloy_primitives::{
map::{B256HashSet, B256Map},
BlockNumber, B256,
};
use futures::StreamExt;
use parking_lot::RwLock;
use reth_chain_state::ExecutedBlockWithTrieUpdates;
use reth_ethereum_primitives::EthPrimitives;
use reth_node_api::{ConsensusEngineEvent, NodePrimitives};
use reth_primitives_traits::{Bytecode, RecoveredBlock};
use reth_storage_api::BlockNumReader;
use reth_tokio_util::EventStream;
use std::{collections::BTreeMap, sync::Arc};
use tracing::*;
/// Pending state for [`crate::RethRessProtocolProvider`].
///
/// Cheaply cloneable, thread-safe (`Arc<RwLock<..>>`) collection of executed
/// and invalid blocks that are not accessible through the canonical provider.
#[derive(Clone, Default, Debug)]
pub struct PendingState<N: NodePrimitives>(Arc<RwLock<PendingStateInner<N>>>);
/// Inner, lock-protected storage of [`PendingState`].
#[derive(Default, Debug)]
struct PendingStateInner<N: NodePrimitives> {
    /// Valid executed blocks keyed by block hash.
    blocks_by_hash: B256Map<ExecutedBlockWithTrieUpdates<N>>,
    /// Invalid recovered blocks keyed by block hash.
    invalid_blocks_by_hash: B256Map<Arc<RecoveredBlock<N::Block>>>,
    /// Block number -> hashes index (covers both maps), used for pruning.
    block_hashes_by_number: BTreeMap<BlockNumber, B256HashSet>,
}
impl<N: NodePrimitives> PendingState<N> {
    /// Insert executed block with trie updates.
    pub fn insert_block(&self, block: ExecutedBlockWithTrieUpdates<N>) {
        let mut this = self.0.write();
        let block_hash = block.recovered_block.hash();
        // Index the hash by number so the block can be pruned by `remove_before`.
        this.block_hashes_by_number
            .entry(block.recovered_block.number())
            .or_default()
            .insert(block_hash);
        this.blocks_by_hash.insert(block_hash, block);
    }

    /// Insert invalid block.
    pub fn insert_invalid_block(&self, block: Arc<RecoveredBlock<N::Block>>) {
        let mut this = self.0.write();
        let block_hash = block.hash();
        this.block_hashes_by_number.entry(block.number()).or_default().insert(block_hash);
        this.invalid_blocks_by_hash.insert(block_hash, block);
    }

    /// Returns only valid executed blocks by hash.
    pub fn executed_block(&self, hash: &B256) -> Option<ExecutedBlockWithTrieUpdates<N>> {
        self.0.read().blocks_by_hash.get(hash).cloned()
    }

    /// Returns valid recovered block.
    pub fn recovered_block(&self, hash: &B256) -> Option<Arc<RecoveredBlock<N::Block>>> {
        self.executed_block(hash).map(|b| b.recovered_block.clone())
    }

    /// Returns invalid recovered block.
    pub fn invalid_recovered_block(&self, hash: &B256) -> Option<Arc<RecoveredBlock<N::Block>>> {
        self.0.read().invalid_blocks_by_hash.get(hash).cloned()
    }

    /// Find bytecode in executed blocks state.
    ///
    /// Performs a linear scan over the execution output of every tracked
    /// executed block.
    pub fn find_bytecode(&self, code_hash: B256) -> Option<Bytecode> {
        let this = self.0.read();
        for block in this.blocks_by_hash.values() {
            if let Some(contract) = block.execution_output.bytecode(&code_hash) {
                return Some(contract);
            }
        }
        None
    }

    /// Remove all blocks at or before the specified block number (the boundary
    /// block itself is removed as well, since the comparison is `<=`).
    ///
    /// Returns the number of removed block hashes (valid and invalid combined).
    pub fn remove_before(&self, block_number: BlockNumber) -> u64 {
        let mut removed = 0;
        let mut this = self.0.write();
        // Pop number buckets from the front of the ordered map while the
        // smallest tracked number is still within the pruning range.
        while this
            .block_hashes_by_number
            .first_key_value()
            .is_some_and(|(number, _)| number <= &block_number)
        {
            let (_, block_hashes) = this.block_hashes_by_number.pop_first().unwrap();
            for block_hash in block_hashes {
                removed += 1;
                // A hash lives in at most one of the two maps; removing from
                // both is a no-op for the other.
                this.blocks_by_hash.remove(&block_hash);
                this.invalid_blocks_by_hash.remove(&block_hash);
            }
        }
        removed
    }
}
/// A task to maintain pending state based on consensus engine events.
///
/// Valid (canonical or fork) blocks are inserted as executed blocks, invalid
/// blocks are tracked separately, and everything at or below the finalized
/// block is pruned on every valid forkchoice update. The task runs until the
/// event stream ends.
pub async fn maintain_pending_state<P>(
    mut events: EventStream<ConsensusEngineEvent<EthPrimitives>>,
    provider: P,
    pending_state: PendingState<EthPrimitives>,
) where
    P: BlockNumReader,
{
    while let Some(event) = events.next().await {
        match event {
            ConsensusEngineEvent::CanonicalBlockAdded(block, _) |
            ConsensusEngineEvent::ForkBlockAdded(block, _) => {
                trace!(target: "reth::ress_provider", block = ? block.recovered_block().num_hash(), "Insert block into pending state");
                pending_state.insert_block(block);
            }
            ConsensusEngineEvent::InvalidBlock(block) => {
                // Recovery can fail; such blocks are silently skipped.
                if let Ok(block) = block.try_recover() {
                    trace!(target: "reth::ress_provider", block = ?block.num_hash(), "Insert invalid block into pending state");
                    pending_state.insert_invalid_block(Arc::new(block));
                }
            }
            ConsensusEngineEvent::ForkchoiceUpdated(state, status) => {
                if status.is_valid() {
                    // Prune everything up to (and including) the finalized block.
                    let target = state.finalized_block_hash;
                    if let Ok(Some(block_number)) = provider.block_number(target) {
                        let count = pending_state.remove_before(block_number);
                        trace!(target: "reth::ress_provider", block_number, count, "Removing blocks before finalized");
                    }
                }
            }
            // ignore
            ConsensusEngineEvent::CanonicalChainCommitted(_, _) |
            ConsensusEngineEvent::BlockReceived(_) |
            ConsensusEngineEvent::LiveSyncProgress(_) => (),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/provider/src/recorder.rs | crates/ress/provider/src/recorder.rs | use alloy_primitives::{keccak256, Address, B256, U256};
use reth_revm::{
state::{AccountInfo, Bytecode, FlaggedStorage},
Database,
};
use reth_trie::{HashedPostState, HashedStorage};
/// The state witness recorder that records all state accesses during execution.
/// It does so by implementing the [`reth_revm::Database`] and recording accesses of accounts and
/// slots.
#[derive(Debug)]
pub(crate) struct StateWitnessRecorderDatabase<D> {
    /// Underlying database all reads are forwarded to.
    database: D,
    /// Accumulated record of accessed accounts and storage slots (hashed keys).
    state: HashedPostState,
}
impl<D> StateWitnessRecorderDatabase<D> {
pub(crate) fn new(database: D) -> Self {
Self { database, state: Default::default() }
}
pub(crate) fn into_state(self) -> HashedPostState {
self.state
}
}
impl<D: Database> Database for StateWitnessRecorderDatabase<D> {
    type Error = D::Error;

    /// Fetch basic account info, recording the access under the hashed address.
    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        let maybe_account = self.database.basic(address)?;
        let recorded = maybe_account.as_ref().map(|info| info.into());
        self.state.accounts.insert(keccak256(address), recorded);
        Ok(maybe_account)
    }

    /// Fetch a storage slot, recording the access under hashed address/slot keys.
    fn storage(&mut self, address: Address, index: U256) -> Result<FlaggedStorage, Self::Error> {
        let value = self.database.storage(address, index)?;
        let account_storage = self
            .state
            .storages
            .entry(keccak256(address))
            .or_insert_with(|| HashedStorage::new(false));
        account_storage.storage.insert(keccak256(B256::from(index)), value);
        Ok(value)
    }

    /// Block hash lookups are forwarded without being recorded.
    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
        self.database.block_hash(number)
    }

    /// Bytecode lookups are forwarded without being recorded.
    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.database.code_by_hash(code_hash)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/lib.rs | crates/ress/protocol/src/lib.rs | //! `ress` protocol is an `RLPx` subprotocol for stateless nodes.
//! following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md)
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod types;
pub use types::*;
mod message;
pub use message::*;
mod provider;
pub use provider::*;
mod handlers;
pub use handlers::*;
mod connection;
pub use connection::{RessPeerRequest, RessProtocolConnection};
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/handlers.rs | crates/ress/protocol/src/handlers.rs | use crate::{
connection::{RessPeerRequest, RessProtocolConnection},
NodeType, RessProtocolMessage, RessProtocolProvider,
};
use reth_eth_wire::{
capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol,
};
use reth_network::protocol::{ConnectionHandler, OnNotSupported, ProtocolHandler};
use reth_network_api::{test_utils::PeersHandle, Direction, PeerId};
use std::{
fmt,
net::SocketAddr,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
};
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::*;
/// The events that can be emitted by our custom protocol.
#[derive(Debug)]
pub enum ProtocolEvent {
    /// Connection established.
    Established {
        /// Connection direction.
        direction: Direction,
        /// Peer ID.
        peer_id: PeerId,
        /// Sender part for forwarding commands to the established connection.
        to_connection: mpsc::UnboundedSender<RessPeerRequest>,
    },
    /// Number of max active connections exceeded. New connection was rejected.
    MaxActiveConnectionsExceeded {
        /// The current number of active connections.
        num_active: u64,
    },
}
/// Protocol state is a helper struct to store the protocol events.
#[derive(Clone, Debug)]
pub struct ProtocolState {
    /// Protocol event sender.
    pub events_sender: mpsc::UnboundedSender<ProtocolEvent>,
    /// The number of active connections, shared with every spawned connection.
    pub active_connections: Arc<AtomicU64>,
}
impl ProtocolState {
/// Create new protocol state.
pub fn new(events_sender: mpsc::UnboundedSender<ProtocolEvent>) -> Self {
Self { events_sender, active_connections: Arc::default() }
}
/// Returns the current number of active connections.
pub fn active_connections(&self) -> u64 {
self.active_connections.load(Ordering::Relaxed)
}
}
/// The protocol handler takes care of incoming and outgoing connections.
///
/// The handler is cloned for every accepted connection; all clones share the
/// same [`ProtocolState`].
#[derive(Clone)]
pub struct RessProtocolHandler<P> {
    /// Provider.
    pub provider: P,
    /// Node type.
    pub node_type: NodeType,
    /// Peers handle.
    pub peers_handle: PeersHandle,
    /// The maximum number of active connections.
    pub max_active_connections: u64,
    /// Current state of the protocol.
    pub state: ProtocolState,
}
// Manual `Debug` impl: the `provider` field is skipped because `P` is not
// required to implement `Debug`; `finish_non_exhaustive` marks the omission.
impl<P> fmt::Debug for RessProtocolHandler<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RessProtocolHandler")
            .field("node_type", &self.node_type)
            .field("peers_handle", &self.peers_handle)
            .field("max_active_connections", &self.max_active_connections)
            .field("state", &self.state)
            .finish_non_exhaustive()
    }
}
impl<P> RessProtocolHandler<P> {
    /// Checks whether a new connection may be accepted.
    ///
    /// On failure, emits [`ProtocolEvent::MaxActiveConnectionsExceeded`] and
    /// returns the current number of active connections as the error value.
    fn ensure_capacity(&self) -> Result<(), u64> {
        let num_active = self.state.active_connections();
        if num_active >= self.max_active_connections {
            let _ = self
                .state
                .events_sender
                .send(ProtocolEvent::MaxActiveConnectionsExceeded { num_active });
            Err(num_active)
        } else {
            Ok(())
        }
    }
}

impl<P> ProtocolHandler for RessProtocolHandler<P>
where
    P: RessProtocolProvider + Clone + Unpin + 'static,
{
    type ConnectionHandler = Self;

    /// Invoked for an incoming connection; returns `None` to reject it when
    /// the connection limit has been reached.
    fn on_incoming(&self, socket_addr: SocketAddr) -> Option<Self::ConnectionHandler> {
        match self.ensure_capacity() {
            Ok(()) => Some(self.clone()),
            Err(num_active) => {
                trace!(
                    target: "ress::net",
                    num_active, max_connections = self.max_active_connections, %socket_addr,
                    "ignoring incoming connection, max active reached"
                );
                None
            }
        }
    }

    /// Invoked before dialing a peer; returns `None` to abort the attempt when
    /// the connection limit has been reached.
    fn on_outgoing(
        &self,
        socket_addr: SocketAddr,
        peer_id: PeerId,
    ) -> Option<Self::ConnectionHandler> {
        match self.ensure_capacity() {
            Ok(()) => Some(self.clone()),
            Err(num_active) => {
                trace!(
                    target: "ress::net",
                    num_active, max_connections = self.max_active_connections, %socket_addr, %peer_id,
                    "ignoring outgoing connection, max active reached"
                );
                None
            }
        }
    }
}
impl<P> ConnectionHandler for RessProtocolHandler<P>
where
    P: RessProtocolProvider + Clone + Unpin + 'static,
{
    type Connection = RessProtocolConnection<P>;

    fn protocol(&self) -> Protocol {
        RessProtocolMessage::protocol()
    }

    /// Stateful nodes keep the session alive when the peer does not support
    /// `ress`; stateless nodes disconnect instead.
    fn on_unsupported_by_peer(
        self,
        _supported: &SharedCapabilities,
        _direction: Direction,
        _peer_id: PeerId,
    ) -> OnNotSupported {
        if self.node_type.is_stateful() {
            OnNotSupported::KeepAlive
        } else {
            OnNotSupported::Disconnect
        }
    }

    fn into_connection(
        self,
        direction: Direction,
        peer_id: PeerId,
        conn: ProtocolConnection,
    ) -> Self::Connection {
        let (tx, rx) = mpsc::unbounded_channel();
        // Emit connection established event.
        self.state
            .events_sender
            .send(ProtocolEvent::Established { direction, peer_id, to_connection: tx })
            .ok();
        // Increment the number of active sessions. The matching decrement
        // happens when `RessProtocolConnection` is dropped.
        self.state.active_connections.fetch_add(1, Ordering::Relaxed);
        RessProtocolConnection::new(
            self.provider.clone(),
            self.node_type,
            self.peers_handle,
            peer_id,
            conn,
            UnboundedReceiverStream::from(rx),
            self.state.active_connections,
        )
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/test_utils.rs | crates/ress/protocol/src/test_utils.rs | //! Miscellaneous test utilities.
use crate::RessProtocolProvider;
use alloy_consensus::Header;
use alloy_primitives::{map::B256HashMap, Bytes, B256};
use reth_ethereum_primitives::BlockBody;
use reth_storage_errors::provider::ProviderResult;
use std::{
sync::{Arc, Mutex},
time::Duration,
};
/// Noop implementation of [`RessProtocolProvider`].
///
/// Every lookup returns an empty/`None` result.
#[derive(Clone, Copy, Default, Debug)]
pub struct NoopRessProtocolProvider;
impl RessProtocolProvider for NoopRessProtocolProvider {
    /// Always returns `Ok(None)`.
    fn header(&self, _block_hash: B256) -> ProviderResult<Option<Header>> {
        Ok(None)
    }

    /// Always returns `Ok(None)`.
    fn block_body(&self, _block_hash: B256) -> ProviderResult<Option<BlockBody>> {
        Ok(None)
    }

    /// Always returns `Ok(None)`.
    fn bytecode(&self, _code_hash: B256) -> ProviderResult<Option<Bytes>> {
        Ok(None)
    }

    /// Always returns an empty witness.
    async fn witness(&self, _block_hash: B256) -> ProviderResult<Vec<Bytes>> {
        Ok(Vec::new())
    }
}
/// Mock implementation of [`RessProtocolProvider`].
///
/// Serves pre-registered responses from shared in-memory maps.
#[derive(Clone, Default, Debug)]
pub struct MockRessProtocolProvider {
    /// Headers served by block hash.
    headers: Arc<Mutex<B256HashMap<Header>>>,
    /// Block bodies served by block hash.
    block_bodies: Arc<Mutex<B256HashMap<BlockBody>>>,
    /// Bytecodes served by code hash.
    bytecodes: Arc<Mutex<B256HashMap<Bytes>>>,
    /// Witnesses served by block hash.
    witnesses: Arc<Mutex<B256HashMap<Vec<Bytes>>>>,
    /// Optional artificial delay applied before answering witness requests.
    witness_delay: Option<Duration>,
}
impl MockRessProtocolProvider {
    /// Configure witness response delay.
    pub const fn with_witness_delay(mut self, delay: Duration) -> Self {
        self.witness_delay = Some(delay);
        self
    }

    /// Register a single header under the given block hash.
    pub fn add_header(&self, block_hash: B256, header: Header) {
        let mut guard = self.headers.lock().unwrap();
        guard.insert(block_hash, header);
    }

    /// Register multiple headers at once.
    pub fn extend_headers(&self, headers: impl IntoIterator<Item = (B256, Header)>) {
        let mut guard = self.headers.lock().unwrap();
        guard.extend(headers);
    }

    /// Register a single block body under the given block hash.
    pub fn add_block_body(&self, block_hash: B256, body: BlockBody) {
        let mut guard = self.block_bodies.lock().unwrap();
        guard.insert(block_hash, body);
    }

    /// Register multiple block bodies at once.
    pub fn extend_block_bodies(&self, bodies: impl IntoIterator<Item = (B256, BlockBody)>) {
        let mut guard = self.block_bodies.lock().unwrap();
        guard.extend(bodies);
    }

    /// Register a single bytecode under the given code hash.
    pub fn add_bytecode(&self, code_hash: B256, bytecode: Bytes) {
        let mut guard = self.bytecodes.lock().unwrap();
        guard.insert(code_hash, bytecode);
    }

    /// Register multiple bytecodes at once.
    pub fn extend_bytecodes(&self, bytecodes: impl IntoIterator<Item = (B256, Bytes)>) {
        let mut guard = self.bytecodes.lock().unwrap();
        guard.extend(bytecodes);
    }

    /// Register a single witness under the given block hash.
    pub fn add_witness(&self, block_hash: B256, witness: Vec<Bytes>) {
        let mut guard = self.witnesses.lock().unwrap();
        guard.insert(block_hash, witness);
    }

    /// Register multiple witnesses at once.
    pub fn extend_witnesses(&self, witnesses: impl IntoIterator<Item = (B256, Vec<Bytes>)>) {
        let mut guard = self.witnesses.lock().unwrap();
        guard.extend(witnesses);
    }
}
impl RessProtocolProvider for MockRessProtocolProvider {
    /// Look up a registered header by block hash.
    fn header(&self, block_hash: B256) -> ProviderResult<Option<Header>> {
        let guard = self.headers.lock().unwrap();
        Ok(guard.get(&block_hash).cloned())
    }

    /// Look up a registered block body by block hash.
    fn block_body(&self, block_hash: B256) -> ProviderResult<Option<BlockBody>> {
        let guard = self.block_bodies.lock().unwrap();
        Ok(guard.get(&block_hash).cloned())
    }

    /// Look up registered bytecode by code hash.
    fn bytecode(&self, code_hash: B256) -> ProviderResult<Option<Bytes>> {
        let guard = self.bytecodes.lock().unwrap();
        Ok(guard.get(&code_hash).cloned())
    }

    /// Look up a registered witness by block hash, returning an empty witness
    /// when none was registered. Sleeps first if a delay is configured.
    async fn witness(&self, block_hash: B256) -> ProviderResult<Vec<Bytes>> {
        if let Some(delay) = self.witness_delay {
            tokio::time::sleep(delay).await;
        }
        let witness = self.witnesses.lock().unwrap().get(&block_hash).cloned();
        Ok(witness.unwrap_or_default())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/connection.rs | crates/ress/protocol/src/connection.rs | use crate::{GetHeaders, NodeType, RessMessage, RessProtocolMessage, RessProtocolProvider};
use alloy_consensus::Header;
use alloy_primitives::{bytes::BytesMut, BlockHash, Bytes, B256};
use futures::{stream::FuturesUnordered, Stream, StreamExt};
use reth_eth_wire::{message::RequestPair, multiplex::ProtocolConnection};
use reth_ethereum_primitives::BlockBody;
use reth_network_api::{test_utils::PeersHandle, PeerId, ReputationChangeKind};
use reth_storage_errors::ProviderResult;
use std::{
collections::HashMap,
future::Future,
pin::Pin,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
task::{Context, Poll},
};
use tokio::sync::oneshot;
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::*;
/// The connection handler for the custom `RLPx` protocol.
#[derive(Debug)]
pub struct RessProtocolConnection<P> {
    /// Provider.
    provider: P,
    /// The type of this node.
    node_type: NodeType,
    /// Peers handle, used to report peer reputation changes.
    peers_handle: PeersHandle,
    /// Peer ID.
    peer_id: PeerId,
    /// Protocol connection.
    conn: ProtocolConnection,
    /// Stream of incoming commands.
    commands: UnboundedReceiverStream<RessPeerRequest>,
    /// The total number of active connections; decremented on drop.
    active_connections: Arc<AtomicU64>,
    /// Flag indicating whether the node type was sent to the peer.
    node_type_sent: bool,
    /// Flag indicating whether this stream has previously been terminated.
    terminated: bool,
    /// Incremental counter for request ids.
    next_id: u64,
    /// Collection of inflight requests, keyed by request id.
    inflight_requests: HashMap<u64, RessPeerRequest>,
    /// Pending witness responses.
    pending_witnesses: FuturesUnordered<WitnessFut>,
}
impl<P> RessProtocolConnection<P> {
    /// Create new connection with empty bookkeeping state.
    pub fn new(
        provider: P,
        node_type: NodeType,
        peers_handle: PeersHandle,
        peer_id: PeerId,
        conn: ProtocolConnection,
        commands: UnboundedReceiverStream<RessPeerRequest>,
        active_connections: Arc<AtomicU64>,
    ) -> Self {
        Self {
            provider,
            node_type,
            peers_handle,
            peer_id,
            conn,
            commands,
            active_connections,
            node_type_sent: false,
            terminated: false,
            next_id: 0,
            inflight_requests: HashMap::default(),
            pending_witnesses: FuturesUnordered::new(),
        }
    }

    /// Returns the next request id, advancing the internal counter.
    const fn next_id(&mut self) -> u64 {
        let current = self.next_id;
        self.next_id = current + 1;
        current
    }

    /// Penalize the current peer for a protocol violation.
    fn report_bad_message(&self) {
        self.peers_handle.reputation_change(self.peer_id, ReputationChangeKind::BadMessage);
    }

    /// Convert an outbound command into a wire message and track it as
    /// inflight so the matching response can be routed back later.
    fn on_command(&mut self, command: RessPeerRequest) -> RessProtocolMessage {
        let request_id = self.next_id();
        let message = match &command {
            RessPeerRequest::GetHeaders { request, .. } => {
                RessProtocolMessage::get_headers(request_id, *request)
            }
            RessPeerRequest::GetBlockBodies { request, .. } => {
                RessProtocolMessage::get_block_bodies(request_id, request.clone())
            }
            RessPeerRequest::GetWitness { block_hash, .. } => {
                RessProtocolMessage::get_witness(request_id, *block_hash)
            }
            RessPeerRequest::GetBytecode { code_hash, .. } => {
                RessProtocolMessage::get_bytecode(request_id, *code_hash)
            }
        };
        self.inflight_requests.insert(request_id, command);
        message
    }
}
impl<P> RessProtocolConnection<P>
where
    P: RessProtocolProvider + Clone + 'static,
{
    /// Serve a headers request; provider errors yield an empty response.
    fn on_headers_request(&self, request: GetHeaders) -> Vec<Header> {
        match self.provider.headers(request) {
            Ok(headers) => headers,
            Err(error) => {
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, ?request, %error, "error retrieving headers");
                Default::default()
            }
        }
    }

    /// Serve a block bodies request; provider errors yield an empty response.
    fn on_block_bodies_request(&self, request: Vec<B256>) -> Vec<BlockBody> {
        match self.provider.block_bodies(request.clone()) {
            Ok(bodies) => bodies,
            Err(error) => {
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, ?request, %error, "error retrieving block bodies");
                Default::default()
            }
        }
    }

    /// Serve a bytecode request; missing code and provider errors both yield
    /// empty bytes.
    fn on_bytecode_request(&self, code_hash: B256) -> Bytes {
        match self.provider.bytecode(code_hash) {
            Ok(Some(bytecode)) => bytecode,
            Ok(None) => {
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, %code_hash, "bytecode not found");
                Default::default()
            }
            Err(error) => {
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, %code_hash, %error, "error retrieving bytecode");
                Default::default()
            }
        }
    }

    /// Build the wire response for a completed witness computation, reusing
    /// the request id from the original request pair. Errors are mapped to an
    /// empty witness.
    fn on_witness_response(
        &self,
        request: RequestPair<B256>,
        witness_result: ProviderResult<Vec<Bytes>>,
    ) -> RessProtocolMessage {
        let peer_id = self.peer_id;
        let block_hash = request.message;
        let witness = match witness_result {
            Ok(witness) => {
                trace!(target: "ress::net::connection", %peer_id, %block_hash, len = witness.len(), "witness found");
                witness
            }
            Err(error) => {
                trace!(target: "ress::net::connection", %peer_id, %block_hash, %error, "error retrieving witness");
                Default::default()
            }
        };
        RessProtocolMessage::witness(request.request_id, witness)
    }

    /// Handle a single decoded message from the peer.
    ///
    /// Requests are answered immediately (witnesses asynchronously via
    /// `pending_witnesses`); responses are routed to the matching inflight
    /// request's oneshot sender. A response with an unknown or mismatched
    /// request id penalizes the peer.
    fn on_ress_message(&mut self, msg: RessProtocolMessage) -> OnRessMessageOutcome {
        match msg.message {
            RessMessage::NodeType(node_type) => {
                if !self.node_type.is_valid_connection(&node_type) {
                    // Note types are not compatible, terminate the connection.
                    return OnRessMessageOutcome::Terminate;
                }
            }
            RessMessage::GetHeaders(req) => {
                let request = req.message;
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, ?request, "serving headers");
                let header = self.on_headers_request(request);
                let response = RessProtocolMessage::headers(req.request_id, header);
                return OnRessMessageOutcome::Response(response.encoded());
            }
            RessMessage::GetBlockBodies(req) => {
                let request = req.message;
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, ?request, "serving block bodies");
                let bodies = self.on_block_bodies_request(request);
                let response = RessProtocolMessage::block_bodies(req.request_id, bodies);
                return OnRessMessageOutcome::Response(response.encoded());
            }
            RessMessage::GetBytecode(req) => {
                let code_hash = req.message;
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, %code_hash, "serving bytecode");
                let bytecode = self.on_bytecode_request(code_hash);
                let response = RessProtocolMessage::bytecode(req.request_id, bytecode);
                return OnRessMessageOutcome::Response(response.encoded());
            }
            RessMessage::GetWitness(req) => {
                let block_hash = req.message;
                trace!(target: "ress::net::connection", peer_id = %self.peer_id, %block_hash, "serving witness");
                // Witness generation may be slow — queue it as a pending
                // future and send the response once it resolves.
                let provider = self.provider.clone();
                self.pending_witnesses.push(Box::pin(async move {
                    let result = provider.witness(block_hash).await;
                    (req, result)
                }));
            }
            RessMessage::Headers(res) => {
                if let Some(RessPeerRequest::GetHeaders { tx, .. }) =
                    self.inflight_requests.remove(&res.request_id)
                {
                    // Receiver may be gone; send failures are ignored.
                    let _ = tx.send(res.message);
                } else {
                    self.report_bad_message();
                }
            }
            RessMessage::BlockBodies(res) => {
                if let Some(RessPeerRequest::GetBlockBodies { tx, .. }) =
                    self.inflight_requests.remove(&res.request_id)
                {
                    let _ = tx.send(res.message);
                } else {
                    self.report_bad_message();
                }
            }
            RessMessage::Bytecode(res) => {
                if let Some(RessPeerRequest::GetBytecode { tx, .. }) =
                    self.inflight_requests.remove(&res.request_id)
                {
                    let _ = tx.send(res.message);
                } else {
                    self.report_bad_message();
                }
            }
            RessMessage::Witness(res) => {
                if let Some(RessPeerRequest::GetWitness { tx, .. }) =
                    self.inflight_requests.remove(&res.request_id)
                {
                    let _ = tx.send(res.message);
                } else {
                    self.report_bad_message();
                }
            }
        };
        OnRessMessageOutcome::None
    }
}
impl<P> Drop for RessProtocolConnection<P> {
    /// Decrement the shared active-connection counter when the connection is
    /// dropped; `saturating_sub` guards against underflowing past zero.
    fn drop(&mut self) {
        let _ = self
            .active_connections
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |c| Some(c.saturating_sub(1)));
    }
}
impl<P> Stream for RessProtocolConnection<P>
where
    P: RessProtocolProvider + Clone + Unpin + 'static,
{
    type Item = BytesMut;

    /// Drives the connection: yields outgoing wire bytes, processes incoming
    /// messages, and terminates (`None`) once the underlying connection closes
    /// or an incompatible peer node type is detected.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();

        // Once terminated, the stream stays terminated.
        if this.terminated {
            return Poll::Ready(None)
        }

        // The very first yielded item announces our node type to the peer.
        if !this.node_type_sent {
            this.node_type_sent = true;
            return Poll::Ready(Some(RessProtocolMessage::node_type(this.node_type).encoded()))
        }

        'conn: loop {
            // 1. Forward locally issued commands as outgoing requests.
            if let Poll::Ready(Some(cmd)) = this.commands.poll_next_unpin(cx) {
                let message = this.on_command(cmd);
                let encoded = message.encoded();
                trace!(target: "ress::net::connection", peer_id = %this.peer_id, ?message, encoded = alloy_primitives::hex::encode(&encoded), "Sending peer command");
                return Poll::Ready(Some(encoded));
            }

            // 2. Flush any witness computation that has completed.
            if let Poll::Ready(Some((request, witness_result))) =
                this.pending_witnesses.poll_next_unpin(cx)
            {
                let response = this.on_witness_response(request, witness_result);
                return Poll::Ready(Some(response.encoded()));
            }

            // 3. Process the next message from the peer, if any.
            if let Poll::Ready(maybe_msg) = this.conn.poll_next_unpin(cx) {
                // `None` from the underlying connection means it closed.
                let Some(next) = maybe_msg else { break 'conn };
                let msg = match RessProtocolMessage::decode_message(&mut &next[..]) {
                    Ok(msg) => {
                        trace!(target: "ress::net::connection", peer_id = %this.peer_id, message = ?msg.message_type, "Processing message");
                        msg
                    }
                    Err(error) => {
                        // Undecodable payloads penalize the peer but keep
                        // the connection open.
                        trace!(target: "ress::net::connection", peer_id = %this.peer_id, %error, "Error decoding peer message");
                        this.report_bad_message();
                        continue;
                    }
                };
                match this.on_ress_message(msg) {
                    OnRessMessageOutcome::Response(bytes) => return Poll::Ready(Some(bytes)),
                    OnRessMessageOutcome::Terminate => break 'conn,
                    OnRessMessageOutcome::None => {}
                };
                continue;
            }

            return Poll::Pending;
        }

        // Terminating the connection.
        this.terminated = true;
        Poll::Ready(None)
    }
}
type WitnessFut =
Pin<Box<dyn Future<Output = (RequestPair<B256>, ProviderResult<Vec<Bytes>>)> + Send>>;
/// Ress peer request.
#[derive(Debug)]
pub enum RessPeerRequest {
/// Get block headers.
GetHeaders {
/// The request for block headers.
request: GetHeaders,
/// The sender for the response.
tx: oneshot::Sender<Vec<Header>>,
},
/// Get block bodies.
GetBlockBodies {
/// The request for block bodies.
request: Vec<BlockHash>,
/// The sender for the response.
tx: oneshot::Sender<Vec<BlockBody>>,
},
/// Get bytecode for specific code hash
GetBytecode {
/// Target code hash that we want to get bytecode for.
code_hash: B256,
/// The sender for the response.
tx: oneshot::Sender<Bytes>,
},
/// Get witness for specific block.
GetWitness {
/// Target block hash that we want to get witness for.
block_hash: BlockHash,
/// The sender for the response.
tx: oneshot::Sender<Vec<Bytes>>,
},
}
enum OnRessMessageOutcome {
/// Response to send to the peer.
Response(BytesMut),
/// Terminate the connection.
Terminate,
/// No action.
None,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/types.rs | crates/ress/protocol/src/types.rs | use alloy_primitives::bytes::{Buf, BufMut};
use alloy_rlp::{Decodable, Encodable};
/// Node type variant.
#[repr(u8)]
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub enum NodeType {
/// Stateless ress node.
Stateless = 0x00,
/// Stateful reth node.
Stateful,
}
impl Encodable for NodeType {
fn encode(&self, out: &mut dyn BufMut) {
out.put_u8(*self as u8);
}
fn length(&self) -> usize {
1
}
}
impl Decodable for NodeType {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let id = match buf.first().ok_or(alloy_rlp::Error::InputTooShort)? {
0x00 => Self::Stateless,
0x01 => Self::Stateful,
_ => return Err(alloy_rlp::Error::Custom("Invalid message type")),
};
buf.advance(1);
Ok(id)
}
}
impl NodeType {
/// Return `true` if node type is stateful.
pub const fn is_stateful(&self) -> bool {
matches!(self, Self::Stateful)
}
/// Return `true` if the connection between this and other node types
/// can be considered valid.
///
/// Validity:
/// | stateless | stateful |
/// ----------|-----------|----------|
/// stateless | + | + |
/// stateful | + | - |
pub const fn is_valid_connection(&self, other: &Self) -> bool {
!self.is_stateful() || !other.is_stateful()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/message.rs | crates/ress/protocol/src/message.rs | //! Implements Ress protocol
//! Defines structs/enums for messages, request-response pairs.
//!
//! Examples include creating, encoding, and decoding protocol messages.
use crate::NodeType;
use alloy_consensus::Header;
use alloy_primitives::{
bytes::{Buf, BufMut},
BlockHash, Bytes, B256,
};
use alloy_rlp::{BytesMut, Decodable, Encodable, RlpDecodable, RlpEncodable};
use reth_eth_wire::{message::RequestPair, protocol::Protocol, Capability};
use reth_ethereum_primitives::BlockBody;
/// An Ress protocol message, containing a message ID and payload.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct RessProtocolMessage {
/// The unique identifier representing the type of the Ress message.
pub message_type: RessMessageID,
/// The content of the message, including specific data based on the message type.
pub message: RessMessage,
}
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> arbitrary::Arbitrary<'a> for RessProtocolMessage {
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let message: RessMessage = u.arbitrary()?;
Ok(Self { message_type: message.message_id(), message })
}
}
impl RessProtocolMessage {
/// Returns the capability for the `ress` protocol.
pub const fn capability() -> Capability {
Capability::new_static("ress", 1)
}
/// Returns the protocol for the `ress` protocol.
pub const fn protocol() -> Protocol {
Protocol::new(Self::capability(), 9)
}
/// Create node type message.
pub const fn node_type(node_type: NodeType) -> Self {
RessMessage::NodeType(node_type).into_protocol_message()
}
/// Headers request.
pub const fn get_headers(request_id: u64, request: GetHeaders) -> Self {
RessMessage::GetHeaders(RequestPair { request_id, message: request })
.into_protocol_message()
}
/// Headers response.
pub const fn headers(request_id: u64, headers: Vec<Header>) -> Self {
RessMessage::Headers(RequestPair { request_id, message: headers }).into_protocol_message()
}
/// Block bodies request.
pub const fn get_block_bodies(request_id: u64, block_hashes: Vec<B256>) -> Self {
RessMessage::GetBlockBodies(RequestPair { request_id, message: block_hashes })
.into_protocol_message()
}
/// Block bodies response.
pub const fn block_bodies(request_id: u64, bodies: Vec<BlockBody>) -> Self {
RessMessage::BlockBodies(RequestPair { request_id, message: bodies })
.into_protocol_message()
}
/// Bytecode request.
pub const fn get_bytecode(request_id: u64, code_hash: B256) -> Self {
RessMessage::GetBytecode(RequestPair { request_id, message: code_hash })
.into_protocol_message()
}
/// Bytecode response.
pub const fn bytecode(request_id: u64, bytecode: Bytes) -> Self {
RessMessage::Bytecode(RequestPair { request_id, message: bytecode }).into_protocol_message()
}
/// Execution witness request.
pub const fn get_witness(request_id: u64, block_hash: BlockHash) -> Self {
RessMessage::GetWitness(RequestPair { request_id, message: block_hash })
.into_protocol_message()
}
/// Execution witness response.
pub const fn witness(request_id: u64, witness: Vec<Bytes>) -> Self {
RessMessage::Witness(RequestPair { request_id, message: witness }).into_protocol_message()
}
/// Return RLP encoded message.
pub fn encoded(&self) -> BytesMut {
let mut buf = BytesMut::with_capacity(self.length());
self.encode(&mut buf);
buf
}
/// Decodes a `RessProtocolMessage` from the given message buffer.
pub fn decode_message(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let message_type = RessMessageID::decode(buf)?;
let message = match message_type {
RessMessageID::NodeType => RessMessage::NodeType(NodeType::decode(buf)?),
RessMessageID::GetHeaders => RessMessage::GetHeaders(RequestPair::decode(buf)?),
RessMessageID::Headers => RessMessage::Headers(RequestPair::decode(buf)?),
RessMessageID::GetBlockBodies => RessMessage::GetBlockBodies(RequestPair::decode(buf)?),
RessMessageID::BlockBodies => RessMessage::BlockBodies(RequestPair::decode(buf)?),
RessMessageID::GetBytecode => RessMessage::GetBytecode(RequestPair::decode(buf)?),
RessMessageID::Bytecode => RessMessage::Bytecode(RequestPair::decode(buf)?),
RessMessageID::GetWitness => RessMessage::GetWitness(RequestPair::decode(buf)?),
RessMessageID::Witness => RessMessage::Witness(RequestPair::decode(buf)?),
};
Ok(Self { message_type, message })
}
}
impl Encodable for RessProtocolMessage {
fn encode(&self, out: &mut dyn BufMut) {
self.message_type.encode(out);
self.message.encode(out);
}
fn length(&self) -> usize {
self.message_type.length() + self.message.length()
}
}
/// Represents message IDs for `ress` protocol messages.
#[repr(u8)]
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[cfg_attr(test, derive(strum_macros::EnumCount))]
pub enum RessMessageID {
/// Node type message.
NodeType = 0x00,
/// Headers request message.
GetHeaders = 0x01,
/// Headers response message.
Headers = 0x02,
/// Block bodies request message.
GetBlockBodies = 0x03,
/// Block bodies response message.
BlockBodies = 0x04,
/// Bytecode request message.
GetBytecode = 0x05,
/// Bytecode response message.
Bytecode = 0x06,
/// Witness request message.
GetWitness = 0x07,
/// Witness response message.
Witness = 0x08,
}
impl Encodable for RessMessageID {
fn encode(&self, out: &mut dyn BufMut) {
out.put_u8(*self as u8);
}
fn length(&self) -> usize {
1
}
}
impl Decodable for RessMessageID {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let id = match buf.first().ok_or(alloy_rlp::Error::InputTooShort)? {
0x00 => Self::NodeType,
0x01 => Self::GetHeaders,
0x02 => Self::Headers,
0x03 => Self::GetBlockBodies,
0x04 => Self::BlockBodies,
0x05 => Self::GetBytecode,
0x06 => Self::Bytecode,
0x07 => Self::GetWitness,
0x08 => Self::Witness,
_ => return Err(alloy_rlp::Error::Custom("Invalid message type")),
};
buf.advance(1);
Ok(id)
}
}
/// Represents a message in the ress protocol.
#[derive(PartialEq, Eq, Clone, Debug)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub enum RessMessage {
/// Represents a node type message required for handshake.
NodeType(NodeType),
/// Represents a headers request message.
GetHeaders(RequestPair<GetHeaders>),
/// Represents a headers response message.
Headers(RequestPair<Vec<Header>>),
/// Represents a block bodies request message.
GetBlockBodies(RequestPair<Vec<B256>>),
/// Represents a block bodies response message.
BlockBodies(RequestPair<Vec<BlockBody>>),
/// Represents a bytecode request message.
GetBytecode(RequestPair<B256>),
/// Represents a bytecode response message.
Bytecode(RequestPair<Bytes>),
/// Represents a witness request message.
GetWitness(RequestPair<BlockHash>),
/// Represents a witness response message.
Witness(RequestPair<Vec<Bytes>>),
}
impl RessMessage {
/// Return [`RessMessageID`] that corresponds to the given message.
pub const fn message_id(&self) -> RessMessageID {
match self {
Self::NodeType(_) => RessMessageID::NodeType,
Self::GetHeaders(_) => RessMessageID::GetHeaders,
Self::Headers(_) => RessMessageID::Headers,
Self::GetBlockBodies(_) => RessMessageID::GetBlockBodies,
Self::BlockBodies(_) => RessMessageID::BlockBodies,
Self::GetBytecode(_) => RessMessageID::GetBytecode,
Self::Bytecode(_) => RessMessageID::Bytecode,
Self::GetWitness(_) => RessMessageID::GetWitness,
Self::Witness(_) => RessMessageID::Witness,
}
}
/// Convert message into [`RessProtocolMessage`].
pub const fn into_protocol_message(self) -> RessProtocolMessage {
let message_type = self.message_id();
RessProtocolMessage { message_type, message: self }
}
}
impl From<RessMessage> for RessProtocolMessage {
fn from(value: RessMessage) -> Self {
value.into_protocol_message()
}
}
impl Encodable for RessMessage {
fn encode(&self, out: &mut dyn BufMut) {
match self {
Self::NodeType(node_type) => node_type.encode(out),
Self::GetHeaders(request) => request.encode(out),
Self::Headers(header) => header.encode(out),
Self::GetBlockBodies(request) => request.encode(out),
Self::BlockBodies(body) => body.encode(out),
Self::GetBytecode(request) | Self::GetWitness(request) => request.encode(out),
Self::Bytecode(bytecode) => bytecode.encode(out),
Self::Witness(witness) => witness.encode(out),
}
}
fn length(&self) -> usize {
match self {
Self::NodeType(node_type) => node_type.length(),
Self::GetHeaders(request) => request.length(),
Self::Headers(header) => header.length(),
Self::GetBlockBodies(request) => request.length(),
Self::BlockBodies(body) => body.length(),
Self::GetBytecode(request) | Self::GetWitness(request) => request.length(),
Self::Bytecode(bytecode) => bytecode.length(),
Self::Witness(witness) => witness.length(),
}
}
}
/// A request for a peer to return block headers starting at the requested block.
/// The peer must return at most [`limit`](#structfield.limit) headers.
/// The headers will be returned starting at [`start_hash`](#structfield.start_hash), traversing
/// towards the genesis block.
#[derive(PartialEq, Eq, Clone, Copy, Debug, RlpEncodable, RlpDecodable)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
pub struct GetHeaders {
/// The block hash that the peer should start returning headers from.
pub start_hash: BlockHash,
/// The maximum number of headers to return.
pub limit: u64,
}
#[cfg(test)]
mod tests {
use super::*;
use proptest::prelude::*;
use proptest_arbitrary_interop::arb;
use std::fmt;
use strum::EnumCount;
fn rlp_roundtrip<V>(value: V)
where
V: Encodable + Decodable + PartialEq + fmt::Debug,
{
let encoded = alloy_rlp::encode(&value);
let decoded = V::decode(&mut &encoded[..]);
assert_eq!(Ok(value), decoded);
}
#[test]
fn protocol_message_count() {
let protocol = RessProtocolMessage::protocol();
assert_eq!(protocol.messages(), RessMessageID::COUNT as u8);
}
proptest! {
#[test]
fn message_type_roundtrip(message_type in arb::<RessMessageID>()) {
rlp_roundtrip(message_type);
}
#[test]
fn message_roundtrip(message in arb::<RessProtocolMessage>()) {
let encoded = alloy_rlp::encode(&message);
let decoded = RessProtocolMessage::decode_message(&mut &encoded[..]);
assert_eq!(Ok(message), decoded);
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/src/provider.rs | crates/ress/protocol/src/provider.rs | use crate::GetHeaders;
use alloy_consensus::Header;
use alloy_primitives::{Bytes, B256};
use alloy_rlp::Encodable;
use reth_ethereum_primitives::BlockBody;
use reth_network::eth_requests::{MAX_BODIES_SERVE, MAX_HEADERS_SERVE, SOFT_RESPONSE_LIMIT};
use reth_storage_errors::provider::ProviderResult;
use std::future::Future;
/// A provider trait for ress protocol.
pub trait RessProtocolProvider: Send + Sync {
/// Return block header by hash.
fn header(&self, block_hash: B256) -> ProviderResult<Option<Header>>;
/// Return block headers.
fn headers(&self, request: GetHeaders) -> ProviderResult<Vec<Header>> {
if request.limit == 0 {
return Ok(Vec::new());
}
let mut total_bytes = 0;
let mut block_hash = request.start_hash;
let mut headers = Vec::new();
while let Some(header) = self.header(block_hash)? {
block_hash = header.parent_hash;
total_bytes += header.length();
headers.push(header);
if headers.len() >= request.limit as usize ||
headers.len() >= MAX_HEADERS_SERVE ||
total_bytes > SOFT_RESPONSE_LIMIT
{
break
}
}
Ok(headers)
}
/// Return block body by hash.
fn block_body(&self, block_hash: B256) -> ProviderResult<Option<BlockBody>>;
/// Return block bodies.
fn block_bodies(&self, block_hashes: Vec<B256>) -> ProviderResult<Vec<BlockBody>> {
let mut total_bytes = 0;
let mut bodies = Vec::new();
for block_hash in block_hashes {
if let Some(body) = self.block_body(block_hash)? {
total_bytes += body.length();
bodies.push(body);
if bodies.len() >= MAX_BODIES_SERVE || total_bytes > SOFT_RESPONSE_LIMIT {
break
}
} else {
break
}
}
Ok(bodies)
}
/// Return bytecode by code hash.
fn bytecode(&self, code_hash: B256) -> ProviderResult<Option<Bytes>>;
/// Return witness by block hash.
fn witness(&self, block_hash: B256) -> impl Future<Output = ProviderResult<Vec<Bytes>>> + Send;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/tests/it/main.rs | crates/ress/protocol/tests/it/main.rs | #![allow(missing_docs)]
mod e2e;
const fn main() {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/ress/protocol/tests/it/e2e.rs | crates/ress/protocol/tests/it/e2e.rs | use alloy_primitives::{Bytes, B256};
use futures::StreamExt;
use reth_network::{test_utils::Testnet, NetworkEventListenerProvider, Peers};
use reth_network_api::{
events::{NetworkEvent, PeerEvent},
test_utils::PeersHandleProvider,
};
use reth_provider::test_utils::MockEthProvider;
use reth_ress_protocol::{
test_utils::{MockRessProtocolProvider, NoopRessProtocolProvider},
GetHeaders, NodeType, ProtocolEvent, ProtocolState, RessPeerRequest, RessProtocolHandler,
};
use std::time::{Duration, Instant};
use tokio::sync::{mpsc, oneshot};
#[tokio::test(flavor = "multi_thread")]
#[ignore]
async fn disconnect_on_stateful_pair() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create_with(2, MockEthProvider::default()).await;
let protocol_provider = NoopRessProtocolProvider;
let (tx, mut from_peer0) = mpsc::unbounded_channel();
let peer0 = &mut net.peers_mut()[0];
peer0.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateful,
peers_handle: peer0.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
let (tx, mut from_peer1) = mpsc::unbounded_channel();
let peer1 = &mut net.peers_mut()[1];
peer1.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateful,
peers_handle: peer1.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
// spawn and connect all the peers
let handle = net.spawn();
handle.connect_peers().await;
match from_peer0.recv().await.unwrap() {
ProtocolEvent::Established { peer_id, .. } => {
assert_eq!(peer_id, *handle.peers()[1].peer_id());
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
match from_peer1.recv().await.unwrap() {
ProtocolEvent::Established { peer_id, .. } => {
assert_eq!(peer_id, *handle.peers()[0].peer_id());
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
let mut peer0_event_listener = handle.peers()[0].network().event_listener();
loop {
if let NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) =
peer0_event_listener.next().await.unwrap()
{
assert_eq!(peer_id, *handle.peers()[1].peer_id());
break
}
}
let mut peer1_event_listener = handle.peers()[1].network().event_listener();
loop {
if let NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) =
peer1_event_listener.next().await.unwrap()
{
assert_eq!(peer_id, *handle.peers()[0].peer_id());
break
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn message_exchange() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create_with(2, MockEthProvider::default()).await;
let protocol_provider = NoopRessProtocolProvider;
let (tx, mut from_peer0) = mpsc::unbounded_channel();
let peer0 = &mut net.peers_mut()[0];
peer0.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateless,
peers_handle: peer0.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
let (tx, mut from_peer1) = mpsc::unbounded_channel();
let peer1 = &mut net.peers_mut()[1];
peer1.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateless,
peers_handle: peer1.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
// spawn and connect all the peers
let handle = net.spawn();
handle.connect_peers().await;
let peer0_to_peer1 = from_peer0.recv().await.unwrap();
let peer0_conn = match peer0_to_peer1 {
ProtocolEvent::Established { direction: _, peer_id, to_connection } => {
assert_eq!(peer_id, *handle.peers()[1].peer_id());
to_connection
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
let peer1_to_peer0 = from_peer1.recv().await.unwrap();
match peer1_to_peer0 {
ProtocolEvent::Established { peer_id, .. } => {
assert_eq!(peer_id, *handle.peers()[0].peer_id());
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
// send get headers message from peer0 to peer1
let (tx, rx) = oneshot::channel();
peer0_conn
.send(RessPeerRequest::GetHeaders {
request: GetHeaders { start_hash: B256::ZERO, limit: 1 },
tx,
})
.unwrap();
assert_eq!(rx.await.unwrap(), Vec::new());
// send get bodies message from peer0 to peer1
let (tx, rx) = oneshot::channel();
peer0_conn.send(RessPeerRequest::GetBlockBodies { request: Vec::new(), tx }).unwrap();
assert_eq!(rx.await.unwrap(), Vec::new());
// send get witness message from peer0 to peer1
let (tx, rx) = oneshot::channel();
peer0_conn.send(RessPeerRequest::GetWitness { block_hash: B256::ZERO, tx }).unwrap();
assert_eq!(rx.await.unwrap(), Vec::<Bytes>::new());
// send get bytecode message from peer0 to peer1
let (tx, rx) = oneshot::channel();
peer0_conn.send(RessPeerRequest::GetBytecode { code_hash: B256::ZERO, tx }).unwrap();
assert_eq!(rx.await.unwrap(), Bytes::default());
}
#[tokio::test(flavor = "multi_thread")]
async fn witness_fetching_does_not_block() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create_with(2, MockEthProvider::default()).await;
let witness_delay = Duration::from_millis(100);
let protocol_provider = MockRessProtocolProvider::default().with_witness_delay(witness_delay);
let (tx, mut from_peer0) = mpsc::unbounded_channel();
let peer0 = &mut net.peers_mut()[0];
peer0.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider.clone(),
node_type: NodeType::Stateless,
peers_handle: peer0.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
let (tx, mut from_peer1) = mpsc::unbounded_channel();
let peer1 = &mut net.peers_mut()[1];
peer1.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateless,
peers_handle: peer1.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
// spawn and connect all the peers
let handle = net.spawn();
handle.connect_peers().await;
let peer0_to_peer1 = from_peer0.recv().await.unwrap();
let peer0_conn = match peer0_to_peer1 {
ProtocolEvent::Established { direction: _, peer_id, to_connection } => {
assert_eq!(peer_id, *handle.peers()[1].peer_id());
to_connection
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
let peer1_to_peer0 = from_peer1.recv().await.unwrap();
match peer1_to_peer0 {
ProtocolEvent::Established { peer_id, .. } => {
assert_eq!(peer_id, *handle.peers()[0].peer_id());
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
// send get witness message from peer0 to peer1
let witness_requested_at = Instant::now();
let (witness_tx, witness_rx) = oneshot::channel();
peer0_conn
.send(RessPeerRequest::GetWitness { block_hash: B256::ZERO, tx: witness_tx })
.unwrap();
// send get bytecode message from peer0 to peer1
let bytecode_requested_at = Instant::now();
let (tx, rx) = oneshot::channel();
peer0_conn.send(RessPeerRequest::GetBytecode { code_hash: B256::ZERO, tx }).unwrap();
assert_eq!(rx.await.unwrap(), Bytes::default());
assert!(bytecode_requested_at.elapsed() < witness_delay);
// await for witness response
assert_eq!(witness_rx.await.unwrap(), Vec::<Bytes>::new());
assert!(witness_requested_at.elapsed() >= witness_delay);
}
#[tokio::test(flavor = "multi_thread")]
async fn max_active_connections() {
reth_tracing::init_test_tracing();
let mut net = Testnet::create_with(3, MockEthProvider::default()).await;
let protocol_provider = NoopRessProtocolProvider;
let (tx, mut from_peer0) = mpsc::unbounded_channel();
let peer0 = &mut net.peers_mut()[0];
peer0.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateful,
peers_handle: peer0.handle().peers_handle().clone(),
max_active_connections: 1,
state: ProtocolState::new(tx),
});
let (tx, _from_peer1) = mpsc::unbounded_channel();
let peer1 = &mut net.peers_mut()[1];
let peer1_id = peer1.peer_id();
let peer1_addr = peer1.local_addr();
peer1.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateless,
peers_handle: peer1.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
let (tx, _from_peer2) = mpsc::unbounded_channel();
let peer2 = &mut net.peers_mut()[2];
let peer2_id = peer2.peer_id();
let peer2_addr = peer2.local_addr();
peer2.add_rlpx_sub_protocol(RessProtocolHandler {
provider: protocol_provider,
node_type: NodeType::Stateless,
peers_handle: peer2.handle().peers_handle().clone(),
max_active_connections: 100,
state: ProtocolState::new(tx),
});
let handle = net.spawn();
// connect peers 0 and 1
let peer0_handle = &handle.peers()[0];
peer0_handle.network().add_peer(peer1_id, peer1_addr);
let _peer0_to_peer1 = match from_peer0.recv().await.unwrap() {
ProtocolEvent::Established { peer_id, to_connection, .. } => {
assert_eq!(peer_id, *peer1_id);
to_connection
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
// connect peers 0 and 2, max active connections exceeded.
peer0_handle.network().add_peer(peer2_id, peer2_addr);
match from_peer0.recv().await.unwrap() {
ProtocolEvent::MaxActiveConnectionsExceeded { num_active } => {
assert_eq!(num_active, 1);
}
ev => {
panic!("unexpected event: {ev:?}");
}
};
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/prelude.rs | crates/stages/stages/src/prelude.rs | pub use crate::sets::{
DefaultStages, ExecutionStages, HashingStages, HistoryIndexingStages, OfflineStages,
OnlineStages,
};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/lib.rs | crates/stages/stages/src/lib.rs | //! Staged syncing primitives for reth.
//!
//! This crate contains the syncing primitives [`Pipeline`] and [`Stage`], as well as all stages
//! that reth uses to sync.
//!
//! A pipeline can be configured using [`Pipeline::builder()`].
//!
//! For ease of use, this crate also exposes a set of [`StageSet`]s, which are collections of stages
//! that perform specific functions during sync. Stage sets can be customized; it is possible to
//! add, disable and replace stages in the set.
//!
//! # Examples
//!
//! ```
//! # use std::sync::Arc;
//! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder;
//! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder;
//! # use reth_network_p2p::test_utils::{TestBodiesClient, TestHeadersClient};
//! # use alloy_primitives::B256;
//! # use reth_chainspec::MAINNET;
//! # use reth_prune_types::PruneModes;
//! # use reth_network_peers::PeerId;
//! # use reth_stages::Pipeline;
//! # use reth_stages::sets::DefaultStages;
//! # use tokio::sync::watch;
//! # use reth_evm_ethereum::EthEvmConfig;
//! # use reth_provider::ProviderFactory;
//! # use reth_provider::StaticFileProviderFactory;
//! # use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB};
//! # use reth_static_file::StaticFileProducer;
//! # use reth_config::config::StageConfig;
//! # use reth_consensus::{Consensus, ConsensusError};
//! # use reth_consensus::test_utils::TestConsensus;
//! # use reth_consensus::FullConsensus;
//! #
//! # let chain_spec = MAINNET.clone();
//! # let consensus: Arc<dyn FullConsensus<reth_ethereum_primitives::EthPrimitives, Error = ConsensusError>> = Arc::new(TestConsensus::default());
//! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build(
//! # Arc::new(TestHeadersClient::default()),
//! # consensus.clone()
//! # );
//! # let provider_factory = create_test_provider_factory();
//! # let bodies_downloader = BodiesDownloaderBuilder::default().build(
//! # Arc::new(TestBodiesClient { responder: |_| Ok((PeerId::ZERO, vec![]).into()) }),
//! # consensus.clone(),
//! # provider_factory.clone()
//! # );
//! # let (tip_tx, tip_rx) = watch::channel(B256::default());
//! # let executor_provider = EthEvmConfig::mainnet();
//! # let static_file_producer = StaticFileProducer::new(
//! # provider_factory.clone(),
//! # PruneModes::default()
//! # );
//! # let era_import_source = None;
//! // Create a pipeline that can fully sync
//! # let pipeline =
//! Pipeline::<MockNodeTypesWithDB>::builder()
//! .with_tip_sender(tip_tx)
//! .add_stages(DefaultStages::new(
//! provider_factory.clone(),
//! tip_rx,
//! consensus,
//! headers_downloader,
//! bodies_downloader,
//! executor_provider,
//! StageConfig::default(),
//! PruneModes::default(),
//! era_import_source,
//! ))
//! .build(provider_factory, static_file_producer);
//! ```
//!
//! ## Feature Flags
//!
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#[expect(missing_docs)]
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
/// A re-export of common structs and traits.
pub mod prelude;
/// Implementations of stages.
pub mod stages;
pub mod sets;
// re-export the stages API
pub use reth_stages_api::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/sets.rs | crates/stages/stages/src/sets.rs | //! Built-in [`StageSet`]s.
//!
//! The easiest set to use is [`DefaultStages`], which provides all stages required to run an
//! instance of reth.
//!
//! It is also possible to run parts of reth standalone given the required data is present in
//! the environment, such as [`ExecutionStages`] or [`HashingStages`].
//!
//!
//! # Examples
//!
//! ```no_run
//! # use reth_stages::Pipeline;
//! # use reth_stages::sets::{OfflineStages};
//! # use reth_chainspec::MAINNET;
//! # use reth_prune_types::PruneModes;
//! # use reth_evm_ethereum::EthEvmConfig;
//! # use reth_evm::ConfigureEvm;
//! # use reth_provider::StaticFileProviderFactory;
//! # use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB};
//! # use reth_static_file::StaticFileProducer;
//! # use reth_config::config::StageConfig;
//! # use reth_ethereum_primitives::EthPrimitives;
//! # use std::sync::Arc;
//! # use reth_consensus::{FullConsensus, ConsensusError};
//!
//! # fn create(exec: impl ConfigureEvm<Primitives = EthPrimitives> + 'static, consensus: impl FullConsensus<EthPrimitives, Error = ConsensusError> + 'static) {
//!
//! let provider_factory = create_test_provider_factory();
//! let static_file_producer =
//! StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
//! // Build a pipeline with all offline stages.
//! let pipeline = Pipeline::<MockNodeTypesWithDB>::builder()
//! .add_stages(OfflineStages::new(exec, Arc::new(consensus), StageConfig::default(), PruneModes::default()))
//! .build(provider_factory, static_file_producer);
//!
//! # }
//! ```
use crate::{
stages::{
AccountHashingStage, BodyStage, EraImportSource, EraStage, ExecutionStage, FinishStage,
HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage,
PruneSenderRecoveryStage, PruneStage, SenderRecoveryStage, StorageHashingStage,
TransactionLookupStage,
},
StageSet, StageSetBuilder,
};
use alloy_primitives::B256;
use reth_config::config::StageConfig;
use reth_consensus::{ConsensusError, FullConsensus};
use reth_evm::ConfigureEvm;
use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader};
use reth_primitives_traits::{Block, NodePrimitives};
use reth_provider::HeaderSyncGapProvider;
use reth_prune_types::PruneModes;
use reth_stages_api::Stage;
use std::{ops::Not, sync::Arc};
use tokio::sync::watch;
/// A set containing all stages to run a fully syncing instance of reth.
///
/// A combination of (in order)
///
/// - [`OnlineStages`]
/// - [`OfflineStages`]
/// - [`FinishStage`]
///
/// This expands to the following series of stages:
/// - [`HeaderStage`]
/// - [`BodyStage`]
/// - [`SenderRecoveryStage`]
/// - [`ExecutionStage`]
/// - [`PruneSenderRecoveryStage`] (execute)
/// - [`MerkleStage`] (unwind)
/// - [`AccountHashingStage`]
/// - [`StorageHashingStage`]
/// - [`MerkleStage`] (execute)
/// - [`TransactionLookupStage`]
/// - [`IndexStorageHistoryStage`]
/// - [`IndexAccountHistoryStage`]
/// - [`PruneStage`] (execute)
/// - [`FinishStage`]
#[derive(Debug)]
pub struct DefaultStages<Provider, H, B, E>
where
H: HeaderDownloader,
B: BodyDownloader,
E: ConfigureEvm,
{
/// Configuration for the online stages
online: OnlineStages<Provider, H, B>,
/// Executor factory needs for execution stage
evm_config: E,
/// Consensus instance
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
/// Configuration for each stage in the pipeline
stages_config: StageConfig,
/// Prune configuration for every segment that can be pruned
prune_modes: PruneModes,
}
impl<Provider, H, B, E> DefaultStages<Provider, H, B, E>
where
H: HeaderDownloader,
B: BodyDownloader,
E: ConfigureEvm<Primitives: NodePrimitives<BlockHeader = H::Header, Block = B::Block>>,
{
/// Create a new set of default stages with default values.
#[expect(clippy::too_many_arguments)]
pub fn new(
provider: Provider,
tip: watch::Receiver<B256>,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
header_downloader: H,
body_downloader: B,
evm_config: E,
stages_config: StageConfig,
prune_modes: PruneModes,
era_import_source: Option<EraImportSource>,
) -> Self {
Self {
online: OnlineStages::new(
provider,
tip,
header_downloader,
body_downloader,
stages_config.clone(),
era_import_source,
),
evm_config,
consensus,
stages_config,
prune_modes,
}
}
}
impl<P, H, B, E> DefaultStages<P, H, B, E>
where
E: ConfigureEvm,
H: HeaderDownloader,
B: BodyDownloader,
{
/// Appends the default offline stages and default finish stage to the given builder.
pub fn add_offline_stages<Provider>(
default_offline: StageSetBuilder<Provider>,
evm_config: E,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
stages_config: StageConfig,
prune_modes: PruneModes,
) -> StageSetBuilder<Provider>
where
OfflineStages<E>: StageSet<Provider>,
{
StageSetBuilder::default()
.add_set(default_offline)
.add_set(OfflineStages::new(evm_config, consensus, stages_config, prune_modes))
.add_stage(FinishStage)
}
}
impl<P, H, B, E, Provider> StageSet<Provider> for DefaultStages<P, H, B, E>
where
P: HeaderSyncGapProvider + 'static,
H: HeaderDownloader + 'static,
B: BodyDownloader + 'static,
E: ConfigureEvm,
OnlineStages<P, H, B>: StageSet<Provider>,
OfflineStages<E>: StageSet<Provider>,
{
fn builder(self) -> StageSetBuilder<Provider> {
Self::add_offline_stages(
self.online.builder(),
self.evm_config,
self.consensus,
self.stages_config.clone(),
self.prune_modes,
)
}
}
/// A set containing all stages that require network access by default.
///
/// These stages *can* be run without network access if the specified downloaders are
/// themselves offline.
#[derive(Debug)]
pub struct OnlineStages<Provider, H, B>
where
H: HeaderDownloader,
B: BodyDownloader,
{
/// Sync gap provider for the headers stage.
provider: Provider,
/// The tip for the headers stage.
tip: watch::Receiver<B256>,
/// The block header downloader
header_downloader: H,
/// The block body downloader
body_downloader: B,
/// Configuration for each stage in the pipeline
stages_config: StageConfig,
/// Optional source of ERA1 files. The `EraStage` does nothing unless this is specified.
era_import_source: Option<EraImportSource>,
}
impl<Provider, H, B> OnlineStages<Provider, H, B>
where
H: HeaderDownloader,
B: BodyDownloader,
{
/// Create a new set of online stages with default values.
pub const fn new(
provider: Provider,
tip: watch::Receiver<B256>,
header_downloader: H,
body_downloader: B,
stages_config: StageConfig,
era_import_source: Option<EraImportSource>,
) -> Self {
Self { provider, tip, header_downloader, body_downloader, stages_config, era_import_source }
}
}
impl<P, H, B> OnlineStages<P, H, B>
where
P: HeaderSyncGapProvider + 'static,
H: HeaderDownloader<Header = <B::Block as Block>::Header> + 'static,
B: BodyDownloader + 'static,
{
/// Create a new builder using the given headers stage.
pub fn builder_with_headers<Provider>(
headers: HeaderStage<P, H>,
body_downloader: B,
) -> StageSetBuilder<Provider>
where
HeaderStage<P, H>: Stage<Provider>,
BodyStage<B>: Stage<Provider>,
{
StageSetBuilder::default().add_stage(headers).add_stage(BodyStage::new(body_downloader))
}
/// Create a new builder using the given bodies stage.
pub fn builder_with_bodies<Provider>(
bodies: BodyStage<B>,
provider: P,
tip: watch::Receiver<B256>,
header_downloader: H,
stages_config: StageConfig,
) -> StageSetBuilder<Provider>
where
BodyStage<B>: Stage<Provider>,
HeaderStage<P, H>: Stage<Provider>,
{
StageSetBuilder::default()
.add_stage(HeaderStage::new(provider, header_downloader, tip, stages_config.etl))
.add_stage(bodies)
}
}
impl<Provider, P, H, B> StageSet<Provider> for OnlineStages<P, H, B>
where
P: HeaderSyncGapProvider + 'static,
H: HeaderDownloader<Header = <B::Block as Block>::Header> + 'static,
B: BodyDownloader + 'static,
HeaderStage<P, H>: Stage<Provider>,
BodyStage<B>: Stage<Provider>,
EraStage<<B::Block as Block>::Header, <B::Block as Block>::Body, EraImportSource>:
Stage<Provider>,
{
fn builder(self) -> StageSetBuilder<Provider> {
StageSetBuilder::default()
.add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone()))
.add_stage(HeaderStage::new(
self.provider,
self.header_downloader,
self.tip,
self.stages_config.etl.clone(),
))
.add_stage(BodyStage::new(self.body_downloader))
}
}
/// A set containing all stages that do not require network access.
///
/// A combination of (in order)
///
/// - [`ExecutionStages`]
/// - [`PruneSenderRecoveryStage`]
/// - [`HashingStages`]
/// - [`HistoryIndexingStages`]
/// - [`PruneStage`]
#[derive(Debug)]
#[non_exhaustive]
pub struct OfflineStages<E: ConfigureEvm> {
/// Executor factory needs for execution stage
evm_config: E,
/// Consensus instance for validating blocks.
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
/// Configuration for each stage in the pipeline
stages_config: StageConfig,
/// Prune configuration for every segment that can be pruned
prune_modes: PruneModes,
}
impl<E: ConfigureEvm> OfflineStages<E> {
/// Create a new set of offline stages with default values.
pub const fn new(
evm_config: E,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
stages_config: StageConfig,
prune_modes: PruneModes,
) -> Self {
Self { evm_config, consensus, stages_config, prune_modes }
}
}
impl<E, Provider> StageSet<Provider> for OfflineStages<E>
where
E: ConfigureEvm,
ExecutionStages<E>: StageSet<Provider>,
PruneSenderRecoveryStage: Stage<Provider>,
HashingStages: StageSet<Provider>,
HistoryIndexingStages: StageSet<Provider>,
PruneStage: Stage<Provider>,
{
fn builder(self) -> StageSetBuilder<Provider> {
ExecutionStages::new(self.evm_config, self.consensus, self.stages_config.clone())
.builder()
// If sender recovery prune mode is set, add the prune sender recovery stage.
.add_stage_opt(self.prune_modes.sender_recovery.map(|prune_mode| {
PruneSenderRecoveryStage::new(prune_mode, self.stages_config.prune.commit_threshold)
}))
.add_set(HashingStages { stages_config: self.stages_config.clone() })
.add_set(HistoryIndexingStages {
stages_config: self.stages_config.clone(),
prune_modes: self.prune_modes.clone(),
})
// If any prune modes are set, add the prune stage.
.add_stage_opt(self.prune_modes.is_empty().not().then(|| {
// Prune stage should be added after all hashing stages, because otherwise it will
// delete
PruneStage::new(self.prune_modes.clone(), self.stages_config.prune.commit_threshold)
}))
}
}
/// A set containing all stages that are required to execute pre-existing block data.
#[derive(Debug)]
#[non_exhaustive]
pub struct ExecutionStages<E: ConfigureEvm> {
/// Executor factory that will create executors.
evm_config: E,
/// Consensus instance for validating blocks.
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
/// Configuration for each stage in the pipeline
stages_config: StageConfig,
}
impl<E: ConfigureEvm> ExecutionStages<E> {
/// Create a new set of execution stages with default values.
pub const fn new(
executor_provider: E,
consensus: Arc<dyn FullConsensus<E::Primitives, Error = ConsensusError>>,
stages_config: StageConfig,
) -> Self {
Self { evm_config: executor_provider, consensus, stages_config }
}
}
impl<E, Provider> StageSet<Provider> for ExecutionStages<E>
where
E: ConfigureEvm + 'static,
SenderRecoveryStage: Stage<Provider>,
ExecutionStage<E>: Stage<Provider>,
{
fn builder(self) -> StageSetBuilder<Provider> {
StageSetBuilder::default()
.add_stage(SenderRecoveryStage::new(self.stages_config.sender_recovery))
.add_stage(ExecutionStage::from_config(
self.evm_config,
self.consensus,
self.stages_config.execution,
self.stages_config.execution_external_clean_threshold(),
))
}
}
/// A set containing all stages that hash account state.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct HashingStages {
/// Configuration for each stage in the pipeline
stages_config: StageConfig,
}
impl<Provider> StageSet<Provider> for HashingStages
where
MerkleStage: Stage<Provider>,
AccountHashingStage: Stage<Provider>,
StorageHashingStage: Stage<Provider>,
{
fn builder(self) -> StageSetBuilder<Provider> {
StageSetBuilder::default()
.add_stage(MerkleStage::default_unwind())
.add_stage(AccountHashingStage::new(
self.stages_config.account_hashing,
self.stages_config.etl.clone(),
))
.add_stage(StorageHashingStage::new(
self.stages_config.storage_hashing,
self.stages_config.etl.clone(),
))
.add_stage(MerkleStage::new_execution(
self.stages_config.merkle.rebuild_threshold,
self.stages_config.merkle.incremental_threshold,
))
}
}
/// A set containing all stages that do additional indexing for historical state.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct HistoryIndexingStages {
/// Configuration for each stage in the pipeline
stages_config: StageConfig,
/// Prune configuration for every segment that can be pruned
prune_modes: PruneModes,
}
impl<Provider> StageSet<Provider> for HistoryIndexingStages
where
TransactionLookupStage: Stage<Provider>,
IndexStorageHistoryStage: Stage<Provider>,
IndexAccountHistoryStage: Stage<Provider>,
{
fn builder(self) -> StageSetBuilder<Provider> {
StageSetBuilder::default()
.add_stage(TransactionLookupStage::new(
self.stages_config.transaction_lookup,
self.stages_config.etl.clone(),
self.prune_modes.transaction_lookup,
))
.add_stage(IndexStorageHistoryStage::new(
self.stages_config.index_storage_history,
self.stages_config.etl.clone(),
self.prune_modes.storage_history,
))
.add_stage(IndexAccountHistoryStage::new(
self.stages_config.index_account_history,
self.stages_config.etl.clone(),
self.prune_modes.account_history,
))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/prune.rs | crates/stages/stages/src/stages/prune.rs | use reth_db_api::{table::Value, transaction::DbTxMut};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter,
StaticFileProviderFactory,
};
use reth_prune::{
PruneMode, PruneModes, PruneSegment, PrunerBuilder, SegmentOutput, SegmentOutputCheckpoint,
};
use reth_stages_api::{
ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput,
};
use tracing::info;
/// The prune stage that runs the pruner with the provided prune modes.
///
/// There are two main reasons to have this stage when running a full node:
/// - Sender Recovery stage inserts a lot of data into the database that's only needed for the
/// Execution stage. Pruner will clean up the unneeded recovered senders.
/// - Pruning during the live sync can take a significant amount of time, especially history
/// segments. If we can prune as much data as possible in one go before starting the live sync, we
/// should do it.
///
/// `commit_threshold` is the maximum number of entries to prune before committing
/// progress to the database.
#[derive(Debug)]
pub struct PruneStage {
prune_modes: PruneModes,
commit_threshold: usize,
}
impl PruneStage {
/// Crate new prune stage with the given prune modes and commit threshold.
pub const fn new(prune_modes: PruneModes, commit_threshold: usize) -> Self {
Self { prune_modes, commit_threshold }
}
}
impl<Provider> Stage<Provider> for PruneStage
where
Provider: DBProvider<Tx: DbTxMut>
+ PruneCheckpointReader
+ PruneCheckpointWriter
+ BlockReader
+ StaticFileProviderFactory<
Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
>,
{
fn id(&self) -> StageId {
StageId::Prune
}
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
let mut pruner = PrunerBuilder::default()
.segments(self.prune_modes.clone())
.delete_limit(self.commit_threshold)
.build::<Provider>(provider.static_file_provider());
let result = pruner.run_with_provider(provider, input.target())?;
if result.progress.is_finished() {
Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true })
} else {
if let Some((last_segment, last_segment_output)) = result.segments.last() {
match last_segment_output {
SegmentOutput {
progress,
pruned,
checkpoint:
checkpoint @ Some(SegmentOutputCheckpoint { block_number: Some(_), .. }),
} => {
info!(
target: "sync::stages::prune::exec",
?last_segment,
?progress,
?pruned,
?checkpoint,
"Last segment has more data to prune"
)
}
SegmentOutput { progress, pruned, checkpoint: _ } => {
info!(
target: "sync::stages::prune::exec",
?last_segment,
?progress,
?pruned,
"Last segment has more data to prune"
)
}
}
}
// We cannot set the checkpoint yet, because prune segments may have different highest
// pruned block numbers
Ok(ExecOutput { checkpoint: input.checkpoint(), done: false })
}
}
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
// We cannot recover the data that was pruned in `execute`, so we just update the
// checkpoints.
let prune_checkpoints = provider.get_prune_checkpoints()?;
for (segment, mut checkpoint) in prune_checkpoints {
checkpoint.block_number = Some(input.unwind_to);
provider.save_prune_checkpoint(segment, checkpoint)?;
}
Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
}
}
/// The prune sender recovery stage that runs the pruner with the provided `PruneMode` for the
/// `SenderRecovery` segment.
///
/// Under the hood, this stage has the same functionality as [`PruneStage`].
#[derive(Debug)]
pub struct PruneSenderRecoveryStage(PruneStage);
impl PruneSenderRecoveryStage {
/// Create new prune sender recovery stage with the given prune mode and commit threshold.
pub fn new(prune_mode: PruneMode, commit_threshold: usize) -> Self {
Self(PruneStage::new(
PruneModes { sender_recovery: Some(prune_mode), ..PruneModes::none() },
commit_threshold,
))
}
}
impl<Provider> Stage<Provider> for PruneSenderRecoveryStage
where
Provider: DBProvider<Tx: DbTxMut>
+ PruneCheckpointReader
+ PruneCheckpointWriter
+ BlockReader
+ StaticFileProviderFactory<
Primitives: NodePrimitives<SignedTx: Value, Receipt: Value, BlockHeader: Value>,
>,
{
fn id(&self) -> StageId {
StageId::PruneSenderRecovery
}
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
let mut result = self.0.execute(provider, input)?;
// Adjust the checkpoint to the highest pruned block number of the Sender Recovery segment
if !result.done {
let checkpoint = provider
.get_prune_checkpoint(PruneSegment::SenderRecovery)?
.ok_or(StageError::MissingPruneCheckpoint(PruneSegment::SenderRecovery))?;
// `unwrap_or_default` is safe because we know that genesis block doesn't have any
// transactions and senders
result.checkpoint = StageCheckpoint::new(checkpoint.block_number.unwrap_or_default());
}
Ok(result)
}
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
self.0.unwind(provider, input)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind,
TestRunnerError, TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::B256;
use reth_ethereum_primitives::Block;
use reth_primitives_traits::{SealedBlock, SignerRecoverable};
use reth_provider::{
providers::StaticFileWriter, TransactionsProvider, TransactionsProviderExt,
};
use reth_prune::PruneMode;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
stage_test_suite_ext!(PruneTestRunner, prune);
#[derive(Default)]
struct PruneTestRunner {
db: TestStageDB,
}
impl StageTestRunner for PruneTestRunner {
type S = PruneStage;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
PruneStage {
prune_modes: PruneModes {
sender_recovery: Some(PruneMode::Full),
..Default::default()
},
commit_threshold: usize::MAX,
}
}
}
impl ExecuteStageTestRunner for PruneTestRunner {
type Seed = Vec<SealedBlock<Block>>;
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
input.checkpoint().block_number..=input.target(),
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..3, ..Default::default() },
);
self.db.insert_blocks(blocks.iter(), StorageKind::Static)?;
self.db.insert_transaction_senders(
blocks.iter().flat_map(|block| block.body().transactions.iter()).enumerate().map(
|(i, tx)| (i as u64, tx.recover_signer().expect("failed to recover signer")),
),
)?;
Ok(blocks)
}
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
if let Some(output) = output {
let start_block = input.next_block();
let end_block = output.checkpoint.block_number;
if start_block > end_block {
return Ok(())
}
let provider = self.db.factory.provider()?;
assert!(output.done);
assert_eq!(
output.checkpoint.block_number,
provider
.get_prune_checkpoint(PruneSegment::SenderRecovery)?
.expect("prune checkpoint must exist")
.block_number
.unwrap_or_default()
);
// Verify that the senders are pruned
let tx_range =
provider.transaction_range_by_block_range(start_block..=end_block)?;
let senders = self.db.factory.provider()?.senders_by_tx_range(tx_range)?;
assert!(senders.is_empty());
}
Ok(())
}
}
impl UnwindStageTestRunner for PruneTestRunner {
fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
Ok(())
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/sender_recovery.rs | crates/stages/stages/src/stages/sender_recovery.rs | use alloy_primitives::{Address, TxNumber};
use reth_config::config::SenderRecoveryConfig;
use reth_consensus::ConsensusError;
use reth_db::static_file::TransactionMask;
use reth_db_api::{
cursor::DbCursorRW,
table::Value,
tables,
transaction::{DbTx, DbTxMut},
DbTxUnwindExt, RawValue,
};
use reth_primitives_traits::{GotExpected, NodePrimitives, SignedTransaction};
use reth_provider::{
BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader,
StaticFileProviderFactory, StatsReader,
};
use reth_prune_types::PruneSegment;
use reth_stages_api::{
BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError,
StageId, UnwindInput, UnwindOutput,
};
use reth_static_file_types::StaticFileSegment;
use std::{fmt::Debug, ops::Range, sync::mpsc};
use thiserror::Error;
use tracing::*;
/// Maximum amount of transactions to read from disk at one time before we flush their senders to
/// disk. Since each rayon worker will hold at most 100 transactions (`WORKER_CHUNK_SIZE`), we
/// effectively max limit each batch to 1000 channels in memory.
const BATCH_SIZE: usize = 100_000;
/// Maximum number of senders to recover per rayon worker job.
const WORKER_CHUNK_SIZE: usize = 100;
/// Type alias for a sender that transmits the result of sender recovery.
type RecoveryResultSender = mpsc::Sender<Result<(u64, Address), Box<SenderRecoveryStageError>>>;
/// The sender recovery stage iterates over existing transactions,
/// recovers the transaction signer and stores them
/// in [`TransactionSenders`][reth_db_api::tables::TransactionSenders] table.
#[derive(Clone, Debug)]
pub struct SenderRecoveryStage {
/// The size of inserted items after which the control
/// flow will be returned to the pipeline for commit
pub commit_threshold: u64,
}
impl SenderRecoveryStage {
/// Create new instance of [`SenderRecoveryStage`].
pub const fn new(config: SenderRecoveryConfig) -> Self {
Self { commit_threshold: config.commit_threshold }
}
}
impl Default for SenderRecoveryStage {
fn default() -> Self {
Self { commit_threshold: 5_000_000 }
}
}
impl<Provider> Stage<Provider> for SenderRecoveryStage
where
Provider: DBProvider<Tx: DbTxMut>
+ BlockReader
+ StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value + SignedTransaction>>
+ StatsReader
+ PruneCheckpointReader,
{
/// Return the id of the stage
fn id(&self) -> StageId {
StageId::SenderRecovery
}
/// Retrieve the range of transactions to iterate over by querying
/// [`BlockBodyIndices`][reth_db_api::tables::BlockBodyIndices],
/// collect transactions within that range, recover signer for each transaction and store
/// entries in the [`TransactionSenders`][reth_db_api::tables::TransactionSenders] table.
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
if input.target_reached() {
return Ok(ExecOutput::done(input.checkpoint()))
}
let (tx_range, block_range, is_final_range) =
input.next_block_range_with_transaction_threshold(provider, self.commit_threshold)?;
let end_block = *block_range.end();
// No transactions to walk over
if tx_range.is_empty() {
info!(target: "sync::stages::sender_recovery", ?tx_range, "Target transaction already reached");
return Ok(ExecOutput {
checkpoint: StageCheckpoint::new(end_block)
.with_entities_stage_checkpoint(stage_checkpoint(provider)?),
done: is_final_range,
})
}
// Acquire the cursor for inserting elements
let mut senders_cursor = provider.tx_ref().cursor_write::<tables::TransactionSenders>()?;
info!(target: "sync::stages::sender_recovery", ?tx_range, "Recovering senders");
// Iterate over transactions in batches, recover the senders and append them
let batch = tx_range
.clone()
.step_by(BATCH_SIZE)
.map(|start| start..std::cmp::min(start + BATCH_SIZE as u64, tx_range.end))
.collect::<Vec<Range<u64>>>();
let tx_batch_sender = setup_range_recovery(provider);
for range in batch {
recover_range(range, provider, tx_batch_sender.clone(), &mut senders_cursor)?;
}
Ok(ExecOutput {
checkpoint: StageCheckpoint::new(end_block)
.with_entities_stage_checkpoint(stage_checkpoint(provider)?),
done: is_final_range,
})
}
/// Unwind the stage.
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold);
// Lookup latest tx id that we should unwind to
let latest_tx_id = provider
.block_body_indices(unwind_to)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?
.last_tx_num();
provider.tx_ref().unwind_table_by_num::<tables::TransactionSenders>(latest_tx_id)?;
Ok(UnwindOutput {
checkpoint: StageCheckpoint::new(unwind_to)
.with_entities_stage_checkpoint(stage_checkpoint(provider)?),
})
}
}
fn recover_range<Provider, CURSOR>(
tx_range: Range<u64>,
provider: &Provider,
tx_batch_sender: mpsc::Sender<Vec<(Range<u64>, RecoveryResultSender)>>,
senders_cursor: &mut CURSOR,
) -> Result<(), StageError>
where
Provider: DBProvider + HeaderProvider + StaticFileProviderFactory,
CURSOR: DbCursorRW<tables::TransactionSenders>,
{
debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing");
// Preallocate channels for each chunks in the batch
let (chunks, receivers): (Vec<_>, Vec<_>) = tx_range
.clone()
.step_by(WORKER_CHUNK_SIZE)
.map(|start| {
let range = start..std::cmp::min(start + WORKER_CHUNK_SIZE as u64, tx_range.end);
let (tx, rx) = mpsc::channel();
// Range and channel sender will be sent to rayon worker
((range, tx), rx)
})
.unzip();
if let Some(err) = tx_batch_sender.send(chunks).err() {
return Err(StageError::Fatal(err.into()));
}
debug!(target: "sync::stages::sender_recovery", ?tx_range, "Appending recovered senders to the database");
let mut processed_transactions = 0;
for channel in receivers {
while let Ok(recovered) = channel.recv() {
let (tx_id, sender) = match recovered {
Ok(result) => result,
Err(error) => {
return match *error {
SenderRecoveryStageError::FailedRecovery(err) => {
// get the block number for the bad transaction
let block_number = provider
.tx_ref()
.get::<tables::TransactionBlocks>(err.tx)?
.ok_or(ProviderError::BlockNumberForTransactionIndexNotFound)?;
// fetch the sealed header so we can use it in the sender recovery
// unwind
let sealed_header =
provider.sealed_header(block_number)?.ok_or_else(|| {
ProviderError::HeaderNotFound(block_number.into())
})?;
Err(StageError::Block {
block: Box::new(sealed_header.block_with_parent()),
error: BlockErrorKind::Validation(
ConsensusError::TransactionSignerRecoveryError,
),
})
}
SenderRecoveryStageError::StageError(err) => Err(err),
SenderRecoveryStageError::RecoveredSendersMismatch(expectation) => {
Err(StageError::Fatal(
SenderRecoveryStageError::RecoveredSendersMismatch(expectation)
.into(),
))
}
}
}
};
senders_cursor.append(tx_id, &sender)?;
processed_transactions += 1;
}
}
debug!(target: "sync::stages::sender_recovery", ?tx_range, "Finished recovering senders batch");
// Fail safe to ensure that we do not proceed without having recovered all senders.
let expected = tx_range.end - tx_range.start;
if processed_transactions != expected {
return Err(StageError::Fatal(
SenderRecoveryStageError::RecoveredSendersMismatch(GotExpected {
got: processed_transactions,
expected,
})
.into(),
));
}
Ok(())
}
/// Spawns a thread to handle the recovery of transaction senders for
/// specified chunks of a given batch. It processes incoming ranges, fetching and recovering
/// transactions in parallel using global rayon pool
fn setup_range_recovery<Provider>(
provider: &Provider,
) -> mpsc::Sender<Vec<(Range<u64>, RecoveryResultSender)>>
where
Provider: DBProvider
+ HeaderProvider
+ StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value + SignedTransaction>>,
{
let (tx_sender, tx_receiver) = mpsc::channel::<Vec<(Range<u64>, RecoveryResultSender)>>();
let static_file_provider = provider.static_file_provider();
// We do not use `tokio::task::spawn_blocking` because, during a shutdown,
// there will be a timeout grace period in which Tokio does not allow spawning
// additional blocking tasks. This would cause this function to return
// `SenderRecoveryStageError::RecoveredSendersMismatch` at the end.
//
// However, using `std::thread::spawn` allows us to utilize the timeout grace
// period to complete some work without throwing errors during the shutdown.
std::thread::spawn(move || {
while let Ok(chunks) = tx_receiver.recv() {
for (chunk_range, recovered_senders_tx) in chunks {
// Read the raw value, and let the rayon worker to decompress & decode.
let chunk = match static_file_provider.fetch_range_with_predicate(
StaticFileSegment::Transactions,
chunk_range.clone(),
|cursor, number| {
Ok(cursor
.get_one::<TransactionMask<
RawValue<<Provider::Primitives as NodePrimitives>::SignedTx>,
>>(number.into())?
.map(|tx| (number, tx)))
},
|_| true,
) {
Ok(chunk) => chunk,
Err(err) => {
// We exit early since we could not process this chunk.
let _ = recovered_senders_tx
.send(Err(Box::new(SenderRecoveryStageError::StageError(err.into()))));
break
}
};
// Spawn the task onto the global rayon pool
// This task will send the results through the channel after it has read the
// transaction and calculated the sender.
rayon::spawn(move || {
let mut rlp_buf = Vec::with_capacity(128);
for (number, tx) in chunk {
let res = tx
.value()
.map_err(|err| {
Box::new(SenderRecoveryStageError::StageError(err.into()))
})
.and_then(|tx| recover_sender((number, tx), &mut rlp_buf));
let is_err = res.is_err();
let _ = recovered_senders_tx.send(res);
// Finish early
if is_err {
break
}
}
});
}
}
});
tx_sender
}
#[inline]
fn recover_sender<T: SignedTransaction>(
(tx_id, tx): (TxNumber, T),
rlp_buf: &mut Vec<u8>,
) -> Result<(u64, Address), Box<SenderRecoveryStageError>> {
rlp_buf.clear();
// We call [Signature::encode_and_recover_unchecked] because transactions run in the pipeline
// are known to be valid - this means that we do not need to check whether or not the `s`
// value is greater than `secp256k1n / 2` if past EIP-2. There are transactions
// pre-homestead which have large `s` values, so using [Signature::recover_signer] here
// would not be backwards-compatible.
let sender = tx.recover_unchecked_with_buf(rlp_buf).map_err(|_| {
SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id })
})?;
Ok((tx_id, sender))
}
fn stage_checkpoint<Provider>(provider: &Provider) -> Result<EntitiesCheckpoint, StageError>
where
Provider: StatsReader + StaticFileProviderFactory + PruneCheckpointReader,
{
let pruned_entries = provider
.get_prune_checkpoint(PruneSegment::SenderRecovery)?
.and_then(|checkpoint| checkpoint.tx_number)
.unwrap_or_default();
Ok(EntitiesCheckpoint {
// If `TransactionSenders` table was pruned, we will have a number of entries in it not
// matching the actual number of processed transactions. To fix that, we add the
// number of pruned `TransactionSenders` entries.
processed: provider.count_entries::<tables::TransactionSenders>()? as u64 + pruned_entries,
// Count only static files entries. If we count the database entries too, we may have
// duplicates. We're sure that the static files have all entries that database has,
// because we run the `StaticFileProducer` before starting the pipeline.
total: provider.static_file_provider().count_entries::<tables::Transactions>()? as u64,
})
}
/// Errors that can occur while recovering transaction senders.
#[derive(Error, Debug)]
#[error(transparent)]
enum SenderRecoveryStageError {
    /// A transaction failed sender recovery
    #[error(transparent)]
    FailedRecovery(#[from] FailedSenderRecoveryError),

    /// Number of recovered senders does not match
    #[error("mismatched sender count during recovery: {_0}")]
    RecoveredSendersMismatch(GotExpected<u64>),

    /// A different type of stage error occurred
    #[error(transparent)]
    StageError(#[from] StageError),
}

/// Error emitted when sender recovery fails for a specific transaction.
#[derive(Error, Debug)]
#[error("sender recovery failed for transaction {tx}")]
struct FailedSenderRecoveryError {
    /// The transaction that failed sender recovery
    tx: TxNumber,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind,
TestRunnerError, TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::{BlockNumber, B256};
use assert_matches::assert_matches;
use reth_db_api::cursor::DbCursorRO;
use reth_ethereum_primitives::{Block, TransactionSigned};
use reth_primitives_traits::{SealedBlock, SignerRecoverable};
use reth_provider::{
providers::StaticFileWriter, BlockBodyIndicesProvider, DatabaseProviderFactory,
PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider,
};
use reth_prune_types::{PruneCheckpoint, PruneMode};
use reth_stages_api::StageUnitCheckpoint;
use reth_testing_utils::generators::{
self, random_block, random_block_range, BlockParams, BlockRangeParams,
};
stage_test_suite_ext!(SenderRecoveryTestRunner, sender_recovery);
    /// Execute a block range with a single transaction
    ///
    /// Seeds blocks `stage_progress..=target` where exactly one block carries a
    /// transaction, then asserts the stage reports one processed entity and completes.
    #[tokio::test]
    async fn execute_single_transaction() {
        let (previous_stage, stage_progress) = (500, 100);
        let mut rng = generators::rng();

        // Set up the runner
        let runner = SenderRecoveryTestRunner::default();
        let input = ExecInput {
            target: Some(previous_stage),
            checkpoint: Some(StageCheckpoint::new(stage_progress)),
        };

        // Insert blocks with a single transaction at block `stage_progress + 10`
        let non_empty_block_number = stage_progress + 10;
        let blocks = (stage_progress..=input.target())
            .map(|number| {
                random_block(
                    &mut rng,
                    number,
                    BlockParams {
                        // `bool as u8` is 1 only for the chosen block, 0 elsewhere.
                        tx_count: Some((number == non_empty_block_number) as u8),
                        ..Default::default()
                    },
                )
            })
            .collect::<Vec<_>>();
        runner
            .db
            .insert_blocks(blocks.iter(), StorageKind::Static)
            .expect("failed to insert blocks");

        let rx = runner.execute(input);

        // Assert the successful result
        let result = rx.await.unwrap();
        assert_matches!(
            result,
            Ok(ExecOutput { checkpoint: StageCheckpoint {
                block_number,
                stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
                    processed: 1,
                    total: 1
                }))
            }, done: true }) if block_number == previous_stage
        );

        // Validate the stage execution
        assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
    }
    /// Execute the stage twice with input range that exceeds the commit threshold
    ///
    /// The first run must stop early (`done: false`) at the block where the cumulative
    /// transaction count crosses the threshold; the second run finishes the range.
    #[tokio::test]
    async fn execute_intermediate_commit() {
        let mut rng = generators::rng();

        let threshold = 10;
        let mut runner = SenderRecoveryTestRunner::default();
        runner.set_threshold(threshold);
        let (stage_progress, previous_stage) = (1000, 1100); // input exceeds threshold

        // Manually seed once with full input range
        let seed = random_block_range(
            &mut rng,
            stage_progress + 1..=previous_stage,
            BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..4, ..Default::default() },
        ); // set tx count range high enough to hit the threshold
        runner
            .db
            .insert_blocks(seed.iter(), StorageKind::Static)
            .expect("failed to seed execution");

        let total_transactions = runner
            .db
            .factory
            .static_file_provider()
            .count_entries::<tables::Transactions>()
            .unwrap() as u64;

        let first_input = ExecInput {
            target: Some(previous_stage),
            checkpoint: Some(StageCheckpoint::new(stage_progress)),
        };

        // Execute first time
        let result = runner.execute(first_input).await.unwrap();
        let mut tx_count = 0;
        // Expected stopping point: first block whose running tx total exceeds the threshold.
        let expected_progress = seed
            .iter()
            .find(|x| {
                tx_count += x.transaction_count();
                tx_count as u64 > threshold
            })
            .map(|x| x.number)
            .unwrap_or(previous_stage);
        assert_matches!(result, Ok(_));
        assert_eq!(
            result.unwrap(),
            ExecOutput {
                checkpoint: StageCheckpoint::new(expected_progress).with_entities_stage_checkpoint(
                    EntitiesCheckpoint {
                        processed: runner.db.table::<tables::TransactionSenders>().unwrap().len()
                            as u64,
                        total: total_transactions
                    }
                ),
                done: false
            }
        );

        // Execute second time to completion
        runner.set_threshold(u64::MAX);
        let second_input = ExecInput {
            target: Some(previous_stage),
            checkpoint: Some(StageCheckpoint::new(expected_progress)),
        };
        let result = runner.execute(second_input).await.unwrap();
        assert_matches!(result, Ok(_));
        assert_eq!(
            result.as_ref().unwrap(),
            &ExecOutput {
                checkpoint: StageCheckpoint::new(previous_stage).with_entities_stage_checkpoint(
                    EntitiesCheckpoint { processed: total_transactions, total: total_transactions }
                ),
                done: true
            }
        );

        assert!(runner.validate_execution(first_input, result.ok()).is_ok(), "validation failed");
    }
#[test]
fn stage_checkpoint_pruned() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=100,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..10, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Static).expect("insert blocks");
let max_pruned_block = 30;
let max_processed_block = 70;
let mut tx_senders = Vec::new();
let mut tx_number = 0;
for block in &blocks[..=max_processed_block] {
for transaction in &block.body().transactions {
if block.number > max_pruned_block {
tx_senders
.push((tx_number, transaction.recover_signer().expect("recover signer")));
}
tx_number += 1;
}
}
db.insert_transaction_senders(tx_senders).expect("insert tx hash numbers");
let provider = db.factory.provider_rw().unwrap();
provider
.save_prune_checkpoint(
PruneSegment::SenderRecovery,
PruneCheckpoint {
block_number: Some(max_pruned_block),
tx_number: Some(
blocks[..=max_pruned_block as usize]
.iter()
.map(|block| block.transaction_count() as u64)
.sum(),
),
prune_mode: PruneMode::Full,
},
)
.expect("save stage checkpoint");
provider.commit().expect("commit");
let provider = db.factory.database_provider_rw().unwrap();
assert_eq!(
stage_checkpoint(&provider).expect("stage checkpoint"),
EntitiesCheckpoint {
processed: blocks[..=max_processed_block]
.iter()
.map(|block| block.transaction_count() as u64)
.sum(),
total: blocks.iter().map(|block| block.transaction_count() as u64).sum()
}
);
}
    /// Test harness driving [`SenderRecoveryStage`] against a [`TestStageDB`].
    struct SenderRecoveryTestRunner {
        /// Backing test database.
        db: TestStageDB,
        /// Commit threshold handed to the stage under test.
        threshold: u64,
    }

    impl Default for SenderRecoveryTestRunner {
        /// Fresh test database with a commit threshold of 1000.
        fn default() -> Self {
            Self { threshold: 1000, db: TestStageDB::default() }
        }
    }
    impl SenderRecoveryTestRunner {
        /// Overrides the commit threshold used by the stage under test.
        fn set_threshold(&mut self, threshold: u64) {
            self.threshold = threshold;
        }

        /// # Panics
        ///
        /// 1. If there are any entries in the [`tables::TransactionSenders`] table above a given
        ///    block number.
        /// 2. If there is no requested block entry in the bodies table, but
        ///    [`tables::TransactionSenders`] is not empty.
        fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> {
            let body_result = self
                .db
                .factory
                .provider_rw()?
                .block_body_indices(block)?
                .ok_or(ProviderError::BlockBodyIndicesNotFound(block));
            match body_result {
                // Block exists: no sender entry may live past its last transaction number.
                Ok(body) => self.db.ensure_no_entry_above::<tables::TransactionSenders, _>(
                    body.last_tx_num(),
                    |key| key,
                )?,
                // Block is absent: the senders table must be completely empty.
                Err(_) => {
                    assert!(self.db.table_is_empty::<tables::TransactionSenders>()?);
                }
            };
            Ok(())
        }
    }
    impl StageTestRunner for SenderRecoveryTestRunner {
        type S = SenderRecoveryStage;

        fn db(&self) -> &TestStageDB {
            &self.db
        }

        /// Builds the stage with the runner's current commit threshold.
        fn stage(&self) -> Self::S {
            SenderRecoveryStage { commit_threshold: self.threshold }
        }
    }
    impl ExecuteStageTestRunner for SenderRecoveryTestRunner {
        type Seed = Vec<SealedBlock<Block>>;

        /// Seeds the database with random blocks covering the stage's input range.
        fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
            let mut rng = generators::rng();
            let stage_progress = input.checkpoint().block_number;
            let end = input.target();

            let blocks = random_block_range(
                &mut rng,
                stage_progress..=end,
                BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() },
            );
            self.db.insert_blocks(blocks.iter(), StorageKind::Static)?;
            Ok(blocks)
        }

        /// Re-recovers every sender in the executed range and compares against the
        /// persisted entry; when there is no output, asserts no senders were written.
        fn validate_execution(
            &self,
            input: ExecInput,
            output: Option<ExecOutput>,
        ) -> Result<(), TestRunnerError> {
            match output {
                Some(output) => {
                    let provider = self.db.factory.provider()?;
                    let start_block = input.next_block();
                    let end_block = output.checkpoint.block_number;

                    // Empty executed range: nothing to validate.
                    if start_block > end_block {
                        return Ok(())
                    }

                    let mut body_cursor =
                        provider.tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
                    body_cursor.seek_exact(start_block)?;

                    while let Some((_, body)) = body_cursor.next()? {
                        for tx_id in body.tx_num_range() {
                            let transaction: TransactionSigned = provider
                                .transaction_by_id_unhashed(tx_id)?
                                .expect("no transaction entry");
                            let signer =
                                transaction.recover_signer().expect("failed to recover signer");
                            assert_eq!(Some(signer), provider.transaction_sender(tx_id)?)
                        }
                    }
                }
                None => self.ensure_no_senders_by_block(input.checkpoint().block_number)?,
            };
            Ok(())
        }
    }
    impl UnwindStageTestRunner for SenderRecoveryTestRunner {
        /// After unwinding, no sender entry may remain above the unwind target block.
        fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
            self.ensure_no_senders_by_block(input.unwind_to)
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/index_account_history.rs | crates/stages/stages/src/stages/index_account_history.rs | use super::{collect_history_indices, load_history_indices};
use alloy_primitives::Address;
use reth_config::config::{EtlConfig, IndexHistoryConfig};
use reth_db_api::{models::ShardedKey, table::Decode, tables, transaction::DbTxMut};
use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment};
use reth_stages_api::{
ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput,
};
use std::fmt::Debug;
use tracing::info;
/// Stage is indexing history the account changesets generated in
/// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information
/// on index sharding take a look at [`tables::AccountsHistory`]
#[derive(Debug)]
pub struct IndexAccountHistoryStage {
    /// Number of blocks after which the control
    /// flow will be returned to the pipeline for commit.
    pub commit_threshold: u64,
    /// Pruning configuration.
    pub prune_mode: Option<PruneMode>,
    /// ETL configuration
    pub etl_config: EtlConfig,
}

impl IndexAccountHistoryStage {
    /// Create new instance of [`IndexAccountHistoryStage`] from an index-history
    /// config, an ETL configuration and an optional prune mode.
    pub const fn new(
        config: IndexHistoryConfig,
        etl_config: EtlConfig,
        prune_mode: Option<PruneMode>,
    ) -> Self {
        Self { commit_threshold: config.commit_threshold, etl_config, prune_mode }
    }
}

impl Default for IndexAccountHistoryStage {
    /// Defaults to committing every 100k blocks with pruning disabled.
    fn default() -> Self {
        Self { commit_threshold: 100_000, prune_mode: None, etl_config: EtlConfig::default() }
    }
}
impl<Provider> Stage<Provider> for IndexAccountHistoryStage
where
    Provider:
        DBProvider<Tx: DbTxMut> + HistoryWriter + PruneCheckpointReader + PruneCheckpointWriter,
{
    /// Return the id of the stage
    fn id(&self) -> StageId {
        StageId::IndexAccountHistory
    }

    /// Execute the stage.
    fn execute(
        &mut self,
        provider: &Provider,
        mut input: ExecInput,
    ) -> Result<ExecOutput, StageError> {
        // If pruning is configured, fast-forward the checkpoint past the prunable range so
        // indices are never built for blocks the pruner would delete anyway.
        if let Some((target_prunable_block, prune_mode)) = self
            .prune_mode
            .map(|mode| {
                mode.prune_target_block(
                    input.target(),
                    PruneSegment::AccountHistory,
                    PrunePurpose::User,
                )
            })
            .transpose()?
            .flatten()
        {
            if target_prunable_block > input.checkpoint().block_number {
                input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));

                // Save prune checkpoint only if we don't have one already.
                // Otherwise, pruner may skip the unpruned range of blocks.
                if provider.get_prune_checkpoint(PruneSegment::AccountHistory)?.is_none() {
                    provider.save_prune_checkpoint(
                        PruneSegment::AccountHistory,
                        PruneCheckpoint {
                            block_number: Some(target_prunable_block),
                            tx_number: None,
                            prune_mode,
                        },
                    )?;
                }
            }
        }

        if input.target_reached() {
            return Ok(ExecOutput::done(input.checkpoint()))
        }

        let mut range = input.next_block_range();
        let first_sync = input.checkpoint().block_number == 0;

        // On first sync we might have history coming from genesis. We clear the table since it's
        // faster to rebuild from scratch.
        if first_sync {
            provider.tx_ref().clear::<tables::AccountsHistory>()?;
            range = 0..=*input.next_block_range().end();
        }

        info!(target: "sync::stages::index_account_history::exec", ?first_sync, "Collecting indices");
        // Gather (block number, address) pairs from the account changesets into an ETL
        // collector keyed for the sharded history table.
        let collector =
            collect_history_indices::<_, tables::AccountChangeSets, tables::AccountsHistory, _>(
                provider,
                range.clone(),
                ShardedKey::new,
                |(index, value)| (index, value.address),
                &self.etl_config,
            )?;

        info!(target: "sync::stages::index_account_history::exec", "Loading indices into database");
        // Flush the collected indices into sharded `AccountsHistory` entries.
        load_history_indices::<_, tables::AccountsHistory, _>(
            provider,
            collector,
            first_sync,
            ShardedKey::new,
            ShardedKey::<Address>::decode_owned,
            |key| key.key,
        )?;

        Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true })
    }

    /// Unwind the stage.
    fn unwind(
        &mut self,
        provider: &Provider,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        let (range, unwind_progress, _) =
            input.unwind_block_range_with_threshold(self.commit_threshold);

        // Remove the account history indices for all blocks in the unwound range.
        provider.unwind_account_history_indices_range(range)?;

        Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) })
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError,
TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::{address, BlockNumber, B256};
use itertools::Itertools;
use reth_db_api::{
cursor::DbCursorRO,
models::{
sharded_key, sharded_key::NUM_OF_INDICES_IN_SHARD, AccountBeforeTx,
StoredBlockBodyIndices,
},
transaction::DbTx,
BlockNumberList,
};
use reth_provider::{providers::StaticFileWriter, DatabaseProviderFactory};
use reth_testing_utils::generators::{
self, random_block_range, random_changeset_range, random_contract_account_range,
BlockRangeParams,
};
use std::collections::BTreeMap;
    /// Address used by every changeset entry in these tests.
    const ADDRESS: Address = address!("0x0000000000000000000000000000000000000001");
    /// Highest block number that still fits in a single full shard.
    const LAST_BLOCK_IN_FULL_SHARD: BlockNumber = NUM_OF_INDICES_IN_SHARD as BlockNumber;
    /// Two blocks past a full shard, forcing a shard split during setup.
    const MAX_BLOCK: BlockNumber = NUM_OF_INDICES_IN_SHARD as BlockNumber + 2;

    /// Changeset entry for [`ADDRESS`] with no previous account info.
    const fn acc() -> AccountBeforeTx {
        AccountBeforeTx { address: ADDRESS, info: None }
    }

    /// Shard for account
    const fn shard(shard_index: u64) -> ShardedKey<Address> {
        ShardedKey { key: ADDRESS, highest_block_number: shard_index }
    }

    /// Builds a compact block-number list from a slice.
    fn list(list: &[u64]) -> BlockNumberList {
        BlockNumberList::new(list.iter().copied()).unwrap()
    }

    /// Expands raw table rows into plain `Vec<u64>` block lists for easy assertions.
    fn cast(
        table: Vec<(ShardedKey<Address>, BlockNumberList)>,
    ) -> BTreeMap<ShardedKey<Address>, Vec<u64>> {
        table
            .into_iter()
            .map(|(k, v)| {
                let v = v.iter().collect();
                (k, v)
            })
            .collect()
    }
    /// Seeds block body indices and one changeset per block for `0..=MAX_BLOCK`.
    fn partial_setup(db: &TestStageDB) {
        // setup
        db.commit(|tx| {
            for block in 0..=MAX_BLOCK {
                tx.put::<tables::BlockBodyIndices>(
                    block,
                    StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
                )?;
                // setup changeset that is going to be applied to history index
                tx.put::<tables::AccountChangeSets>(block, acc())?;
            }
            Ok(())
        })
        .unwrap()
    }

    /// Executes the stage up to `run_to`, starting from an optional checkpoint,
    /// asserting that it completes in a single run and committing the result.
    fn run(db: &TestStageDB, run_to: u64, input_checkpoint: Option<BlockNumber>) {
        let input = ExecInput {
            target: Some(run_to),
            checkpoint: input_checkpoint
                .map(|block_number| StageCheckpoint { block_number, stage_checkpoint: None }),
        };
        let mut stage = IndexAccountHistoryStage::default();
        let provider = db.factory.database_provider_rw().unwrap();
        let out = stage.execute(&provider, input).unwrap();
        assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(run_to), done: true });
        provider.commit().unwrap();
    }

    /// Unwinds the stage from `unwind_from` down to `unwind_to`, asserting the
    /// reported checkpoint and committing the result.
    fn unwind(db: &TestStageDB, unwind_from: u64, unwind_to: u64) {
        let input = UnwindInput {
            checkpoint: StageCheckpoint::new(unwind_from),
            unwind_to,
            ..Default::default()
        };
        let mut stage = IndexAccountHistoryStage::default();
        let provider = db.factory.database_provider_rw().unwrap();
        let out = stage.unwind(&provider, input).unwrap();
        assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) });
        provider.commit().unwrap();
    }
    /// Indexing from genesis puts everything into the open (`u64::MAX`) shard;
    /// unwinding restores only the genesis entry.
    #[tokio::test]
    async fn insert_index_to_genesis() {
        // init
        let db = TestStageDB::default();

        // setup
        partial_setup(&db);

        // run
        run(&db, 3, None);

        // verify
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![0, 1, 2, 3])]));

        // unwind
        unwind(&db, 3, 0);

        // verify initial state
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![0])]));
    }

    /// New indices are appended to an existing, partially filled open shard.
    #[tokio::test]
    async fn insert_index_to_not_empty_shard() {
        // init
        let db = TestStageDB::default();

        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::AccountsHistory>(shard(u64::MAX), list(&[1, 2, 3])).unwrap();
            Ok(())
        })
        .unwrap();

        // run
        run(&db, 5, Some(3));

        // verify
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5])]));

        // unwind
        unwind(&db, 5, 3);

        // verify initial state
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3])]));
    }

    /// Appending past a full shard closes it under its highest block number and
    /// opens a fresh `u64::MAX` shard; unwinding merges them back together.
    #[tokio::test]
    async fn insert_index_to_full_shard() {
        // init
        let db = TestStageDB::default();
        let full_list = (1..=LAST_BLOCK_IN_FULL_SHARD).collect::<Vec<_>>();
        assert_eq!(full_list.len(), NUM_OF_INDICES_IN_SHARD);

        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::AccountsHistory>(shard(u64::MAX), list(&full_list)).unwrap();
            Ok(())
        })
        .unwrap();

        // run
        run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD));

        // verify
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(LAST_BLOCK_IN_FULL_SHARD), full_list.clone()),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1, LAST_BLOCK_IN_FULL_SHARD + 2])
            ])
        );

        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD);

        // verify initial state
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), full_list)]));
    }
    /// Filling an almost-full shard exactly to capacity keeps a single open shard.
    #[tokio::test]
    async fn insert_index_to_fill_shard() {
        // init
        let db = TestStageDB::default();
        let mut almost_full_list = (1..=LAST_BLOCK_IN_FULL_SHARD - 2).collect::<Vec<_>>();

        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::AccountsHistory>(shard(u64::MAX), list(&almost_full_list)).unwrap();
            Ok(())
        })
        .unwrap();

        // run
        run(&db, LAST_BLOCK_IN_FULL_SHARD, Some(LAST_BLOCK_IN_FULL_SHARD - 2));

        // verify
        almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD - 1);
        almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD);
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list.clone())]));

        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD, LAST_BLOCK_IN_FULL_SHARD - 2);

        // verify initial state
        almost_full_list.pop();
        almost_full_list.pop();
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)]));
    }

    /// Overflowing a shard by one block closes the (now full) shard and starts a
    /// new open shard containing just the overflowing block.
    #[tokio::test]
    async fn insert_index_second_half_shard() {
        // init
        let db = TestStageDB::default();
        let mut almost_full_list = (1..=LAST_BLOCK_IN_FULL_SHARD - 1).collect::<Vec<_>>();

        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::AccountsHistory>(shard(u64::MAX), list(&almost_full_list)).unwrap();
            Ok(())
        })
        .unwrap();

        // run
        run(&db, LAST_BLOCK_IN_FULL_SHARD + 1, Some(LAST_BLOCK_IN_FULL_SHARD - 1));

        // verify
        almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD);
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(LAST_BLOCK_IN_FULL_SHARD), almost_full_list.clone()),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1])
            ])
        );

        // unwind
        // NOTE(review): the run above targeted LAST_BLOCK_IN_FULL_SHARD + 1 but this
        // unwind checkpoint is LAST_BLOCK_IN_FULL_SHARD — other tests unwind from the
        // exact run target. Confirm this asymmetry is intentional.
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD, LAST_BLOCK_IN_FULL_SHARD - 1);

        // verify initial state
        almost_full_list.pop();
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)]));
    }
    /// Already-closed shards are left untouched while the open shard grows.
    #[tokio::test]
    async fn insert_index_to_third_shard() {
        // init
        let db = TestStageDB::default();
        let full_list = (1..=LAST_BLOCK_IN_FULL_SHARD).collect::<Vec<_>>();

        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::AccountsHistory>(shard(1), list(&full_list)).unwrap();
            tx.put::<tables::AccountsHistory>(shard(2), list(&full_list)).unwrap();
            tx.put::<tables::AccountsHistory>(
                shard(u64::MAX),
                list(&[LAST_BLOCK_IN_FULL_SHARD + 1]),
            )
            .unwrap();
            Ok(())
        })
        .unwrap();

        run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD + 1));

        // verify
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(1), full_list.clone()),
                (shard(2), full_list.clone()),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1, LAST_BLOCK_IN_FULL_SHARD + 2])
            ])
        );

        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD + 1);

        // verify initial state
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(1), full_list.clone()),
                (shard(2), full_list),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1])
            ])
        );
    }

    /// With a prune mode configured, indexing skips the prunable range entirely.
    #[tokio::test]
    async fn insert_index_with_prune_mode() {
        // init
        let db = TestStageDB::default();

        // setup
        db.commit(|tx| {
            // we just need first and last
            tx.put::<tables::BlockBodyIndices>(
                0,
                StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
            )
            .unwrap();

            tx.put::<tables::BlockBodyIndices>(
                100,
                StoredBlockBodyIndices { tx_count: 5, ..Default::default() },
            )
            .unwrap();

            // setup changeset that are going to be applied to history index
            tx.put::<tables::AccountChangeSets>(20, acc()).unwrap();
            tx.put::<tables::AccountChangeSets>(36, acc()).unwrap();
            tx.put::<tables::AccountChangeSets>(100, acc()).unwrap();
            Ok(())
        })
        .unwrap();

        // run
        let input = ExecInput { target: Some(20000), ..Default::default() };
        let mut stage = IndexAccountHistoryStage {
            prune_mode: Some(PruneMode::Before(36)),
            ..Default::default()
        };
        let provider = db.factory.database_provider_rw().unwrap();
        let out = stage.execute(&provider, input).unwrap();
        assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true });
        provider.commit().unwrap();

        // verify: the changeset at block 20 is below the prune target and was skipped
        let table = cast(db.table::<tables::AccountsHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])]));

        // unwind
        unwind(&db, 20000, 0);

        // verify initial state
        let table = db.table::<tables::AccountsHistory>().unwrap();
        assert!(table.is_empty());
    }
stage_test_suite_ext!(IndexAccountHistoryTestRunner, index_account_history);
    /// Test harness driving [`IndexAccountHistoryStage`] against a [`TestStageDB`].
    struct IndexAccountHistoryTestRunner {
        /// Backing test database.
        pub(crate) db: TestStageDB,
        /// Commit threshold forwarded to the stage.
        commit_threshold: u64,
        /// Optional prune configuration forwarded to the stage.
        prune_mode: Option<PruneMode>,
    }

    impl Default for IndexAccountHistoryTestRunner {
        /// Fresh test database, commit threshold of 1000, pruning disabled.
        fn default() -> Self {
            Self { db: TestStageDB::default(), commit_threshold: 1000, prune_mode: None }
        }
    }

    impl StageTestRunner for IndexAccountHistoryTestRunner {
        type S = IndexAccountHistoryStage;

        fn db(&self) -> &TestStageDB {
            &self.db
        }

        /// Builds the stage with the runner's threshold and prune settings.
        fn stage(&self) -> Self::S {
            Self::S {
                commit_threshold: self.commit_threshold,
                prune_mode: self.prune_mode,
                etl_config: EtlConfig::default(),
            }
        }
    }
    impl ExecuteStageTestRunner for IndexAccountHistoryTestRunner {
        type Seed = ();

        /// Seeds random blocks and account changesets over the stage's input range.
        fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
            let stage_process = input.checkpoint().block_number;
            let start = stage_process + 1;
            let end = input.target();
            let mut rng = generators::rng();

            let num_of_accounts = 31;
            let accounts = random_contract_account_range(&mut rng, &mut (0..num_of_accounts))
                .into_iter()
                .collect::<BTreeMap<_, _>>();

            let blocks = random_block_range(
                &mut rng,
                start..=end,
                BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..3, ..Default::default() },
            );

            let (changesets, _) = random_changeset_range(
                &mut rng,
                blocks.iter(),
                accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))),
                0..3,
                0..256,
            );

            // add block changeset from block 1.
            self.db.insert_changesets(changesets, Some(start))?;

            Ok(())
        }

        /// Recomputes the expected sharded index directly from the raw changesets and
        /// compares it against what the stage wrote to `AccountsHistory`.
        fn validate_execution(
            &self,
            input: ExecInput,
            output: Option<ExecOutput>,
        ) -> Result<(), TestRunnerError> {
            if let Some(output) = output {
                let start_block = input.next_block();
                let end_block = output.checkpoint.block_number;

                // Empty executed range: nothing to validate.
                if start_block > end_block {
                    return Ok(())
                }

                assert_eq!(
                    output,
                    ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }
                );

                let provider = self.db.factory.provider()?;
                let mut changeset_cursor =
                    provider.tx_ref().cursor_read::<tables::AccountChangeSets>()?;

                // Collect per-address lists of block numbers from the changesets.
                let account_transitions =
                    changeset_cursor.walk_range(start_block..=end_block)?.try_fold(
                        BTreeMap::new(),
                        |mut accounts: BTreeMap<Address, Vec<u64>>,
                         entry|
                         -> Result<_, TestRunnerError> {
                            let (index, account) = entry?;
                            accounts.entry(account.address).or_default().push(index);
                            Ok(accounts)
                        },
                    )?;

                let mut result = BTreeMap::new();
                for (address, indices) in account_transitions {
                    // chunk indices and insert them in shards of N size.
                    let mut chunks = indices
                        .iter()
                        .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD)
                        .into_iter()
                        .map(|chunks| chunks.copied().collect::<Vec<_>>())
                        .collect::<Vec<Vec<_>>>();
                    let last_chunk = chunks.pop();

                    // Full chunks close under their highest contained block number...
                    for list in chunks {
                        result.insert(
                            ShardedKey::new(
                                address,
                                *list.last().expect("Chuck does not return empty list")
                                    as BlockNumber,
                            ),
                            list,
                        );
                    }

                    // ...while the trailing chunk stays in the open `u64::MAX` shard.
                    if let Some(last_list) = last_chunk {
                        result.insert(ShardedKey::new(address, u64::MAX), last_list);
                    };
                }

                let table = cast(self.db.table::<tables::AccountsHistory>().unwrap());
                assert_eq!(table, result);
            }
            Ok(())
        }
    }
impl UnwindStageTestRunner for IndexAccountHistoryTestRunner {
fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
let table = self.db.table::<tables::AccountsHistory>().unwrap();
assert!(table.is_empty());
Ok(())
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/headers.rs | crates/stages/stages/src/stages/headers.rs | use alloy_consensus::BlockHeader;
use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256};
use futures_util::StreamExt;
use reth_config::config::EtlConfig;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
table::Value,
tables,
transaction::{DbTx, DbTxMut},
DbTxUnwindExt, RawKey, RawTable, RawValue,
};
use reth_etl::Collector;
use reth_network_p2p::headers::{
downloader::{HeaderDownloader, HeaderSyncGap, SyncTarget},
error::HeadersDownloaderError,
};
use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader, NodePrimitives, SealedHeader};
use reth_provider::{
providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider,
HeaderSyncGapProvider, StaticFileProviderFactory,
};
use reth_stages_api::{
CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, HeadersCheckpoint, Stage,
StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderError;
use std::task::{ready, Context, Poll};
use tokio::sync::watch;
use tracing::*;
/// The headers stage.
///
/// The headers stage downloads all block headers from the highest block in storage to
/// the perceived highest block on the network.
///
/// The headers are processed and data is inserted into static files, as well as into the
/// [`HeaderNumbers`][reth_db_api::tables::HeaderNumbers] table.
///
/// NOTE: This stage downloads headers in reverse and pushes them to the ETL [`Collector`]. It then
/// proceeds to push them sequentially to static files. The stage checkpoint is not updated until
/// this stage is done.
#[derive(Debug)]
pub struct HeaderStage<Provider, Downloader: HeaderDownloader> {
    /// Database handle.
    provider: Provider,
    /// Strategy for downloading the headers
    downloader: Downloader,
    /// The tip for the stage.
    ///
    /// This determines the sync target of the stage (set by the pipeline).
    tip: watch::Receiver<B256>,
    /// Current sync gap.
    sync_gap: Option<HeaderSyncGap<Downloader::Header>>,
    /// ETL collector with `HeaderHash` -> `BlockNumber`
    hash_collector: Collector<BlockHash, BlockNumber>,
    /// ETL collector with `BlockNumber` -> `BincodeSealedHeader`
    ///
    /// Headers are stored bincode-serialized so they can be spilled to disk by ETL.
    header_collector: Collector<BlockNumber, Bytes>,
    /// Returns true if the ETL collector has all necessary headers to fill the gap.
    is_etl_ready: bool,
}
// === impl HeaderStage ===

impl<Provider, Downloader> HeaderStage<Provider, Downloader>
where
    Downloader: HeaderDownloader,
{
    /// Create a new header stage
    pub fn new(
        database: Provider,
        downloader: Downloader,
        tip: watch::Receiver<B256>,
        etl_config: EtlConfig,
    ) -> Self {
        Self {
            provider: database,
            downloader,
            tip,
            sync_gap: None,
            // Split the configured ETL buffer size evenly between the two collectors.
            hash_collector: Collector::new(etl_config.file_size / 2, etl_config.dir.clone()),
            header_collector: Collector::new(etl_config.file_size / 2, etl_config.dir),
            is_etl_ready: false,
        }
    }

    /// Write downloaded headers to storage from ETL.
    ///
    /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`]
    /// database table.
    ///
    /// Returns the number of the last header written.
    fn write_headers<P>(&mut self, provider: &P) -> Result<BlockNumber, StageError>
    where
        P: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory,
        Downloader: HeaderDownloader<Header = <P::Primitives as NodePrimitives>::BlockHeader>,
        <P::Primitives as NodePrimitives>::BlockHeader: Value + FullBlockHeader,
    {
        let total_headers = self.header_collector.len();

        info!(target: "sync::stages::headers", total = total_headers, "Writing headers");

        let static_file_provider = provider.static_file_provider();

        // Consistency check of expected headers in static files vs DB is done on provider::sync_gap
        // when poll_execute_ready is polled.
        let mut last_header_number = static_file_provider
            .get_highest_static_file_block(StaticFileSegment::Headers)
            .unwrap_or_default();

        // Find the latest total difficulty
        let mut td = static_file_provider
            .header_td_by_number(last_header_number)?
            .ok_or(ProviderError::TotalDifficultyNotFound(last_header_number))?;

        // Although headers were downloaded in reverse order, the collector iterates it in ascending
        // order
        let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
        // Progress is logged roughly every 10% of the total work.
        let interval = (total_headers / 10).max(1);
        for (index, header) in self.header_collector.iter()?.enumerate() {
            let (_, header_buf) = header?;

            if index > 0 && index.is_multiple_of(interval) && total_headers > 100 {
                info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers");
            }

            let sealed_header: SealedHeader<Downloader::Header> =
                bincode::deserialize::<serde_bincode_compat::SealedHeader<'_, _>>(&header_buf)
                    .map_err(|err| StageError::Fatal(Box::new(err)))?
                    .into();

            let (header, header_hash) = sealed_header.split_ref();
            // Never rewrite the genesis header.
            if header.number() == 0 {
                continue
            }
            last_header_number = header.number();

            // Increase total difficulty
            td += header.difficulty();

            // Append to Headers segment
            writer.append_header(header, td, header_hash)?;
        }

        info!(target: "sync::stages::headers", total = total_headers, "Writing headers hash index");

        let mut cursor_header_numbers =
            provider.tx_ref().cursor_write::<RawTable<tables::HeaderNumbers>>()?;
        let mut first_sync = false;

        // If we only have the genesis block hash, then we are at first sync, and we can remove it,
        // add it to the collector and use tx.append on all hashes.
        if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 {
            if let Some((hash, block_number)) = cursor_header_numbers.last()? {
                if block_number.value()? == 0 {
                    self.hash_collector.insert(hash.key()?, 0)?;
                    cursor_header_numbers.delete_current()?;
                    first_sync = true;
                }
            }
        }

        // Since ETL sorts all entries by hashes, we are either appending (first sync) or inserting
        // in order (further syncs).
        for (index, hash_to_number) in self.hash_collector.iter()?.enumerate() {
            let (hash, number) = hash_to_number?;

            if index > 0 && index.is_multiple_of(interval) && total_headers > 100 {
                info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers hash index");
            }

            if first_sync {
                cursor_header_numbers.append(
                    RawKey::<BlockHash>::from_vec(hash),
                    &RawValue::<BlockNumber>::from_vec(number),
                )?;
            } else {
                cursor_header_numbers.upsert(
                    RawKey::<BlockHash>::from_vec(hash),
                    &RawValue::<BlockNumber>::from_vec(number),
                )?;
            }
        }

        Ok(last_header_number)
    }
}
impl<Provider, P, D> Stage<Provider> for HeaderStage<P, D>
where
Provider: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory,
P: HeaderSyncGapProvider<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
D: HeaderDownloader<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
<Provider::Primitives as NodePrimitives>::BlockHeader: FullBlockHeader + Value,
{
/// Return the id of the stage
fn id(&self) -> StageId {
StageId::Headers
}
fn poll_execute_ready(
&mut self,
cx: &mut Context<'_>,
input: ExecInput,
) -> Poll<Result<(), StageError>> {
let current_checkpoint = input.checkpoint();
// Return if stage has already completed the gap on the ETL files
if self.is_etl_ready {
return Poll::Ready(Ok(()))
}
// Lookup the head and tip of the sync range
let local_head = self.provider.local_tip_header(current_checkpoint.block_number)?;
let target = SyncTarget::Tip(*self.tip.borrow());
let gap = HeaderSyncGap { local_head, target };
let tip = gap.target.tip();
// Nothing to sync
if gap.is_closed() {
info!(
target: "sync::stages::headers",
checkpoint = %current_checkpoint.block_number,
target = ?tip,
"Target block already reached"
);
self.is_etl_ready = true;
self.sync_gap = Some(gap);
return Poll::Ready(Ok(()))
}
debug!(target: "sync::stages::headers", ?tip, head = ?gap.local_head.hash(), "Commencing sync");
let local_head_number = gap.local_head.number();
// let the downloader know what to sync
if self.sync_gap != Some(gap.clone()) {
self.sync_gap = Some(gap.clone());
self.downloader.update_sync_gap(gap.local_head, gap.target);
}
// We only want to stop once we have all the headers on ETL filespace (disk).
loop {
match ready!(self.downloader.poll_next_unpin(cx)) {
Some(Ok(headers)) => {
info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number()), to_block = headers.last().map(|h| h.number()), "Received headers");
for header in headers {
let header_number = header.number();
self.hash_collector.insert(header.hash(), header_number)?;
self.header_collector.insert(
header_number,
Bytes::from(
bincode::serialize(&serde_bincode_compat::SealedHeader::from(
&header,
))
.map_err(|err| StageError::Fatal(Box::new(err)))?,
),
)?;
// Headers are downloaded in reverse, so if we reach here, we know we have
// filled the gap.
if header_number == local_head_number + 1 {
self.is_etl_ready = true;
return Poll::Ready(Ok(()))
}
}
}
Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => {
error!(target: "sync::stages::headers", %error, "Cannot attach header to head");
self.sync_gap = None;
return Poll::Ready(Err(StageError::DetachedHead {
local_head: Box::new(local_head.block_with_parent()),
header: Box::new(header.block_with_parent()),
error,
}))
}
None => {
self.sync_gap = None;
return Poll::Ready(Err(StageError::ChannelClosed))
}
}
}
}
/// Download the headers in reverse order (falling block numbers)
/// starting from the tip of the chain
fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
let current_checkpoint = input.checkpoint();
if self.sync_gap.take().ok_or(StageError::MissingSyncGap)?.is_closed() {
self.is_etl_ready = false;
return Ok(ExecOutput::done(current_checkpoint))
}
// We should be here only after we have downloaded all headers into the disk buffer (ETL).
if !self.is_etl_ready {
return Err(StageError::MissingDownloadBuffer)
}
// Reset flag
self.is_etl_ready = false;
// Write the headers and related tables to DB from ETL space
let to_be_processed = self.hash_collector.len() as u64;
let last_header_number = self.write_headers(provider)?;
// Clear ETL collectors
self.hash_collector.clear();
self.header_collector.clear();
Ok(ExecOutput {
checkpoint: StageCheckpoint::new(last_header_number).with_headers_stage_checkpoint(
HeadersCheckpoint {
block_range: CheckpointBlockRange {
from: input.checkpoint().block_number,
to: last_header_number,
},
progress: EntitiesCheckpoint {
processed: input.checkpoint().block_number + to_be_processed,
total: last_header_number,
},
},
),
// We only reach here if all headers have been downloaded by ETL, and pushed to DB all
// in one stage run.
done: true,
})
}
/// Unwind the stage.
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
self.sync_gap.take();
// First unwind the db tables, until the unwind_to block number. use the walker to unwind
// HeaderNumbers based on the index in CanonicalHeaders
// unwind from the next block number since the unwind_to block is exclusive
provider
.tx_ref()
.unwind_table_by_walker::<tables::CanonicalHeaders, tables::HeaderNumbers>(
(input.unwind_to + 1)..,
)?;
provider.tx_ref().unwind_table_by_num::<tables::CanonicalHeaders>(input.unwind_to)?;
provider
.tx_ref()
.unwind_table_by_num::<tables::HeaderTerminalDifficulties>(input.unwind_to)?;
let unfinalized_headers_unwound =
provider.tx_ref().unwind_table_by_num::<tables::Headers>(input.unwind_to)?;
// determine how many headers to unwind from the static files based on the highest block and
// the unwind_to block
let static_file_provider = provider.static_file_provider();
let highest_block = static_file_provider
.get_highest_static_file_block(StaticFileSegment::Headers)
.unwrap_or_default();
let static_file_headers_to_unwind = highest_block - input.unwind_to;
for block_number in (input.unwind_to + 1)..=highest_block {
let hash = static_file_provider.block_hash(block_number)?;
// we have to delete from HeaderNumbers here as well as in the above unwind, since that
// mapping contains entries for both headers in the db and headers in static files
//
// so if we are unwinding past the lowest block in the db, we have to iterate through
// the HeaderNumbers entries that we'll delete in static files below
if let Some(header_hash) = hash {
provider.tx_ref().delete::<tables::HeaderNumbers>(header_hash, None)?;
}
}
// Now unwind the static files until the unwind_to block number
let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
writer.prune_headers(static_file_headers_to_unwind)?;
// Set the stage checkpoint entities processed based on how much we unwound - we add the
// headers unwound from static files and db
let stage_checkpoint =
input.checkpoint.headers_stage_checkpoint().map(|stage_checkpoint| HeadersCheckpoint {
block_range: stage_checkpoint.block_range,
progress: EntitiesCheckpoint {
processed: stage_checkpoint.progress.processed.saturating_sub(
static_file_headers_to_unwind + unfinalized_headers_unwound as u64,
),
total: stage_checkpoint.progress.total,
},
});
let mut checkpoint = StageCheckpoint::new(input.unwind_to);
if let Some(stage_checkpoint) = stage_checkpoint {
checkpoint = checkpoint.with_headers_stage_checkpoint(stage_checkpoint);
}
Ok(UnwindOutput { checkpoint })
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner,
};
use alloy_primitives::B256;
use assert_matches::assert_matches;
use reth_ethereum_primitives::BlockBody;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{RecoveredBlock, SealedBlock};
use reth_provider::{BlockWriter, ProviderFactory, StaticFileProviderFactory};
use reth_stages_api::StageUnitCheckpoint;
use reth_testing_utils::generators::{self, random_header, random_header_range};
use reth_trie::{updates::TrieUpdates, HashedPostStateSorted};
use std::sync::Arc;
use test_runner::HeadersTestRunner;
mod test_runner {
use super::*;
use crate::test_utils::{TestRunnerError, TestStageDB};
use reth_consensus::test_utils::TestConsensus;
use reth_downloaders::headers::reverse_headers::{
ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder,
};
use reth_network_p2p::test_utils::{TestHeaderDownloader, TestHeadersClient};
use reth_provider::{test_utils::MockNodeTypesWithDB, BlockNumReader};
use tokio::sync::watch;
pub(crate) struct HeadersTestRunner<D: HeaderDownloader> {
pub(crate) client: TestHeadersClient,
channel: (watch::Sender<B256>, watch::Receiver<B256>),
downloader_factory: Box<dyn Fn() -> D + Send + Sync + 'static>,
db: TestStageDB,
}
impl Default for HeadersTestRunner<TestHeaderDownloader> {
fn default() -> Self {
let client = TestHeadersClient::default();
Self {
client: client.clone(),
channel: watch::channel(B256::ZERO),
downloader_factory: Box::new(move || {
TestHeaderDownloader::new(client.clone(), 1000, 1000)
}),
db: TestStageDB::default(),
}
}
}
impl<D: HeaderDownloader<Header = alloy_consensus::Header> + 'static> StageTestRunner
for HeadersTestRunner<D>
{
type S = HeaderStage<ProviderFactory<MockNodeTypesWithDB>, D>;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
HeaderStage::new(
self.db.factory.clone(),
(*self.downloader_factory)(),
self.channel.1.clone(),
EtlConfig::default(),
)
}
}
impl<D: HeaderDownloader<Header = alloy_consensus::Header> + 'static> ExecuteStageTestRunner
for HeadersTestRunner<D>
{
type Seed = Vec<SealedHeader>;
fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
let mut rng = generators::rng();
let start = input.checkpoint().block_number;
let headers = random_header_range(&mut rng, 0..start + 1, B256::ZERO);
let head = headers.last().cloned().unwrap();
self.db.insert_headers_with_td(headers.iter())?;
// use previous checkpoint as seed size
let end = input.target.unwrap_or_default() + 1;
if start + 1 >= end {
return Ok(Vec::default())
}
let mut headers = random_header_range(&mut rng, start + 1..end, head.hash());
headers.insert(0, head);
Ok(headers)
}
/// Validate stored headers
fn validate_execution(
&self,
input: ExecInput,
output: Option<ExecOutput>,
) -> Result<(), TestRunnerError> {
let initial_checkpoint = input.checkpoint().block_number;
match output {
Some(output) if output.checkpoint.block_number > initial_checkpoint => {
let provider = self.db.factory.provider()?;
let mut td = provider
.header_td_by_number(initial_checkpoint.saturating_sub(1))?
.unwrap_or_default();
for block_num in initial_checkpoint..output.checkpoint.block_number {
// look up the header hash
let hash = provider.block_hash(block_num)?.expect("no header hash");
// validate the header number
assert_eq!(provider.block_number(hash)?, Some(block_num));
// validate the header
let header = provider.header_by_number(block_num)?;
assert!(header.is_some());
let header = SealedHeader::seal_slow(header.unwrap());
assert_eq!(header.hash(), hash);
// validate the header total difficulty
td += header.difficulty;
assert_eq!(provider.header_td_by_number(block_num)?, Some(td));
}
}
_ => self.check_no_header_entry_above(initial_checkpoint)?,
};
Ok(())
}
async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> {
self.client.extend(headers.iter().map(|h| h.clone_header())).await;
let tip = if headers.is_empty() {
let tip = random_header(&mut generators::rng(), 0, None);
self.db.insert_headers(std::iter::once(&tip))?;
tip.hash()
} else {
headers.last().unwrap().hash()
};
self.send_tip(tip);
Ok(())
}
}
impl<D: HeaderDownloader<Header = alloy_consensus::Header> + 'static> UnwindStageTestRunner
for HeadersTestRunner<D>
{
fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
self.check_no_header_entry_above(input.unwind_to)
}
}
impl HeadersTestRunner<ReverseHeadersDownloader<TestHeadersClient>> {
pub(crate) fn with_linear_downloader() -> Self {
let client = TestHeadersClient::default();
Self {
client: client.clone(),
channel: watch::channel(B256::ZERO),
downloader_factory: Box::new(move || {
ReverseHeadersDownloaderBuilder::default()
.stream_batch_size(500)
.build(client.clone(), Arc::new(TestConsensus::default()))
}),
db: TestStageDB::default(),
}
}
}
impl<D: HeaderDownloader> HeadersTestRunner<D> {
pub(crate) fn check_no_header_entry_above(
&self,
block: BlockNumber,
) -> Result<(), TestRunnerError> {
self.db
.ensure_no_entry_above_by_value::<tables::HeaderNumbers, _>(block, |val| val)?;
self.db.ensure_no_entry_above::<tables::CanonicalHeaders, _>(block, |key| key)?;
self.db.ensure_no_entry_above::<tables::Headers, _>(block, |key| key)?;
self.db.ensure_no_entry_above::<tables::HeaderTerminalDifficulties, _>(
block,
|num| num,
)?;
Ok(())
}
pub(crate) fn send_tip(&self, tip: B256) {
self.channel.0.send(tip).expect("failed to send tip");
}
}
}
stage_test_suite!(HeadersTestRunner, headers);
/// Execute the stage with linear downloader, unwinds, and ensures that the database tables
/// along with the static files are cleaned up.
#[tokio::test]
async fn execute_with_linear_downloader_unwind() {
let mut runner = HeadersTestRunner::with_linear_downloader();
let (checkpoint, previous_stage) = (1000, 1200);
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(checkpoint)),
};
let headers = runner.seed_execution(input).expect("failed to seed execution");
let rx = runner.execute(input);
runner.client.extend(headers.iter().rev().map(|h| h.clone_header())).await;
// skip `after_execution` hook for linear downloader
let tip = headers.last().unwrap();
runner.send_tip(tip.hash());
let result = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(result, Ok(ExecOutput { checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Headers(HeadersCheckpoint {
block_range: CheckpointBlockRange {
from,
to
},
progress: EntitiesCheckpoint {
processed,
total,
}
}))
}, done: true }) if block_number == tip.number &&
from == checkpoint && to == previous_stage &&
// -1 because we don't need to download the local head
processed == checkpoint + headers.len() as u64 - 1 && total == tip.number
);
assert!(runner.validate_execution(input, result.ok()).is_ok(), "validation failed");
assert!(runner.stage().hash_collector.is_empty());
assert!(runner.stage().header_collector.is_empty());
// let's insert some blocks using append_blocks_with_state
let sealed_headers =
random_header_range(&mut generators::rng(), tip.number..tip.number + 10, tip.hash());
// make them sealed blocks with senders by converting them to empty blocks
let sealed_blocks = sealed_headers
.iter()
.map(|header| {
RecoveredBlock::new_sealed(
SealedBlock::from_sealed_parts(header.clone(), BlockBody::default()),
vec![],
)
})
.collect();
// append the blocks
let provider = runner.db().factory.provider_rw().unwrap();
provider
.append_blocks_with_state(
sealed_blocks,
&ExecutionOutcome::default(),
HashedPostStateSorted::default(),
TrieUpdates::default(),
)
.unwrap();
provider.commit().unwrap();
// now we can unwind 10 blocks
let unwind_input = UnwindInput {
checkpoint: StageCheckpoint::new(tip.number + 10),
unwind_to: tip.number,
bad_block: None,
};
let unwind_output = runner.unwind(unwind_input).await.unwrap();
assert_eq!(unwind_output.checkpoint.block_number, tip.number);
// validate the unwind, ensure that the tables are cleaned up
assert!(runner.validate_unwind(unwind_input).is_ok());
}
/// Execute the stage with linear downloader
#[tokio::test]
async fn execute_with_linear_downloader() {
let mut runner = HeadersTestRunner::with_linear_downloader();
let (checkpoint, previous_stage) = (1000, 1200);
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(checkpoint)),
};
let headers = runner.seed_execution(input).expect("failed to seed execution");
let rx = runner.execute(input);
runner.client.extend(headers.iter().rev().map(|h| h.clone_header())).await;
// skip `after_execution` hook for linear downloader
let tip = headers.last().unwrap();
runner.send_tip(tip.hash());
let result = rx.await.unwrap();
runner.db().factory.static_file_provider().commit().unwrap();
assert_matches!(result, Ok(ExecOutput { checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Headers(HeadersCheckpoint {
block_range: CheckpointBlockRange {
from,
to
},
progress: EntitiesCheckpoint {
processed,
total,
}
}))
}, done: true }) if block_number == tip.number &&
from == checkpoint && to == previous_stage &&
// -1 because we don't need to download the local head
processed == checkpoint + headers.len() as u64 - 1 && total == tip.number
);
assert!(runner.validate_execution(input, result.ok()).is_ok(), "validation failed");
assert!(runner.stage().hash_collector.is_empty());
assert!(runner.stage().header_collector.is_empty());
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/tx_lookup.rs | crates/stages/stages/src/stages/tx_lookup.rs | use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{TxHash, TxNumber};
use num_traits::Zero;
use reth_config::config::{EtlConfig, TransactionLookupConfig};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
table::Value,
tables,
transaction::DbTxMut,
RawKey, RawValue,
};
use reth_etl::Collector;
use reth_primitives_traits::{NodePrimitives, SignedTransaction};
use reth_provider::{
BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter,
StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt,
};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment};
use reth_stages_api::{
EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId,
UnwindInput, UnwindOutput,
};
use reth_storage_errors::provider::ProviderError;
use tracing::*;
/// The transaction lookup stage.
///
/// This stage walks over existing transactions, and sets the transaction hash of each transaction
/// in a block to the corresponding `BlockNumber` at each block. This is written to the
/// [`tables::TransactionHashNumbers`] This is used for looking up changesets via the transaction
/// hash.
///
/// It uses [`reth_etl::Collector`] to collect all entries before finally writing them to disk.
#[derive(Debug, Clone)]
pub struct TransactionLookupStage {
/// The maximum number of lookup entries to hold in memory before pushing them to
/// [`reth_etl::Collector`].
chunk_size: u64,
etl_config: EtlConfig,
prune_mode: Option<PruneMode>,
}
impl Default for TransactionLookupStage {
fn default() -> Self {
Self { chunk_size: 5_000_000, etl_config: EtlConfig::default(), prune_mode: None }
}
}
impl TransactionLookupStage {
/// Create new instance of [`TransactionLookupStage`].
pub const fn new(
config: TransactionLookupConfig,
etl_config: EtlConfig,
prune_mode: Option<PruneMode>,
) -> Self {
Self { chunk_size: config.chunk_size, etl_config, prune_mode }
}
}
impl<Provider> Stage<Provider> for TransactionLookupStage
where
Provider: DBProvider<Tx: DbTxMut>
+ PruneCheckpointWriter
+ BlockReader
+ PruneCheckpointReader
+ StatsReader
+ StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value + SignedTransaction>>
+ TransactionsProviderExt,
{
/// Return the id of the stage
fn id(&self) -> StageId {
StageId::TransactionLookup
}
/// Write transaction hash -> id entries
fn execute(
&mut self,
provider: &Provider,
mut input: ExecInput,
) -> Result<ExecOutput, StageError> {
if let Some((target_prunable_block, prune_mode)) = self
.prune_mode
.map(|mode| {
mode.prune_target_block(
input.target(),
PruneSegment::TransactionLookup,
PrunePurpose::User,
)
})
.transpose()?
.flatten()
{
if target_prunable_block > input.checkpoint().block_number {
input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
// Save prune checkpoint only if we don't have one already.
// Otherwise, pruner may skip the unpruned range of blocks.
if provider.get_prune_checkpoint(PruneSegment::TransactionLookup)?.is_none() {
let target_prunable_tx_number = provider
.block_body_indices(target_prunable_block)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(target_prunable_block))?
.last_tx_num();
provider.save_prune_checkpoint(
PruneSegment::TransactionLookup,
PruneCheckpoint {
block_number: Some(target_prunable_block),
tx_number: Some(target_prunable_tx_number),
prune_mode,
},
)?;
}
}
}
if input.target_reached() {
return Ok(ExecOutput::done(input.checkpoint()));
}
// 500MB temporary files
let mut hash_collector: Collector<TxHash, TxNumber> =
Collector::new(self.etl_config.file_size, self.etl_config.dir.clone());
info!(
target: "sync::stages::transaction_lookup",
tx_range = ?input.checkpoint().block_number..=input.target(),
"Updating transaction lookup"
);
loop {
let (tx_range, block_range, is_final_range) =
input.next_block_range_with_transaction_threshold(provider, self.chunk_size)?;
let end_block = *block_range.end();
info!(target: "sync::stages::transaction_lookup", ?tx_range, "Calculating transaction hashes");
for (key, value) in provider.transaction_hashes_by_range(tx_range)? {
hash_collector.insert(key, value)?;
}
input.checkpoint = Some(
StageCheckpoint::new(end_block)
.with_entities_stage_checkpoint(stage_checkpoint(provider)?),
);
if is_final_range {
let append_only =
provider.count_entries::<tables::TransactionHashNumbers>()?.is_zero();
let mut txhash_cursor = provider
.tx_ref()
.cursor_write::<tables::RawTable<tables::TransactionHashNumbers>>()?;
let total_hashes = hash_collector.len();
let interval = (total_hashes / 10).max(1);
for (index, hash_to_number) in hash_collector.iter()?.enumerate() {
let (hash, number) = hash_to_number?;
if index > 0 && index.is_multiple_of(interval) {
info!(
target: "sync::stages::transaction_lookup",
?append_only,
progress = %format!("{:.2}%", (index as f64 / total_hashes as f64) * 100.0),
"Inserting hashes"
);
}
let key = RawKey::<TxHash>::from_vec(hash);
if append_only {
txhash_cursor.append(key, &RawValue::<TxNumber>::from_vec(number))?
} else {
txhash_cursor.insert(key, &RawValue::<TxNumber>::from_vec(number))?
}
}
trace!(target: "sync::stages::transaction_lookup",
total_hashes,
"Transaction hashes inserted"
);
break;
}
}
Ok(ExecOutput {
checkpoint: StageCheckpoint::new(input.target())
.with_entities_stage_checkpoint(stage_checkpoint(provider)?),
done: true,
})
}
/// Unwind the stage.
fn unwind(
&mut self,
provider: &Provider,
input: UnwindInput,
) -> Result<UnwindOutput, StageError> {
let tx = provider.tx_ref();
let (range, unwind_to, _) = input.unwind_block_range_with_threshold(self.chunk_size);
// Cursor to unwind tx hash to number
let mut tx_hash_number_cursor = tx.cursor_write::<tables::TransactionHashNumbers>()?;
let static_file_provider = provider.static_file_provider();
let rev_walker = provider
.block_body_indices_range(range.clone())?
.into_iter()
.zip(range.collect::<Vec<_>>())
.rev();
for (body, number) in rev_walker {
if number <= unwind_to {
break;
}
// Delete all transactions that belong to this block
for tx_id in body.tx_num_range() {
// First delete the transaction and hash to id mapping
if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? {
if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() {
tx_hash_number_cursor.delete_current()?;
}
}
}
}
Ok(UnwindOutput {
checkpoint: StageCheckpoint::new(unwind_to)
.with_entities_stage_checkpoint(stage_checkpoint(provider)?),
})
}
}
fn stage_checkpoint<Provider>(provider: &Provider) -> Result<EntitiesCheckpoint, StageError>
where
Provider: PruneCheckpointReader + StaticFileProviderFactory + StatsReader,
{
let pruned_entries = provider
.get_prune_checkpoint(PruneSegment::TransactionLookup)?
.and_then(|checkpoint| checkpoint.tx_number)
// `+1` is needed because `TxNumber` is 0-indexed
.map(|tx_number| tx_number + 1)
.unwrap_or_default();
Ok(EntitiesCheckpoint {
// If `TransactionHashNumbers` table was pruned, we will have a number of entries in it not
// matching the actual number of processed transactions. To fix that, we add the
// number of pruned `TransactionHashNumbers` entries.
processed: provider.count_entries::<tables::TransactionHashNumbers>()? as u64 +
pruned_entries,
// Count only static files entries. If we count the database entries too, we may have
// duplicates. We're sure that the static files have all entries that database has,
// because we run the `StaticFileProducer` before starting the pipeline.
total: provider.static_file_provider().count_entries::<tables::Transactions>()? as u64,
})
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind,
TestRunnerError, TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::{BlockNumber, B256};
use assert_matches::assert_matches;
use reth_db_api::transaction::DbTx;
use reth_ethereum_primitives::Block;
use reth_primitives_traits::SealedBlock;
use reth_provider::{
providers::StaticFileWriter, BlockBodyIndicesProvider, DatabaseProviderFactory,
StaticFileProviderFactory,
};
use reth_stages_api::StageUnitCheckpoint;
use reth_testing_utils::generators::{
self, random_block, random_block_range, BlockParams, BlockRangeParams,
};
use std::ops::Sub;
// Implement stage test suite.
stage_test_suite_ext!(TransactionLookupTestRunner, transaction_lookup);
#[tokio::test]
async fn execute_single_transaction_lookup() {
let (previous_stage, stage_progress) = (500, 100);
let mut rng = generators::rng();
// Set up the runner
let runner = TransactionLookupTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
// Insert blocks with a single transaction at block `stage_progress + 10`
let non_empty_block_number = stage_progress + 10;
let blocks = (stage_progress..=input.target())
.map(|number| {
random_block(
&mut rng,
number,
BlockParams {
tx_count: Some((number == non_empty_block_number) as u8),
..Default::default()
},
)
})
.collect::<Vec<_>>();
runner
.db
.insert_blocks(blocks.iter(), StorageKind::Static)
.expect("failed to insert blocks");
let rx = runner.execute(input);
// Assert the successful result
let result = rx.await.unwrap();
assert_matches!(
result,
Ok(ExecOutput {
checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
}, done: true }) if block_number == previous_stage && processed == total &&
total == runner.db.factory.static_file_provider().count_entries::<tables::Transactions>().unwrap() as u64
);
// Validate the stage execution
assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
}
#[tokio::test]
async fn execute_pruned_transaction_lookup() {
let (previous_stage, prune_target, stage_progress) = (500, 400, 100);
let mut rng = generators::rng();
// Set up the runner
let mut runner = TransactionLookupTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
// Seed only once with full input range
let seed = random_block_range(
&mut rng,
stage_progress + 1..=previous_stage,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() },
);
runner
.db
.insert_blocks(seed.iter(), StorageKind::Static)
.expect("failed to seed execution");
runner.set_prune_mode(PruneMode::Before(prune_target));
let rx = runner.execute(input);
// Assert the successful result
let result = rx.await.unwrap();
assert_matches!(
result,
Ok(ExecOutput {
checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
}, done: true }) if block_number == previous_stage && processed == total &&
total == runner.db.factory.static_file_provider().count_entries::<tables::Transactions>().unwrap() as u64
);
// Validate the stage execution
assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
}
#[test]
fn stage_checkpoint_pruned() {
let db = TestStageDB::default();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=100,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..10, ..Default::default() },
);
db.insert_blocks(blocks.iter(), StorageKind::Static).expect("insert blocks");
let max_pruned_block = 30;
let max_processed_block = 70;
let mut tx_hash_numbers = Vec::new();
let mut tx_hash_number = 0;
for block in &blocks[..=max_processed_block] {
for transaction in &block.body().transactions {
if block.number > max_pruned_block {
tx_hash_numbers.push((*transaction.tx_hash(), tx_hash_number));
}
tx_hash_number += 1;
}
}
db.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers");
let provider = db.factory.provider_rw().unwrap();
provider
.save_prune_checkpoint(
PruneSegment::TransactionLookup,
PruneCheckpoint {
block_number: Some(max_pruned_block),
tx_number: Some(
blocks[..=max_pruned_block as usize]
.iter()
.map(|block| block.transaction_count() as u64)
.sum::<u64>()
.sub(1), // `TxNumber` is 0-indexed
),
prune_mode: PruneMode::Full,
},
)
.expect("save stage checkpoint");
provider.commit().expect("commit");
let provider = db.factory.database_provider_rw().unwrap();
assert_eq!(
stage_checkpoint(&provider).expect("stage checkpoint"),
EntitiesCheckpoint {
processed: blocks[..=max_processed_block]
.iter()
.map(|block| block.transaction_count() as u64)
.sum(),
total: blocks.iter().map(|block| block.transaction_count() as u64).sum()
}
);
}
struct TransactionLookupTestRunner {
db: TestStageDB,
chunk_size: u64,
etl_config: EtlConfig,
prune_mode: Option<PruneMode>,
}
impl Default for TransactionLookupTestRunner {
fn default() -> Self {
Self {
db: TestStageDB::default(),
chunk_size: 1000,
etl_config: EtlConfig::default(),
prune_mode: None,
}
}
}
impl TransactionLookupTestRunner {
fn set_prune_mode(&mut self, prune_mode: PruneMode) {
self.prune_mode = Some(prune_mode);
}
/// # Panics
///
/// 1. If there are any entries in the [`tables::TransactionHashNumbers`] table above a
/// given block number.
/// 2. If there is no requested block entry in the bodies table, but
/// [`tables::TransactionHashNumbers`] is not empty.
fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> {
let body_result = self
.db
.factory
.provider_rw()?
.block_body_indices(number)?
.ok_or(ProviderError::BlockBodyIndicesNotFound(number));
match body_result {
Ok(body) => {
self.db.ensure_no_entry_above_by_value::<tables::TransactionHashNumbers, _>(
body.last_tx_num(),
|key| key,
)?
}
Err(_) => {
assert!(self.db.table_is_empty::<tables::TransactionHashNumbers>()?);
}
};
Ok(())
}
}
impl StageTestRunner for TransactionLookupTestRunner {
type S = TransactionLookupStage;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
TransactionLookupStage {
chunk_size: self.chunk_size,
etl_config: self.etl_config.clone(),
prune_mode: self.prune_mode,
}
}
}
impl ExecuteStageTestRunner for TransactionLookupTestRunner {
    type Seed = Vec<SealedBlock<Block>>;

    /// Seeds the database with a random chain of blocks (0..2 txs each)
    /// spanning `checkpoint + 1..=target`, stored via static files.
    fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
        let stage_progress = input.checkpoint().block_number;
        let end = input.target();
        let mut rng = generators::rng();
        let blocks = random_block_range(
            &mut rng,
            stage_progress + 1..=end,
            BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() },
        );
        self.db.insert_blocks(blocks.iter(), StorageKind::Static)?;
        Ok(blocks)
    }

    /// Verifies that every transaction in the executed (non-pruned) range has a
    /// hash -> tx-id entry in [`tables::TransactionHashNumbers`].
    fn validate_execution(
        &self,
        mut input: ExecInput,
        output: Option<ExecOutput>,
    ) -> Result<(), TestRunnerError> {
        match output {
            Some(output) => {
                let provider = self.db.factory.provider()?;

                // Mirror the stage's own behavior: fast-forward the checkpoint to
                // the prune target so pruned blocks are excluded from validation.
                if let Some((target_prunable_block, _)) = self
                    .prune_mode
                    .map(|mode| {
                        mode.prune_target_block(
                            input.target(),
                            PruneSegment::TransactionLookup,
                            PrunePurpose::User,
                        )
                    })
                    .transpose()
                    .expect("prune target block for transaction lookup")
                    .flatten()
                {
                    if target_prunable_block > input.checkpoint().block_number {
                        input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
                    }
                }
                let start_block = input.next_block();
                let end_block = output.checkpoint.block_number;
                if start_block > end_block {
                    return Ok(())
                }

                // Walk block bodies and assert that the lookup table maps each tx
                // hash back to its transaction id.
                // NOTE(review): `next()` after `seek_exact(start_block)` may skip
                // `start_block`'s own body depending on cursor semantics — confirm
                // the first block in the range is actually validated.
                let mut body_cursor =
                    provider.tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
                body_cursor.seek_exact(start_block)?;
                while let Some((_, body)) = body_cursor.next()? {
                    for tx_id in body.tx_num_range() {
                        let transaction =
                            provider.transaction_by_id(tx_id)?.expect("no transaction entry");
                        assert_eq!(
                            Some(tx_id),
                            provider.transaction_id(*transaction.tx_hash())?
                        );
                    }
                }
            }
            None => self.ensure_no_hash_by_block(input.checkpoint().block_number)?,
        };
        Ok(())
    }
}
impl UnwindStageTestRunner for TransactionLookupTestRunner {
    /// After unwinding, no hash -> tx-id entries may remain above `unwind_to`.
    fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
        self.ensure_no_hash_by_block(input.unwind_to)
    }
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/index_storage_history.rs | crates/stages/stages/src/stages/index_storage_history.rs | use super::{collect_history_indices, load_history_indices};
use crate::{StageCheckpoint, StageId};
use reth_config::config::{EtlConfig, IndexHistoryConfig};
use reth_db_api::{
models::{storage_sharded_key::StorageShardedKey, AddressStorageKey, BlockNumberAddress},
table::Decode,
tables,
transaction::DbTxMut,
};
use reth_provider::{DBProvider, HistoryWriter, PruneCheckpointReader, PruneCheckpointWriter};
use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment};
use reth_stages_api::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput};
use std::fmt::Debug;
use tracing::info;
/// Stage that indexes the history of the storage changesets generated in
/// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information
/// on index sharding take a look at [`tables::StoragesHistory`].
#[derive(Debug)]
pub struct IndexStorageHistoryStage {
    /// Number of blocks after which the control
    /// flow will be returned to the pipeline for commit.
    pub commit_threshold: u64,
    /// Pruning configuration.
    pub prune_mode: Option<PruneMode>,
    /// ETL configuration
    pub etl_config: EtlConfig,
}
impl IndexStorageHistoryStage {
    /// Constructs the stage from the pipeline's index-history config, ETL
    /// settings, and an optional pruning mode.
    pub const fn new(
        config: IndexHistoryConfig,
        etl_config: EtlConfig,
        prune_mode: Option<PruneMode>,
    ) -> Self {
        let commit_threshold = config.commit_threshold;
        Self { commit_threshold, etl_config, prune_mode }
    }
}
impl Default for IndexStorageHistoryStage {
    /// 100k-block commit threshold, pruning disabled, default ETL settings.
    fn default() -> Self {
        Self {
            commit_threshold: 100_000,
            etl_config: EtlConfig::default(),
            prune_mode: None,
        }
    }
}
impl<Provider> Stage<Provider> for IndexStorageHistoryStage
where
    Provider:
        DBProvider<Tx: DbTxMut> + PruneCheckpointWriter + HistoryWriter + PruneCheckpointReader,
{
    /// Return the id of the stage
    fn id(&self) -> StageId {
        StageId::IndexStorageHistory
    }

    /// Execute the stage: collect storage changesets into an ETL collector,
    /// then load them into the sharded [`tables::StoragesHistory`] index.
    fn execute(
        &mut self,
        provider: &Provider,
        mut input: ExecInput,
    ) -> Result<ExecOutput, StageError> {
        // If pruning is configured, fast-forward the checkpoint past the
        // prunable range so those blocks are never indexed.
        if let Some((target_prunable_block, prune_mode)) = self
            .prune_mode
            .map(|mode| {
                mode.prune_target_block(
                    input.target(),
                    PruneSegment::StorageHistory,
                    PrunePurpose::User,
                )
            })
            .transpose()?
            .flatten()
        {
            if target_prunable_block > input.checkpoint().block_number {
                input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
                // Save prune checkpoint only if we don't have one already.
                // Otherwise, pruner may skip the unpruned range of blocks.
                if provider.get_prune_checkpoint(PruneSegment::StorageHistory)?.is_none() {
                    provider.save_prune_checkpoint(
                        PruneSegment::StorageHistory,
                        PruneCheckpoint {
                            block_number: Some(target_prunable_block),
                            tx_number: None,
                            prune_mode,
                        },
                    )?;
                }
            }
        }
        if input.target_reached() {
            return Ok(ExecOutput::done(input.checkpoint()))
        }

        let mut range = input.next_block_range();
        let first_sync = input.checkpoint().block_number == 0;
        // On first sync we might have history coming from genesis. We clear the table since it's
        // faster to rebuild from scratch.
        if first_sync {
            provider.tx_ref().clear::<tables::StoragesHistory>()?;
            range = 0..=*input.next_block_range().end();
        }

        info!(target: "sync::stages::index_storage_history::exec", ?first_sync, "Collecting indices");
        // Phase 1: stream the changeset range into ETL files, keyed by
        // (address, storage key) with the highest touching block number.
        let collector =
            collect_history_indices::<_, tables::StorageChangeSets, tables::StoragesHistory, _>(
                provider,
                BlockNumberAddress::range(range.clone()),
                |AddressStorageKey((address, storage_key)), highest_block_number| {
                    StorageShardedKey::new(address, storage_key, highest_block_number)
                },
                |(key, value)| (key.block_number(), AddressStorageKey((key.address(), value.key))),
                &self.etl_config,
            )?;

        info!(target: "sync::stages::index_storage_history::exec", "Loading indices into database");
        // Phase 2: merge the collected indices into the sharded history table.
        load_history_indices::<_, tables::StoragesHistory, _>(
            provider,
            collector,
            first_sync,
            |AddressStorageKey((address, storage_key)), highest_block_number| {
                StorageShardedKey::new(address, storage_key, highest_block_number)
            },
            StorageShardedKey::decode_owned,
            |key| AddressStorageKey((key.address, key.sharded_key.key)),
        )?;

        Ok(ExecOutput { checkpoint: StageCheckpoint::new(*range.end()), done: true })
    }

    /// Unwind the stage by removing history indices for the unwound block range
    /// (bounded by `commit_threshold` blocks per invocation).
    fn unwind(
        &mut self,
        provider: &Provider,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        let (range, unwind_progress, _) =
            input.unwind_block_range_with_threshold(self.commit_threshold);
        provider.unwind_storage_history_indices_range(BlockNumberAddress::range(range))?;
        Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) })
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError,
TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::{address, b256, Address, BlockNumber, FlaggedStorage, B256};
use itertools::Itertools;
use reth_db_api::{
cursor::DbCursorRO,
models::{
sharded_key, storage_sharded_key::NUM_OF_INDICES_IN_SHARD, ShardedKey,
StoredBlockBodyIndices,
},
transaction::DbTx,
BlockNumberList,
};
use reth_primitives_traits::StorageEntry;
use reth_provider::{providers::StaticFileWriter, DatabaseProviderFactory};
use reth_testing_utils::generators::{
self, random_block_range, random_changeset_range, random_contract_account_range,
BlockRangeParams,
};
use std::collections::BTreeMap;
    // Fixed account/slot used by all shard tests.
    const ADDRESS: Address = address!("0x0000000000000000000000000000000000000001");
    const STORAGE_KEY: B256 =
        b256!("0x0000000000000000000000000000000000000000000000000000000000000001");
    // Block number that exactly fills one shard.
    const LAST_BLOCK_IN_FULL_SHARD: BlockNumber = NUM_OF_INDICES_IN_SHARD as BlockNumber;
    const MAX_BLOCK: BlockNumber = NUM_OF_INDICES_IN_SHARD as BlockNumber + 2;

    const fn storage(key: B256) -> StorageEntry {
        // Value is not used in indexing stage.
        StorageEntry { key, value: FlaggedStorage::ZERO }
    }

    /// Changeset key for `block_number` at the fixed test address.
    const fn block_number_address(block_number: u64) -> BlockNumberAddress {
        BlockNumberAddress((block_number, ADDRESS))
    }

    /// Shard for account
    const fn shard(shard_index: u64) -> StorageShardedKey {
        StorageShardedKey {
            address: ADDRESS,
            sharded_key: ShardedKey { key: STORAGE_KEY, highest_block_number: shard_index },
        }
    }

    /// Builds a roaring-style block-number list from a slice.
    fn list(list: &[u64]) -> BlockNumberList {
        BlockNumberList::new(list.iter().copied()).unwrap()
    }

    /// Converts raw table rows into a comparable `BTreeMap` of plain vectors.
    fn cast(
        table: Vec<(StorageShardedKey, BlockNumberList)>,
    ) -> BTreeMap<StorageShardedKey, Vec<u64>> {
        table
            .into_iter()
            .map(|(k, v)| {
                let v = v.iter().collect();
                (k, v)
            })
            .collect()
    }

    /// Populates bodies and one storage changeset per block for 0..=MAX_BLOCK.
    fn partial_setup(db: &TestStageDB) {
        // setup
        db.commit(|tx| {
            for block in 0..=MAX_BLOCK {
                tx.put::<tables::BlockBodyIndices>(
                    block,
                    StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
                )?;
                // setup changeset that is going to be applied to history index
                tx.put::<tables::StorageChangeSets>(
                    block_number_address(block),
                    storage(STORAGE_KEY),
                )?;
            }
            Ok(())
        })
        .unwrap()
    }

    /// Executes the stage up to `run_to`, asserting a clean, done output.
    fn run(db: &TestStageDB, run_to: u64, input_checkpoint: Option<BlockNumber>) {
        let input = ExecInput {
            target: Some(run_to),
            checkpoint: input_checkpoint
                .map(|block_number| StageCheckpoint { block_number, stage_checkpoint: None }),
        };
        let mut stage = IndexStorageHistoryStage::default();
        let provider = db.factory.database_provider_rw().unwrap();
        let out = stage.execute(&provider, input).unwrap();
        assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(run_to), done: true });
        provider.commit().unwrap();
    }

    /// Unwinds the stage from `unwind_from` down to `unwind_to`.
    fn unwind(db: &TestStageDB, unwind_from: u64, unwind_to: u64) {
        let input = UnwindInput {
            checkpoint: StageCheckpoint::new(unwind_from),
            unwind_to,
            ..Default::default()
        };
        let mut stage = IndexStorageHistoryStage::default();
        let provider = db.factory.database_provider_rw().unwrap();
        let out = stage.unwind(&provider, input).unwrap();
        assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) });
        provider.commit().unwrap();
    }
    /// Indexing from genesis writes all blocks into the open (u64::MAX) shard.
    #[tokio::test]
    async fn insert_index_to_genesis() {
        // init
        let db = TestStageDB::default();
        // setup
        partial_setup(&db);
        // run
        run(&db, 3, None);
        // verify
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![0, 1, 2, 3])]));
        // unwind
        unwind(&db, 5, 0);
        // verify initial state
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![0])]));
    }

    /// New indices append to an already-populated (non-full) shard.
    #[tokio::test]
    async fn insert_index_to_not_empty_shard() {
        // init
        let db = TestStageDB::default();
        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::StoragesHistory>(shard(u64::MAX), list(&[1, 2, 3])).unwrap();
            Ok(())
        })
        .unwrap();
        // run
        run(&db, 5, Some(3));
        // verify
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5])]));
        // unwind
        unwind(&db, 5, 3);
        // verify initial state
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3])]));
    }

    /// Appending past a full shard closes it and opens a new u64::MAX shard.
    #[tokio::test]
    async fn insert_index_to_full_shard() {
        // init
        let db = TestStageDB::default();
        // change does not matter only that account is present in changeset.
        let full_list = (1..=LAST_BLOCK_IN_FULL_SHARD).collect::<Vec<_>>();
        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::StoragesHistory>(shard(u64::MAX), list(&full_list)).unwrap();
            Ok(())
        })
        .unwrap();
        // run
        run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD));
        // verify
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(LAST_BLOCK_IN_FULL_SHARD), full_list.clone()),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1, LAST_BLOCK_IN_FULL_SHARD + 2])
            ])
        );
        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD);
        // verify initial state
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), full_list)]));
    }

    /// Filling the shard exactly to capacity keeps it as the open shard.
    #[tokio::test]
    async fn insert_index_to_fill_shard() {
        // init
        let db = TestStageDB::default();
        let mut almost_full_list = (1..=LAST_BLOCK_IN_FULL_SHARD - 2).collect::<Vec<_>>();
        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::StoragesHistory>(shard(u64::MAX), list(&almost_full_list)).unwrap();
            Ok(())
        })
        .unwrap();
        // run
        run(&db, LAST_BLOCK_IN_FULL_SHARD, Some(LAST_BLOCK_IN_FULL_SHARD - 2));
        // verify
        almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD - 1);
        almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD);
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list.clone())]));
        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD, LAST_BLOCK_IN_FULL_SHARD - 2);
        // verify initial state
        almost_full_list.pop();
        almost_full_list.pop();
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)]));
        // verify initial state
    }

    /// Overflow by one block splits into a closed full shard plus a new one.
    #[tokio::test]
    async fn insert_index_second_half_shard() {
        // init
        let db = TestStageDB::default();
        let mut close_full_list = (1..=LAST_BLOCK_IN_FULL_SHARD - 1).collect::<Vec<_>>();
        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::StoragesHistory>(shard(u64::MAX), list(&close_full_list)).unwrap();
            Ok(())
        })
        .unwrap();
        // run
        run(&db, LAST_BLOCK_IN_FULL_SHARD + 1, Some(LAST_BLOCK_IN_FULL_SHARD - 1));
        // verify
        close_full_list.push(LAST_BLOCK_IN_FULL_SHARD);
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(LAST_BLOCK_IN_FULL_SHARD), close_full_list.clone()),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1])
            ])
        );
        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD, LAST_BLOCK_IN_FULL_SHARD - 1);
        // verify initial state
        close_full_list.pop();
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list)]));
    }

    /// Indexing with two closed shards already present only touches the open shard.
    #[tokio::test]
    async fn insert_index_to_third_shard() {
        // init
        let db = TestStageDB::default();
        let full_list = (1..=LAST_BLOCK_IN_FULL_SHARD).collect::<Vec<_>>();
        // setup
        partial_setup(&db);
        db.commit(|tx| {
            tx.put::<tables::StoragesHistory>(shard(1), list(&full_list)).unwrap();
            tx.put::<tables::StoragesHistory>(shard(2), list(&full_list)).unwrap();
            tx.put::<tables::StoragesHistory>(
                shard(u64::MAX),
                list(&[LAST_BLOCK_IN_FULL_SHARD + 1]),
            )
            .unwrap();
            Ok(())
        })
        .unwrap();
        run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD + 1));
        // verify
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(1), full_list.clone()),
                (shard(2), full_list.clone()),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1, LAST_BLOCK_IN_FULL_SHARD + 2])
            ])
        );
        // unwind
        unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD + 1);
        // verify initial state
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(
            table,
            BTreeMap::from([
                (shard(1), full_list.clone()),
                (shard(2), full_list),
                (shard(u64::MAX), vec![LAST_BLOCK_IN_FULL_SHARD + 1])
            ])
        );
    }

    /// With `PruneMode::Before(36)`, blocks below the prune target are never indexed.
    #[tokio::test]
    async fn insert_index_with_prune_mode() {
        // init
        let db = TestStageDB::default();
        // setup
        db.commit(|tx| {
            // we just need first and last
            tx.put::<tables::BlockBodyIndices>(
                0,
                StoredBlockBodyIndices { tx_count: 3, ..Default::default() },
            )
            .unwrap();
            tx.put::<tables::BlockBodyIndices>(
                100,
                StoredBlockBodyIndices { tx_count: 5, ..Default::default() },
            )
            .unwrap();
            // setup changeset that are going to be applied to history index
            tx.put::<tables::StorageChangeSets>(block_number_address(20), storage(STORAGE_KEY))
                .unwrap();
            tx.put::<tables::StorageChangeSets>(block_number_address(36), storage(STORAGE_KEY))
                .unwrap();
            tx.put::<tables::StorageChangeSets>(block_number_address(100), storage(STORAGE_KEY))
                .unwrap();
            Ok(())
        })
        .unwrap();
        // run
        let input = ExecInput { target: Some(20000), ..Default::default() };
        let mut stage = IndexStorageHistoryStage {
            prune_mode: Some(PruneMode::Before(36)),
            ..Default::default()
        };
        let provider = db.factory.database_provider_rw().unwrap();
        let out = stage.execute(&provider, input).unwrap();
        assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true });
        provider.commit().unwrap();
        // verify: block 20 was pruned away, 36 and 100 were indexed
        let table = cast(db.table::<tables::StoragesHistory>().unwrap());
        assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])]));
        // unwind
        unwind(&db, 20000, 0);
        // verify initial state
        let table = db.table::<tables::StoragesHistory>().unwrap();
        assert!(table.is_empty());
    }
    stage_test_suite_ext!(IndexStorageHistoryTestRunner, index_storage_history);

    /// Harness state for the generic stage test suite.
    struct IndexStorageHistoryTestRunner {
        // Backing test database shared with the suite.
        pub(crate) db: TestStageDB,
        // Commit threshold passed to the stage under test.
        commit_threshold: u64,
        // Optional pruning mode passed to the stage under test.
        prune_mode: Option<PruneMode>,
    }

    impl Default for IndexStorageHistoryTestRunner {
        fn default() -> Self {
            Self { db: TestStageDB::default(), commit_threshold: 1000, prune_mode: None }
        }
    }

    impl StageTestRunner for IndexStorageHistoryTestRunner {
        type S = IndexStorageHistoryStage;

        fn db(&self) -> &TestStageDB {
            &self.db
        }

        /// Builds the stage under test from the runner's current configuration.
        fn stage(&self) -> Self::S {
            Self::S {
                commit_threshold: self.commit_threshold,
                prune_mode: self.prune_mode,
                etl_config: EtlConfig::default(),
            }
        }
    }

    impl ExecuteStageTestRunner for IndexStorageHistoryTestRunner {
        type Seed = ();

        /// Seeds random blocks plus storage changesets over the execution range.
        fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
            let stage_process = input.checkpoint().block_number;
            let start = stage_process + 1;
            let end = input.target();
            let mut rng = generators::rng();
            let num_of_accounts = 31;
            let accounts = random_contract_account_range(&mut rng, &mut (0..num_of_accounts))
                .into_iter()
                .collect::<BTreeMap<_, _>>();
            let blocks = random_block_range(
                &mut rng,
                start..=end,
                BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..3, ..Default::default() },
            );
            let (changesets, _) = random_changeset_range(
                &mut rng,
                blocks.iter(),
                accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))),
                0..3,
                0..u64::MAX,
            );
            // add block changeset from block 1.
            self.db.insert_changesets(changesets, Some(start))?;
            Ok(())
        }

        /// Re-derives the expected sharded index from the raw changesets and
        /// compares it against the table the stage produced.
        fn validate_execution(
            &self,
            input: ExecInput,
            output: Option<ExecOutput>,
        ) -> Result<(), TestRunnerError> {
            if let Some(output) = output {
                let start_block = input.next_block();
                let end_block = output.checkpoint.block_number;
                if start_block > end_block {
                    return Ok(())
                }

                assert_eq!(
                    output,
                    ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }
                );

                let provider = self.db.factory.provider()?;
                let mut changeset_cursor =
                    provider.tx_ref().cursor_read::<tables::StorageChangeSets>()?;

                // Group touched block numbers by (address, storage key).
                let storage_transitions = changeset_cursor
                    .walk_range(BlockNumberAddress::range(start_block..=end_block))?
                    .try_fold(
                        BTreeMap::new(),
                        |mut storages: BTreeMap<(Address, B256), Vec<u64>>,
                         entry|
                         -> Result<_, TestRunnerError> {
                            let (index, storage) = entry?;
                            storages
                                .entry((index.address(), storage.key))
                                .or_default()
                                .push(index.block_number());
                            Ok(storages)
                        },
                    )?;

                let mut result = BTreeMap::new();
                for (partial_key, indices) in storage_transitions {
                    // chunk indices and insert them in shards of N size.
                    let mut chunks = indices
                        .iter()
                        .chunks(sharded_key::NUM_OF_INDICES_IN_SHARD)
                        .into_iter()
                        .map(|chunks| chunks.copied().collect::<Vec<u64>>())
                        .collect::<Vec<Vec<_>>>();
                    let last_chunk = chunks.pop();

                    // Closed shards are keyed by their highest block number...
                    for list in chunks {
                        result.insert(
                            StorageShardedKey::new(
                                partial_key.0,
                                partial_key.1,
                                *list.last().expect("Chuck does not return empty list")
                                    as BlockNumber,
                            ),
                            list,
                        );
                    }
                    // ...while the open (last) shard is keyed by u64::MAX.
                    if let Some(last_list) = last_chunk {
                        result.insert(
                            StorageShardedKey::new(partial_key.0, partial_key.1, u64::MAX),
                            last_list,
                        );
                    };
                }

                let table = cast(self.db.table::<tables::StoragesHistory>().unwrap());
                assert_eq!(table, result);
            }
            Ok(())
        }
    }

    impl UnwindStageTestRunner for IndexStorageHistoryTestRunner {
        /// After a full unwind the history table must be empty.
        fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
            let table = self.db.table::<tables::StoragesHistory>().unwrap();
            assert!(table.is_empty());
            Ok(())
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/stages/stages/src/stages/merkle.rs | crates/stages/stages/src/stages/merkle.rs | use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader};
use alloy_primitives::{BlockNumber, Sealable, B256};
use reth_codecs::Compact;
use reth_consensus::ConsensusError;
use reth_db_api::{
tables,
transaction::{DbTx, DbTxMut},
};
use reth_primitives_traits::{GotExpected, SealedHeader};
use reth_provider::{
DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, StageCheckpointWriter,
StatsReader, TrieWriter,
};
use reth_stages_api::{
BlockErrorKind, EntitiesCheckpoint, ExecInput, ExecOutput, MerkleCheckpoint, Stage,
StageCheckpoint, StageError, StageId, StorageRootMerkleCheckpoint, UnwindInput, UnwindOutput,
};
use reth_trie::{IntermediateStateRootState, StateRoot, StateRootProgress, StoredSubNode};
use reth_trie_db::DatabaseStateRoot;
use std::fmt::Debug;
use tracing::*;
// TODO: automate the process outlined below so the user can just send in a debugging package
/// The error message that we include in invalid state root errors to tell users what information
/// they should include in a bug report, since true state root errors can be impossible to debug
/// with just basic logs.
pub const INVALID_STATE_ROOT_ERROR_MESSAGE: &str = r#"
Invalid state root error on stage verification!
This is an error that likely requires a report to the reth team with additional information.
Please include the following information in your report:
 * This error message
 * The state root of the block that was rejected
 * The output of `reth db stats --checksum` from the database that was being used. This will take a long time to run!
 * 50-100 lines of logs before and after the first occurrence of the log message with the state root of the block that was rejected.
 * The debug logs from __the same time period__. To find the default location for these logs, run:
   `reth --help | grep -A 4 'log.file.directory'`
Once you have this information, please submit a github issue at https://github.com/paradigmxyz/reth/issues/new
"#;

/// The default threshold (in number of blocks) for switching from incremental trie building
/// of changes to whole rebuild.
pub const MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD: u64 = 100_000;

/// The default threshold (in number of blocks) to run the stage in incremental mode. The
/// incremental mode will calculate the state root for a large range of blocks by calculating the
/// new state root for this many blocks, in batches, repeating until we reach the desired block
/// number.
pub const MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD: u64 = 7_000;

/// The merkle hashing stage uses input from
/// [`AccountHashingStage`][crate::stages::AccountHashingStage] and
/// [`StorageHashingStage`][crate::stages::StorageHashingStage] to calculate intermediate hashes
/// and state roots.
///
/// This stage should be run with the above two stages, otherwise it is a no-op.
///
/// This stage is split in two: one for calculating hashes and one for unwinding.
///
/// When run in execution, it's going to be executed AFTER the hashing stages, to generate
/// the state root. When run in unwind mode, it's going to be executed BEFORE the hashing stages,
/// so that it unwinds the intermediate hashes based on the unwound hashed state from the hashing
/// stages. The order of these two variants is important. The unwind variant should be added to the
/// pipeline before the execution variant.
///
/// An example pipeline to only hash state would be:
///
/// - [`MerkleStage::Unwind`]
/// - [`AccountHashingStage`][crate::stages::AccountHashingStage]
/// - [`StorageHashingStage`][crate::stages::StorageHashingStage]
/// - [`MerkleStage::Execution`]
#[derive(Debug, Clone)]
pub enum MerkleStage {
    /// The execution portion of the merkle stage.
    Execution {
        // TODO: make struct for holding incremental settings, for code reuse between `Execution`
        // variant and `Both`
        /// The threshold (in number of blocks) for switching from incremental trie building
        /// of changes to whole rebuild.
        rebuild_threshold: u64,
        /// The threshold (in number of blocks) to run the stage in incremental mode. The
        /// incremental mode will calculate the state root by calculating the new state root for
        /// some number of blocks, repeating until we reach the desired block number.
        incremental_threshold: u64,
    },
    /// The unwind portion of the merkle stage.
    Unwind,
    /// Able to execute and unwind. Used for tests
    #[cfg(any(test, feature = "test-utils"))]
    Both {
        /// The threshold (in number of blocks) for switching from incremental trie building
        /// of changes to whole rebuild.
        rebuild_threshold: u64,
        /// The threshold (in number of blocks) to run the stage in incremental mode. The
        /// incremental mode will calculate the state root by calculating the new state root for
        /// some number of blocks, repeating until we reach the desired block number.
        incremental_threshold: u64,
    },
}
impl MerkleStage {
    /// [`MerkleStage::Execution`] configured with the default rebuild and
    /// incremental thresholds.
    pub const fn default_execution() -> Self {
        Self::Execution {
            rebuild_threshold: MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD,
            incremental_threshold: MERKLE_STAGE_DEFAULT_INCREMENTAL_THRESHOLD,
        }
    }

    /// [`MerkleStage::Unwind`] — the unwind-only variant.
    pub const fn default_unwind() -> Self {
        Self::Unwind
    }

    /// [`MerkleStage::Execution`] with explicit thresholds.
    pub const fn new_execution(rebuild_threshold: u64, incremental_threshold: u64) -> Self {
        Self::Execution { rebuild_threshold, incremental_threshold }
    }

    /// Loads the persisted intermediate merkle checkpoint, if any.
    ///
    /// Returns `None` when no checkpoint bytes are stored (or they are empty).
    pub fn get_execution_checkpoint(
        &self,
        provider: &impl StageCheckpointReader,
    ) -> Result<Option<MerkleCheckpoint>, StageError> {
        let stored = provider.get_stage_checkpoint_progress(StageId::MerkleExecute)?;
        match stored {
            Some(buf) if !buf.is_empty() => {
                let (checkpoint, _) = MerkleCheckpoint::from_compact(&buf, buf.len());
                Ok(Some(checkpoint))
            }
            _ => Ok(None),
        }
    }

    /// Persists the intermediate merkle checkpoint; passing `None` clears it
    /// (an empty byte buffer is written).
    pub fn save_execution_checkpoint(
        &self,
        provider: &impl StageCheckpointWriter,
        checkpoint: Option<MerkleCheckpoint>,
    ) -> Result<(), StageError> {
        let mut encoded = Vec::new();
        if let Some(checkpoint) = checkpoint {
            debug!(
                target: "sync::stages::merkle::exec",
                last_account_key = ?checkpoint.last_account_key,
                "Saving inner merkle checkpoint"
            );
            checkpoint.to_compact(&mut encoded);
        }
        provider.save_stage_checkpoint_progress(StageId::MerkleExecute, encoded)?;
        Ok(())
    }
}
impl<Provider> Stage<Provider> for MerkleStage
where
    Provider: DBProvider<Tx: DbTxMut>
        + TrieWriter
        + StatsReader
        + HeaderProvider
        + StageCheckpointReader
        + StageCheckpointWriter,
{
    /// Return the id of the stage
    fn id(&self) -> StageId {
        match self {
            Self::Execution { .. } => StageId::MerkleExecute,
            Self::Unwind => StageId::MerkleUnwind,
            #[cfg(any(test, feature = "test-utils"))]
            Self::Both { .. } => StageId::Other("MerkleBoth"),
        }
    }

    /// Execute the stage.
    ///
    /// Chooses between a full trie rebuild (resumable via an inner merkle
    /// checkpoint) and chunked incremental root updates, then validates the
    /// computed root against the target block header.
    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError> {
        // The `Unwind` variant is a no-op during execution; the others yield
        // their configured thresholds.
        let (threshold, incremental_threshold) = match self {
            Self::Unwind => {
                info!(target: "sync::stages::merkle::unwind", "Stage is always skipped");
                return Ok(ExecOutput::done(StageCheckpoint::new(input.target())))
            }
            Self::Execution { rebuild_threshold, incremental_threshold } => {
                (*rebuild_threshold, *incremental_threshold)
            }
            #[cfg(any(test, feature = "test-utils"))]
            Self::Both { rebuild_threshold, incremental_threshold } => {
                (*rebuild_threshold, *incremental_threshold)
            }
        };

        let range = input.next_block_range();
        let (from_block, to_block) = range.clone().into_inner();
        let current_block_number = input.checkpoint().block_number;

        // The expected state root is taken from the target block header.
        let target_block = provider
            .header_by_number(to_block)?
            .ok_or_else(|| ProviderError::HeaderNotFound(to_block.into()))?;
        let target_block_root = target_block.state_root();

        let (trie_root, entities_checkpoint) = if range.is_empty() {
            // Nothing to do — reuse the header root and existing checkpoint.
            (target_block_root, input.checkpoint().entities_stage_checkpoint().unwrap_or_default())
        } else if to_block - from_block > threshold || from_block == 1 {
            let mut checkpoint = self.get_execution_checkpoint(provider)?;

            // if there are more blocks than threshold it is faster to rebuild the trie
            let mut entities_checkpoint = if let Some(checkpoint) =
                checkpoint.as_ref().filter(|c| c.target_block == to_block)
            {
                // Resume a previously-interrupted rebuild targeting this block.
                debug!(
                    target: "sync::stages::merkle::exec",
                    current = ?current_block_number,
                    target = ?to_block,
                    last_account_key = ?checkpoint.last_account_key,
                    "Continuing inner merkle checkpoint"
                );
                input.checkpoint().entities_stage_checkpoint()
            } else {
                debug!(
                    target: "sync::stages::merkle::exec",
                    current = ?current_block_number,
                    target = ?to_block,
                    previous_checkpoint = ?checkpoint,
                    "Rebuilding trie"
                );
                // Reset the checkpoint and clear trie tables
                checkpoint = None;
                self.save_execution_checkpoint(provider, None)?;
                provider.tx_ref().clear::<tables::AccountsTrie>()?;
                provider.tx_ref().clear::<tables::StoragesTrie>()?;
                None
            }
            .unwrap_or(EntitiesCheckpoint {
                processed: 0,
                total: (provider.count_entries::<tables::HashedAccounts>()? +
                    provider.count_entries::<tables::HashedStorages>()?)
                    as u64,
            });

            let tx = provider.tx_ref();
            let progress = StateRoot::from_tx(tx)
                .with_intermediate_state(checkpoint.map(IntermediateStateRootState::from))
                .root_with_progress()
                .map_err(|e| {
                    error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "State root with progress failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}");
                    StageError::Fatal(Box::new(e))
                })?;
            match progress {
                // The rebuild was interrupted mid-way: persist the intermediate
                // walker/hash-builder state and hand control back to the pipeline.
                StateRootProgress::Progress(state, hashed_entries_walked, updates) => {
                    provider.write_trie_updates(&updates)?;

                    let mut checkpoint = MerkleCheckpoint::new(
                        to_block,
                        state.account_root_state.last_hashed_key,
                        state
                            .account_root_state
                            .walker_stack
                            .into_iter()
                            .map(StoredSubNode::from)
                            .collect(),
                        state.account_root_state.hash_builder.into(),
                    );

                    // Save storage root state if present
                    if let Some(storage_state) = state.storage_root_state {
                        checkpoint.storage_root_checkpoint =
                            Some(StorageRootMerkleCheckpoint::new(
                                storage_state.state.last_hashed_key,
                                storage_state
                                    .state
                                    .walker_stack
                                    .into_iter()
                                    .map(StoredSubNode::from)
                                    .collect(),
                                storage_state.state.hash_builder.into(),
                                storage_state.account.nonce,
                                storage_state.account.balance,
                                storage_state.account.bytecode_hash.unwrap_or(KECCAK_EMPTY),
                            ));
                    }

                    self.save_execution_checkpoint(provider, Some(checkpoint))?;

                    entities_checkpoint.processed += hashed_entries_walked as u64;

                    return Ok(ExecOutput {
                        checkpoint: input
                            .checkpoint()
                            .with_entities_stage_checkpoint(entities_checkpoint),
                        done: false,
                    })
                }
                StateRootProgress::Complete(root, hashed_entries_walked, updates) => {
                    provider.write_trie_updates(&updates)?;

                    entities_checkpoint.processed += hashed_entries_walked as u64;

                    (root, entities_checkpoint)
                }
            }
        } else {
            debug!(target: "sync::stages::merkle::exec", current = ?current_block_number, target = ?to_block, "Updating trie in chunks");
            let mut final_root = None;
            // Apply incremental root updates in `incremental_threshold`-sized
            // chunks so intermediate trie updates are flushed along the way.
            // NOTE(review): consecutive chunks share a boundary block (`chunk_to`
            // equals the next chunk's start) — presumably re-processing that block
            // is idempotent over hashed state; confirm.
            for start_block in range.step_by(incremental_threshold as usize) {
                let chunk_to = std::cmp::min(start_block + incremental_threshold, to_block);
                let chunk_range = start_block..=chunk_to;
                debug!(
                    target: "sync::stages::merkle::exec",
                    current = ?current_block_number,
                    target = ?to_block,
                    incremental_threshold,
                    chunk_range = ?chunk_range,
                    "Processing chunk"
                );
                let (root, updates) =
                    StateRoot::incremental_root_with_updates(provider.tx_ref(), chunk_range)
                        .map_err(|e| {
                            error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}");
                            StageError::Fatal(Box::new(e))
                        })?;
                provider.write_trie_updates(&updates)?;
                final_root = Some(root);
            }

            // if we had no final root, we must have not looped above, which should not be possible
            let final_root = final_root.ok_or(StageError::Fatal(
                "Incremental merkle hashing did not produce a final root".into(),
            ))?;

            let total_hashed_entries = (provider.count_entries::<tables::HashedAccounts>()? +
                provider.count_entries::<tables::HashedStorages>()?)
                as u64;

            let entities_checkpoint = EntitiesCheckpoint {
                // This is fine because `range` doesn't have an upper bound, so in this `else`
                // branch we're just hashing all remaining accounts and storage slots we have in the
                // database.
                processed: total_hashed_entries,
                total: total_hashed_entries,
            };

            // Save the checkpoint
            (final_root, entities_checkpoint)
        };

        // Reset the checkpoint
        self.save_execution_checkpoint(provider, None)?;

        validate_state_root(trie_root, SealedHeader::seal_slow(target_block), to_block)?;

        Ok(ExecOutput {
            checkpoint: StageCheckpoint::new(to_block)
                .with_entities_stage_checkpoint(entities_checkpoint),
            done: true,
        })
    }

    /// Unwind the stage.
    ///
    /// Recomputes the incremental root over the unwound range, validates it
    /// against the `unwind_to` header, and only then applies the trie updates.
    fn unwind(
        &mut self,
        provider: &Provider,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        let tx = provider.tx_ref();
        let range = input.unwind_block_range();
        // The `Execution` variant is a no-op during unwind (mirror of `execute`).
        if matches!(self, Self::Execution { .. }) {
            info!(target: "sync::stages::merkle::unwind", "Stage is always skipped");
            return Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
        }

        let mut entities_checkpoint =
            input.checkpoint.entities_stage_checkpoint().unwrap_or(EntitiesCheckpoint {
                processed: 0,
                total: (tx.entries::<tables::HashedAccounts>()? +
                    tx.entries::<tables::HashedStorages>()?) as u64,
            });

        if input.unwind_to == 0 {
            // Unwinding to genesis: drop the whole trie instead of reverting.
            tx.clear::<tables::AccountsTrie>()?;
            tx.clear::<tables::StoragesTrie>()?;

            entities_checkpoint.processed = 0;

            return Ok(UnwindOutput {
                checkpoint: StageCheckpoint::new(input.unwind_to)
                    .with_entities_stage_checkpoint(entities_checkpoint),
            })
        }

        // Unwind trie only if there are transitions
        if range.is_empty() {
            info!(target: "sync::stages::merkle::unwind", "Nothing to unwind");
        } else {
            let (block_root, updates) = StateRoot::incremental_root_with_updates(tx, range)
                .map_err(|e| StageError::Fatal(Box::new(e)))?;

            // Validate the calculated state root
            let target = provider
                .header_by_number(input.unwind_to)?
                .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?;

            validate_state_root(block_root, SealedHeader::seal_slow(target), input.unwind_to)?;

            // Validation passed, apply unwind changes to the database.
            provider.write_trie_updates(&updates)?;

            // Update entities checkpoint to reflect the unwind operation
            // Since we're unwinding, we need to recalculate the total entities at the target block
            let accounts = tx.entries::<tables::HashedAccounts>()?;
            let storages = tx.entries::<tables::HashedStorages>()?;
            let total = (accounts + storages) as u64;
            entities_checkpoint.total = total;
            entities_checkpoint.processed = total;
        }

        Ok(UnwindOutput {
            checkpoint: StageCheckpoint::new(input.unwind_to)
                .with_entities_stage_checkpoint(entities_checkpoint),
        })
    }
}
/// Check that the computed state root matches the root in the expected header.
#[inline]
fn validate_state_root<H: BlockHeader + Sealable + Debug>(
got: B256,
expected: SealedHeader<H>,
target_block: BlockNumber,
) -> Result<(), StageError> {
if got == expected.state_root() {
Ok(())
} else {
error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}");
Err(StageError::Block {
error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff(
GotExpected { got, expected: expected.state_root() }.into(),
)),
block: Box::new(expected.block_with_parent()),
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{
stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind,
TestRunnerError, TestStageDB, UnwindStageTestRunner,
};
use alloy_primitives::{keccak256, U256};
use assert_matches::assert_matches;
use reth_db_api::cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO};
use reth_primitives_traits::SealedBlock;
use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory};
use reth_stages_api::StageUnitCheckpoint;
use reth_static_file_types::StaticFileSegment;
use reth_testing_utils::generators::{
self, random_block, random_block_range, random_changeset_range,
random_contract_account_range, BlockParams, BlockRangeParams,
};
use reth_trie::test_utils::{state_root, state_root_prehashed};
use std::collections::BTreeMap;
stage_test_suite_ext!(MerkleTestRunner, merkle);
/// Execute from genesis so as to merkelize whole state
#[tokio::test]
async fn execute_clean_merkle() {
let (previous_stage, stage_progress) = (500, 0);
// Set up the runner
let mut runner = MerkleTestRunner::default();
// set low threshold so we hash the whole storage
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
let rx = runner.execute(input);
// Assert the successful result
let result = rx.await.unwrap();
assert_matches!(
result,
Ok(ExecOutput {
checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
},
done: true
}) if block_number == previous_stage && processed == total &&
total == (
runner.db.table::<tables::HashedAccounts>().unwrap().len() +
runner.db.table::<tables::HashedStorages>().unwrap().len()
) as u64
);
// Validate the stage execution
assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
}
/// Update small trie
#[tokio::test]
async fn execute_small_merkle() {
let (previous_stage, stage_progress) = (2, 1);
// Set up the runner
let mut runner = MerkleTestRunner::default();
let input = ExecInput {
target: Some(previous_stage),
checkpoint: Some(StageCheckpoint::new(stage_progress)),
};
runner.seed_execution(input).expect("failed to seed execution");
let rx = runner.execute(input);
// Assert the successful result
let result = rx.await.unwrap();
assert_matches!(
result,
Ok(ExecOutput {
checkpoint: StageCheckpoint {
block_number,
stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
processed,
total
}))
},
done: true
}) if block_number == previous_stage && processed == total &&
total == (
runner.db.table::<tables::HashedAccounts>().unwrap().len() +
runner.db.table::<tables::HashedStorages>().unwrap().len()
) as u64
);
// Validate the stage execution
assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation");
}
    /// Exercises chunked incremental trie updates and verifies the resulting
    /// state root against the target header.
    #[tokio::test]
    async fn execute_chunked_merkle() {
        // Advance from checkpoint 100 to target 200 (a 100-block range).
        let (previous_stage, stage_progress) = (200, 100);
        // NOTE(review): thresholds appear sized so the 100-block range is handled
        // in 10-block chunks by the incremental branch — confirm against the
        // threshold selection logic in `MerkleStage::execute`.
        let clean_threshold = 100;
        let incremental_threshold = 10;

        // Set up the runner with explicit thresholds (not the defaults).
        let mut runner =
            MerkleTestRunner { db: TestStageDB::default(), clean_threshold, incremental_threshold };
        let input = ExecInput {
            target: Some(previous_stage),
            checkpoint: Some(StageCheckpoint::new(stage_progress)),
        };
        runner.seed_execution(input).expect("failed to seed execution");
        let rx = runner.execute(input);

        // Assert the successful result: the stage reaches the target block and the
        // entities checkpoint covers every hashed account/storage entry.
        let result = rx.await.unwrap();
        assert_matches!(
            result,
            Ok(ExecOutput {
                checkpoint: StageCheckpoint {
                    block_number,
                    stage_checkpoint: Some(StageUnitCheckpoint::Entities(EntitiesCheckpoint {
                        processed,
                        total
                    }))
                },
                done: true
            }) if block_number == previous_stage && processed == total &&
                total == (
                    runner.db.table::<tables::HashedAccounts>().unwrap().len() +
                    runner.db.table::<tables::HashedStorages>().unwrap().len()
                ) as u64
        );

        // Validate the stage execution: recompute the incremental root over the
        // same range and compare it with the root stored in the target header.
        let provider = runner.db.factory.provider().unwrap();
        let header = provider.header_by_number(previous_stage).unwrap().unwrap();
        let expected_root = header.state_root;

        let actual_root = runner
            .db
            .query(|tx| {
                Ok(StateRoot::incremental_root_with_updates(
                    tx,
                    stage_progress + 1..=previous_stage,
                ))
            })
            .unwrap();

        assert_eq!(
            actual_root.unwrap().0,
            expected_root,
            "State root mismatch after chunked processing"
        );
    }
    /// Test harness state for the merkle stage tests.
    struct MerkleTestRunner {
        /// Test database the stage reads from and writes to.
        db: TestStageDB,
        /// Forwarded to [`MerkleStage::Both`] as `rebuild_threshold`.
        clean_threshold: u64,
        /// Forwarded to [`MerkleStage::Both`] as `incremental_threshold`.
        incremental_threshold: u64,
    }
impl Default for MerkleTestRunner {
fn default() -> Self {
Self {
db: TestStageDB::default(),
clean_threshold: 10000,
incremental_threshold: 10000,
}
}
}
impl StageTestRunner for MerkleTestRunner {
type S = MerkleStage;
fn db(&self) -> &TestStageDB {
&self.db
}
fn stage(&self) -> Self::S {
Self::S::Both {
rebuild_threshold: self.clean_threshold,
incremental_threshold: self.incremental_threshold,
}
}
}
    impl ExecuteStageTestRunner for MerkleTestRunner {
        type Seed = Vec<SealedBlock<reth_ethereum_primitives::Block>>;

        /// Seeds the test database for an execution run: random pre-blocks up to
        /// the checkpoint, random accounts/storages, random changesets over the
        /// execution range, and a target header whose state root matches the
        /// seeded post-state (written via the static-file writer).
        fn seed_execution(&mut self, input: ExecInput) -> Result<Self::Seed, TestRunnerError> {
            let stage_progress = input.checkpoint().block_number;
            let start = stage_progress + 1;
            let end = input.target();
            let mut rng = generators::rng();

            // Blocks strictly before the checkpoint block, inserted as static files.
            let mut preblocks = vec![];
            if stage_progress > 0 {
                preblocks.append(&mut random_block_range(
                    &mut rng,
                    0..=stage_progress - 1,
                    BlockRangeParams {
                        parent: Some(B256::ZERO),
                        tx_count: 0..1,
                        ..Default::default()
                    },
                ));
                self.db.insert_blocks(preblocks.iter(), StorageKind::Static)?;
            }

            // Seed a fixed number of random contract accounts (no storage yet).
            let num_of_accounts = 31;
            let accounts = random_contract_account_range(&mut rng, &mut (0..num_of_accounts))
                .into_iter()
                .collect::<BTreeMap<_, _>>();

            self.db.insert_accounts_and_storages(
                accounts.iter().map(|(addr, acc)| (*addr, (*acc, std::iter::empty()))),
            )?;

            // Build the checkpoint ("head") block and stamp it with the state
            // root of the seeded accounts so execution starts from a valid root.
            let (header, body) = random_block(
                &mut rng,
                stage_progress,
                BlockParams { parent: preblocks.last().map(|b| b.hash()), ..Default::default() },
            )
            .split_sealed_header_body();
            let mut header = header.unseal();

            header.state_root = state_root(
                accounts
                    .clone()
                    .into_iter()
                    .map(|(address, account)| (address, (account, std::iter::empty()))),
            );
            let sealed_head = SealedBlock::<reth_ethereum_primitives::Block>::from_sealed_parts(
                SealedHeader::seal_slow(header),
                body,
            );

            // Blocks for the execution range itself, chained onto the head.
            let head_hash = sealed_head.hash();
            let mut blocks = vec![sealed_head];
            blocks.extend(random_block_range(
                &mut rng,
                start..=end,
                BlockRangeParams { parent: Some(head_hash), tx_count: 0..3, ..Default::default() },
            ));
            let last_block = blocks.last().cloned().unwrap();
            self.db.insert_blocks(blocks.iter(), StorageKind::Static)?;

            // Random account/storage transitions per block, plus the resulting
            // final state to insert into the hashed tables.
            let (transitions, final_state) = random_changeset_range(
                &mut rng,
                blocks.iter(),
                accounts.into_iter().map(|(addr, acc)| (addr, (acc, Vec::new()))),
                0..3,
                0..256,
            );
            // add block changeset from block 1.
            self.db.insert_changesets(transitions, Some(start))?;
            self.db.insert_accounts_and_storages(final_state)?;

            // Calculate state root of the final (post-execution) state directly
            // from the hashed tables, dropping zero-valued storage slots.
            let root = self.db.query(|tx| {
                let mut accounts = BTreeMap::default();
                let mut accounts_cursor = tx.cursor_read::<tables::HashedAccounts>()?;
                let mut storage_cursor = tx.cursor_dup_read::<tables::HashedStorages>()?;
                for entry in accounts_cursor.walk_range(..)? {
                    let (key, account) = entry?;
                    let mut storage_entries = Vec::new();
                    // Walk all dup entries (storage slots) for this hashed account.
                    let mut entry = storage_cursor.seek_exact(key)?;
                    while let Some((_, storage)) = entry {
                        storage_entries.push(storage);
                        entry = storage_cursor.next_dup()?;
                    }
                    let storage = storage_entries
                        .into_iter()
                        .filter(|v| !v.value.is_zero())
                        .map(|v| (v.key, v.value))
                        .collect::<Vec<_>>();
                    accounts.insert(key, (account, storage));
                }

                Ok(state_root_prehashed(
                    accounts
                        .into_iter()
                        .map(|(key, (a, b))| (key, (a, b.into_iter().map(|(k, v)| (k, v))))),
                ))
            })?;

            // Replace the last header in the static files with one carrying the
            // computed root: prune it, commit, append the fixed header, commit.
            let static_file_provider = self.db.factory.static_file_provider();
            let mut writer =
                static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap();
            let mut last_header = last_block.clone_sealed_header();
            last_header.set_state_root(root);
            let hash = last_header.hash_slow();
            writer.prune_headers(1).unwrap();
            writer.commit().unwrap();
            writer.append_header(&last_header, U256::ZERO, &hash).unwrap();
            writer.commit().unwrap();

            Ok(blocks)
        }

        /// No-op: the state-root check inside the stage is the validation.
        fn validate_execution(
            &self,
            _input: ExecInput,
            _output: Option<ExecOutput>,
        ) -> Result<(), TestRunnerError> {
            // The execution is validated within the stage
            Ok(())
        }
    }
impl UnwindStageTestRunner for MerkleTestRunner {
fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> {
// The unwind is validated within the stage
Ok(())
}
fn before_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> {
let target_block = input.unwind_to + 1;
self.db
.commit(|tx| {
let mut storage_changesets_cursor =
tx.cursor_dup_read::<tables::StorageChangeSets>().unwrap();
let mut storage_cursor =
tx.cursor_dup_write::<tables::HashedStorages>().unwrap();
let mut tree: BTreeMap<B256, BTreeMap<B256, alloy_primitives::FlaggedStorage>> =
BTreeMap::new();
let mut rev_changeset_walker =
storage_changesets_cursor.walk_back(None).unwrap();
while let Some((bn_address, entry)) =
rev_changeset_walker.next().transpose().unwrap()
{
if bn_address.block_number() < target_block {
break
}
tree.entry(keccak256(bn_address.address()))
.or_default()
.insert(keccak256(entry.key), entry.value);
}
for (hashed_address, storage) in tree {
for (hashed_slot, value) in storage {
let storage_entry = storage_cursor
.seek_by_key_subkey(hashed_address, hashed_slot)
.unwrap();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.