repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/error.rs | crates/genesis-builder/src/error.rs | use std::path::PathBuf;
use thiserror::Error;
use toml::de::Error as TomlError;
/// Errors that can occur during genesis building
#[derive(Debug, Error)]
pub enum BuilderError {
    /// The manifest file does not exist at the given path.
    #[error("Manifest not found: {}", .0.display())]
    ManifestNotFound(PathBuf),
    /// The manifest was parsed but defines no contracts.
    #[error("No contracts defined in manifest")]
    NoContractsDefined,
    /// The genesis file does not exist at the given path.
    #[error("Genesis file not found: {}", .0.display())]
    GenesisNotFound(PathBuf),
    /// The given string could not be parsed as an address.
    #[error("Invalid address format: {0}")]
    InvalidAddress(String),
    /// The given string could not be parsed as hex data.
    #[error("Invalid hex format: {0}")]
    InvalidHex(String),
    /// The named contract's bytecode is missing or empty.
    #[error("Contract bytecode missing or empty: {0}")]
    MissingBytecode(String),
    /// The given address is already present in the genesis alloc.
    #[error("Address collision: {0} already exists in genesis")]
    AddressCollision(String),
    /// Fetching an artifact from a remote source failed (source, reason).
    #[error("Failed to fetch artifact from {0}: {1}")]
    RemoteFetchFailed(String, String),
    /// Wrapper for an underlying [`std::io::Error`].
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// Wrapper for an underlying [`serde_json::Error`].
    #[error("JSON parse error: {0}")]
    Json(#[from] serde_json::Error),
    /// Wrapper for an underlying [`toml::de::Error`].
    #[error("TOML parse error: {0}")]
    Toml(#[from] TomlError),
}
/// Convenience alias for `std::result::Result` with [`BuilderError`] as the error type,
/// used throughout the genesis builder.
pub type Result<T> = std::result::Result<T, BuilderError>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/genesis-builder/src/types.rs | crates/genesis-builder/src/types.rs | use alloy_primitives::{Address, Bytes};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Default base URL for fetching artifacts, used when the manifest's
/// `metadata.base_url` field is not provided explicitly.
pub const DEFAULT_BASE_URL: &str =
    "https://raw.githubusercontent.com/SeismicSystems/seismic-contracts/main";
/// Per-contract configuration entry from the manifest.
#[derive(Debug, Deserialize, Clone)]
pub struct ContractConfig {
    /// Relative path to the artifact file.
    pub artifact: String,
    /// Address where the contract will be deployed, as a string
    /// (parsing/validation happens elsewhere).
    pub address: String,
}
/// Full manifest structure as deserialized from `manifest.toml`.
#[derive(Debug, Deserialize)]
pub struct Manifest {
    /// Metadata in `manifest.toml`.
    pub metadata: ManifestMetadata,
    /// Contracts to deploy, keyed by an identifier (presumably the contract name).
    pub contracts: HashMap<String, ContractConfig>,
}
/// Metadata section of `manifest.toml`.
#[derive(Debug, Deserialize)]
pub struct ManifestMetadata {
    /// Version of the manifest.
    pub version: String,
    /// Optional human-readable description of the manifest.
    pub description: Option<String>,
    /// Base GitHub URL for all artifacts.
    ///
    /// If not provided, [`DEFAULT_BASE_URL`] is used (see [`ManifestMetadata::base_url`]).
    pub base_url: Option<String>,
}
impl ManifestMetadata {
    /// Returns the configured base URL, falling back to [`DEFAULT_BASE_URL`]
    /// when the manifest does not specify one.
    pub fn base_url(&self) -> &str {
        match &self.base_url {
            Some(url) => url.as_str(),
            None => DEFAULT_BASE_URL,
        }
    }
}
/// Contract artifact parsed from an artifact JSON file.
#[derive(Debug)]
pub struct ContractArtifact {
    /// Name of the contract.
    pub name: String,
    /// Deployed (runtime) bytecode of the contract.
    pub deployed_bytecode: Bytes,
}
/// Genesis file structure.
///
/// Only `config` and `alloc` are modeled explicitly; any remaining top-level
/// fields are preserved round-trip through the flattened `other` map.
#[derive(Debug, Serialize, Deserialize)]
pub struct Genesis {
    /// Chain configuration section, kept as raw JSON.
    pub config: serde_json::Value,
    /// Account allocations of the genesis file, keyed by address.
    pub alloc: HashMap<Address, GenesisAccount>,
    /// All other top-level fields of the genesis file, captured as-is.
    #[serde(flatten)]
    pub other: HashMap<String, serde_json::Value>,
}
/// Account entry in the genesis `alloc` map.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GenesisAccount {
    /// Contract code of the account; omitted from serialized output when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    /// Balance of the account, kept as a string (the exact numeric format is
    /// not enforced here — presumably a decimal or hex literal).
    pub balance: String,
    /// Nonce of the account; omitted from serialized output when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub nonce: Option<String>,
    /// Storage slots of the account; omitted from serialized output when empty.
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub storage: HashMap<String, String>,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/banlist/src/lib.rs | crates/net/banlist/src/lib.rs | //! Support for banning peers.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
type PeerId = alloy_primitives::B512;
use std::{collections::HashMap, net::IpAddr, time::Instant};
/// Determines whether or not the IP is globally routable.
/// Should be replaced with [`IpAddr::is_global`](std::net::IpAddr::is_global) once it is stable.
pub const fn is_global(ip: &IpAddr) -> bool {
    match ip {
        // IPv4: reject unspecified (0.0.0.0), loopback, RFC 1918 private and link-local ranges.
        IpAddr::V4(v4) => {
            !v4.is_unspecified() && !v4.is_loopback() && !v4.is_private() && !v4.is_link_local()
        }
        // IPv6: only the unspecified (`::`) and loopback (`::1`) addresses are rejected here.
        IpAddr::V6(v6) => !v6.is_unspecified() && !v6.is_loopback(),
    }
}
/// Stores peers that should be taken out of circulation either indefinitely or until a certain
/// timestamp.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct BanList {
    /// IPs whose packets get dropped instantly; the value is the optional ban expiry
    /// (`None` = banned indefinitely).
    banned_ips: HashMap<IpAddr, Option<Instant>>,
    /// [`PeerId`]s whose packets get dropped instantly; the value is the optional ban expiry
    /// (`None` = banned indefinitely).
    banned_peers: HashMap<PeerId, Option<Instant>>,
}
impl BanList {
    /// Creates a ban list in which all given peers and IP addresses are banned indefinitely.
    pub fn new(
        banned_peers: impl IntoIterator<Item = PeerId>,
        banned_ips: impl IntoIterator<Item = IpAddr>,
    ) -> Self {
        // `None` as the expiry means the ban never times out.
        Self::new_with_timeout(
            banned_peers.into_iter().map(|id| (id, None)).collect(),
            banned_ips.into_iter().map(|addr| (addr, None)).collect(),
        )
    }

    /// Creates a ban list from pre-built maps where each entry carries an optional expiry.
    pub const fn new_with_timeout(
        banned_peers: HashMap<PeerId, Option<Instant>>,
        banned_ips: HashMap<IpAddr, Option<Instant>>,
    ) -> Self {
        Self { banned_ips, banned_peers }
    }

    /// Drops all peer bans whose expiry lies before `now` and returns the evicted peer ids.
    ///
    /// Bans without an expiry (`None`) are never evicted.
    pub fn evict_peers(&mut self, now: Instant) -> Vec<PeerId> {
        let mut expired = Vec::new();
        self.banned_peers.retain(|id, deadline| {
            let still_banned = !matches!(deadline, Some(d) if now > *d);
            if !still_banned {
                expired.push(*id);
            }
            still_banned
        });
        expired
    }

    /// Drops all IP bans whose expiry lies before `now` and returns the evicted addresses.
    ///
    /// Bans without an expiry (`None`) are never evicted.
    pub fn evict_ips(&mut self, now: Instant) -> Vec<IpAddr> {
        let mut expired = Vec::new();
        self.banned_ips.retain(|addr, deadline| {
            let still_banned = !matches!(deadline, Some(d) if now > *d);
            if !still_banned {
                expired.push(*addr);
            }
            still_banned
        });
        expired
    }

    /// Removes all entries that should no longer be banned.
    ///
    /// Returns the evicted entries as an `(ips, peers)` pair.
    pub fn evict(&mut self, now: Instant) -> (Vec<IpAddr>, Vec<PeerId>) {
        (self.evict_ips(now), self.evict_peers(now))
    }

    /// Returns true if either the given peer id _or_ ip address is banned.
    #[inline]
    pub fn is_banned(&self, peer_id: &PeerId, ip: &IpAddr) -> bool {
        self.is_banned_peer(peer_id) || self.is_banned_ip(ip)
    }

    /// Returns true if the given IP address is currently banned.
    #[inline]
    pub fn is_banned_ip(&self, ip: &IpAddr) -> bool {
        self.banned_ips.contains_key(ip)
    }

    /// Returns true if the given peer id is currently banned.
    #[inline]
    pub fn is_banned_peer(&self, peer_id: &PeerId) -> bool {
        self.banned_peers.contains_key(peer_id)
    }

    /// Lifts the ban on the given IP address, if any.
    pub fn unban_ip(&mut self, ip: &IpAddr) {
        self.banned_ips.remove(ip);
    }

    /// Lifts the ban on the given peer, if any.
    pub fn unban_peer(&mut self, peer_id: &PeerId) {
        self.banned_peers.remove(peer_id);
    }

    /// Bans the IP until the given instant.
    ///
    /// This does not ban non-global IPs.
    pub fn ban_ip_until(&mut self, ip: IpAddr, until: Instant) {
        self.ban_ip_with(ip, Some(until));
    }

    /// Bans the peer until the given instant.
    pub fn ban_peer_until(&mut self, node_id: PeerId, until: Instant) {
        self.ban_peer_with(node_id, Some(until));
    }

    /// Bans the IP indefinitely.
    ///
    /// This does not ban non-global IPs.
    pub fn ban_ip(&mut self, ip: IpAddr) {
        self.ban_ip_with(ip, None);
    }

    /// Bans the peer indefinitely.
    pub fn ban_peer(&mut self, node_id: PeerId) {
        self.ban_peer_with(node_id, None);
    }

    /// Bans the peer either indefinitely (`None`) or until the given instant.
    pub fn ban_peer_with(&mut self, node_id: PeerId, until: Option<Instant>) {
        self.banned_peers.insert(node_id, until);
    }

    /// Bans the IP either indefinitely (`None`) or until the given instant.
    ///
    /// This does not ban non-global IPs.
    pub fn ban_ip_with(&mut self, ip: IpAddr, until: Option<Instant>) {
        // Addresses that are not globally routable (private, loopback, …) are silently ignored.
        if !is_global(&ip) {
            return;
        }
        self.banned_ips.insert(ip, until);
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn can_ban_unban_peer() {
        let mut list = BanList::default();
        let id = PeerId::random();

        list.ban_peer(id);
        assert!(list.is_banned_peer(&id));

        list.unban_peer(&id);
        assert!(!list.is_banned_peer(&id));
    }

    #[test]
    fn can_ban_unban_ip() {
        let mut list = BanList::default();
        let addr: IpAddr = [1, 1, 1, 1].into();

        list.ban_ip(addr);
        assert!(list.is_banned_ip(&addr));

        list.unban_ip(&addr);
        assert!(!list.is_banned_ip(&addr));
    }

    #[test]
    fn cannot_ban_non_global() {
        // None of these addresses are globally routable, so banning must be a no-op.
        let non_global: [IpAddr; 5] = [
            [0, 0, 0, 0].into(),
            [10, 0, 0, 0].into(),
            [127, 0, 0, 0].into(),
            [172, 17, 0, 0].into(),
            [172, 16, 0, 0].into(),
        ];
        let mut list = BanList::default();
        for addr in non_global {
            list.ban_ip(addr);
            assert!(!list.is_banned_ip(&addr));
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/backoff.rs | crates/net/network-types/src/backoff.rs | /// Describes the type of backoff should be applied.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BackoffKind {
    /// Use the lowest configured backoff duration.
    ///
    /// This applies to connection problems where there is a chance that they will be resolved
    /// after the short duration.
    Low,
    /// Use a slightly higher duration to put a peer in timeout.
    ///
    /// This applies to more severe connection problems where there is a lower chance that they
    /// will be resolved.
    Medium,
    /// Use the max configured backoff duration.
    ///
    /// This is intended for spammers, or bad peers in general.
    High,
}
// === impl BackoffKind ===
impl BackoffKind {
    /// Returns true if the backoff is considered severe ([`Self::Medium`] or [`Self::High`]).
    pub const fn is_severe(&self) -> bool {
        match self {
            Self::Low => false,
            Self::Medium | Self::High => true,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/lib.rs | crates/net/network-types/src/lib.rs | //! Commonly used networking types.
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// Types related to peering.
pub mod peers;
/// Types related to peer session management.
pub mod session;
/// [`BackoffKind`] definition.
mod backoff;
pub use peers::reputation::{Reputation, ReputationChangeKind, ReputationChangeWeights};
pub use backoff::BackoffKind;
pub use peers::{
    addr::PeerAddr,
    kind::PeerKind,
    reputation::{
        is_banned_reputation, is_connection_failed_reputation, ReputationChangeOutcome,
        DEFAULT_REPUTATION,
    },
    state::PeerConnectionState,
    ConnectionsConfig, Peer, PeersConfig,
};
pub use session::{SessionLimits, SessionsConfig};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/session/config.rs | crates/net/network-types/src/session/config.rs | //! Configuration types for peer sessions manager.
use crate::peers::config::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND};
use std::time::Duration;
/// Default request timeout for a single request.
///
/// This represents the amount of time we wait for a response until we consider it timed out.
pub const INITIAL_REQUEST_TIMEOUT: Duration = Duration::from_secs(20);
/// Default timeout after which a pending session attempt is considered failed.
pub const PENDING_SESSION_TIMEOUT: Duration = Duration::from_secs(20);
/// Default timeout after which we'll consider the peer to be in violation of the protocol.
///
/// This is the time a peer has to answer a request.
pub const PROTOCOL_BREACH_REQUEST_TIMEOUT: Duration = Duration::from_secs(2 * 60);
/// The default maximum number of peers (sum of the default in- and outbound peer counts).
const DEFAULT_MAX_PEERS: usize =
    DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize + DEFAULT_MAX_COUNT_PEERS_INBOUND as usize;
/// The default session event buffer size.
///
/// The actual capacity of the event channel will be `buffer + num sessions`.
/// With maxed out peers, this will allow for 3 messages per session (average).
const DEFAULT_SESSION_EVENT_BUFFER_SIZE: usize = DEFAULT_MAX_PEERS * 2;
/// Configuration options for peer session management.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SessionsConfig {
    /// Size of the session command buffer (per session task).
    pub session_command_buffer: usize,
    /// Size of the session event channel buffer.
    pub session_event_buffer: usize,
    /// Limits to enforce.
    ///
    /// By default, no limits will be enforced.
    pub limits: SessionLimits,
    /// The maximum initial time we wait for a response from the peer before we timeout a request
    /// _internally_.
    pub initial_internal_request_timeout: Duration,
    /// The amount of time we continue to wait for a response from the peer after a request has
    /// already timed out internally (`initial_internal_request_timeout`).
    ///
    /// An internal timeout alone is not penalized. However, if a peer fails to respond at all
    /// within this window, it is considered a protocol violation and results in a dropped
    /// session.
    pub protocol_breach_request_timeout: Duration,
    /// The timeout after which a pending session attempt is considered failed.
    pub pending_session_timeout: Duration,
}
impl Default for SessionsConfig {
    fn default() -> Self {
        Self {
            // This should be sufficient slots for handling commands sent to the session task,
            // since the manager is the sender.
            session_command_buffer: 32,
            // This should be greater since the manager is the receiver. The total size will be
            // `buffer + num sessions`. Each session can therefore fit at least 1 message in the
            // channel. The buffer size is additional capacity. The channel is always drained on
            // `poll`.
            // The default is twice the maximum number of available slots; if all slots are
            // occupied the buffer will have capacity for 3 messages per session (average).
            session_event_buffer: DEFAULT_SESSION_EVENT_BUFFER_SIZE,
            limits: Default::default(),
            initial_internal_request_timeout: INITIAL_REQUEST_TIMEOUT,
            protocol_breach_request_timeout: PROTOCOL_BREACH_REQUEST_TIMEOUT,
            pending_session_timeout: PENDING_SESSION_TIMEOUT,
        }
    }
}
impl SessionsConfig {
    /// Sets the buffer size for the bounded communication channel between the manager and its
    /// sessions for events emitted by the sessions.
    ///
    /// It is expected that the background session tasks will stall if they outpace the manager;
    /// the buffer size provides backpressure on the network I/O.
    pub const fn with_session_event_buffer(mut self, size: usize) -> Self {
        self.session_event_buffer = size;
        self
    }

    /// Helper function to set the buffer size for the bounded communication channel between the
    /// manager and its sessions for events emitted by the sessions.
    ///
    /// This scales the buffer size based on the configured number of peers, where the baseline is
    /// the default buffer size: if the number of peers exceeds the default, the buffer grows to
    /// keep the default `buffer size / max peers` ratio.
    ///
    /// Note: This is capped at 10 times the default buffer size.
    pub fn with_upscaled_event_buffer(mut self, num_peers: usize) -> Self {
        // Hard cap: never grow beyond 10x the default buffer size.
        const MAX_BUFFER: usize = DEFAULT_SESSION_EVENT_BUFFER_SIZE * 10;
        if num_peers > DEFAULT_MAX_PEERS {
            self.session_event_buffer = usize::min(num_peers * 2, MAX_BUFFER);
        }
        self
    }
}
/// Limits for sessions.
///
/// By default, no session limits will be enforced (all fields default to `None`).
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct SessionLimits {
    /// Maximum allowed pending inbound sessions (`None` = unlimited).
    pub max_pending_inbound: Option<u32>,
    /// Maximum allowed pending outbound sessions (`None` = unlimited).
    pub max_pending_outbound: Option<u32>,
    /// Maximum allowed established inbound sessions (`None` = unlimited).
    pub max_established_inbound: Option<u32>,
    /// Maximum allowed established outbound sessions (`None` = unlimited).
    pub max_established_outbound: Option<u32>,
}
impl SessionLimits {
    /// Caps the number of pending inbound sessions.
    pub const fn with_max_pending_inbound(mut self, max: u32) -> Self {
        self.max_pending_inbound = Some(max);
        self
    }

    /// Caps the number of pending outbound sessions.
    pub const fn with_max_pending_outbound(mut self, max: u32) -> Self {
        self.max_pending_outbound = Some(max);
        self
    }

    /// Caps the number of established inbound sessions.
    pub const fn with_max_established_inbound(mut self, max: u32) -> Self {
        self.max_established_inbound = Some(max);
        self
    }

    /// Caps the number of established outbound sessions.
    pub const fn with_max_established_outbound(mut self, max: u32) -> Self {
        self.max_established_outbound = Some(max);
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn scale_session_event_buffer() {
        // At or below the default peer count the buffer is left at its default size.
        let config = SessionsConfig::default().with_upscaled_event_buffer(10);
        assert_eq!(config.session_event_buffer, DEFAULT_SESSION_EVENT_BUFFER_SIZE);
        let default_ratio = config.session_event_buffer / DEFAULT_MAX_PEERS;

        // Above the default peer count the buffer scales up, preserving the ratio.
        let config = SessionsConfig::default().with_upscaled_event_buffer(DEFAULT_MAX_PEERS * 2);
        let scaled_ratio = config.session_event_buffer / (DEFAULT_MAX_PEERS * 2);
        assert_eq!(default_ratio, scaled_ratio);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/session/mod.rs | crates/net/network-types/src/session/mod.rs | //! Peer sessions configuration.
pub mod config;
pub use config::{SessionLimits, SessionsConfig};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/peers/config.rs | crates/net/network-types/src/peers/config.rs | //! Configuration for peering.
use std::{
collections::HashSet,
io::{self, ErrorKind},
path::Path,
time::Duration,
};
use reth_net_banlist::BanList;
use reth_network_peers::{NodeRecord, TrustedPeer};
use tracing::info;
use crate::{BackoffKind, ReputationChangeWeights};
/// Maximum number of available slots for outbound sessions.
pub const DEFAULT_MAX_COUNT_PEERS_OUTBOUND: u32 = 100;
/// Maximum number of available slots for inbound sessions.
pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30;
/// Maximum number of available slots for concurrent outgoing dials.
///
/// This restricts how many outbound dials can be performed concurrently.
pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15;
/// A temporary timeout for ips on incoming connection attempts (acts as an IP-based rate limit).
pub const INBOUND_IP_THROTTLE_DURATION: Duration = Duration::from_secs(30);
/// The durations to use when a backoff should be applied to a peer.
///
/// See also [`BackoffKind`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct PeerBackoffDurations {
    /// Applies to connection problems where there is a chance that they will be resolved after the
    /// short duration.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub low: Duration,
    /// Applies to more severe connection problems where there is a lower chance that they will be
    /// resolved.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub medium: Duration,
    /// Intended for spammers, or bad peers in general.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub high: Duration,
    /// Maximum total backoff duration; caps the scaled backoff computed by
    /// [`PeerBackoffDurations::backoff_until`].
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub max: Duration,
}
impl PeerBackoffDurations {
    /// Maps a [`BackoffKind`] to its configured [`Duration`].
    pub const fn backoff(&self, kind: BackoffKind) -> Duration {
        match kind {
            BackoffKind::Low => self.low,
            BackoffKind::Medium => self.medium,
            BackoffKind::High => self.high,
        }
    }

    /// Returns the timestamp until which we should backoff.
    ///
    /// The backoff grows linearly with the counter: `(counter + 1) * base`, and the total is
    /// capped by the configured maximum backoff duration.
    pub fn backoff_until(&self, kind: BackoffKind, backoff_counter: u8) -> std::time::Instant {
        let base = self.backoff(kind);
        let total = base + base * backoff_counter as u32;
        std::time::Instant::now() + total.min(self.max)
    }

    /// Returns uniform, very short durations for testing.
    #[cfg(any(test, feature = "test-utils"))]
    pub const fn test() -> Self {
        const STEP: Duration = Duration::from_millis(200);
        Self { low: STEP, medium: STEP, high: STEP, max: STEP }
    }
}
impl Default for PeerBackoffDurations {
    fn default() -> Self {
        Self {
            // 30s
            low: Duration::from_secs(30),
            // 3min
            medium: Duration::from_secs(60 * 3),
            // 15min
            high: Duration::from_secs(60 * 15),
            // 1h
            max: Duration::from_secs(60 * 60),
        }
    }
}
/// Configured limits for in- and outbound connections and concurrent dials.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize), serde(default))]
pub struct ConnectionsConfig {
    /// Maximum allowed outbound connections.
    pub max_outbound: usize,
    /// Maximum allowed inbound connections.
    pub max_inbound: usize,
    /// Maximum allowed concurrent outbound dials.
    #[cfg_attr(feature = "serde", serde(default))]
    pub max_concurrent_outbound_dials: usize,
}
impl Default for ConnectionsConfig {
    // Defaults mirror the crate-level `DEFAULT_MAX_COUNT_*` constants.
    fn default() -> Self {
        Self {
            max_outbound: DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize,
            max_inbound: DEFAULT_MAX_COUNT_PEERS_INBOUND as usize,
            max_concurrent_outbound_dials: DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS,
        }
    }
}
/// Config type for initiating a `PeersManager` instance.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PeersConfig {
    /// How often to recheck free slots for outbound connections.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub refill_slots_interval: Duration,
    /// Trusted nodes to connect to or accept from.
    pub trusted_nodes: Vec<TrustedPeer>,
    /// Connect to or accept from trusted nodes only?
    #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))]
    pub trusted_nodes_only: bool,
    /// Interval to update trusted nodes DNS resolution.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub trusted_nodes_resolution_interval: Duration,
    /// Maximum number of backoff attempts before we give up on a peer and drop it.
    ///
    /// The maximum time a peer spends in the set before removal is determined by the
    /// configured backoff duration and the max backoff count.
    ///
    /// With a backoff counter of 5 and a backoff duration of 1h, the minimum time spent by the
    /// peer in the table is the sum of all backoffs (1h + 2h + 3h + 4h + 5h = 15h).
    ///
    /// Note: this does not apply to trusted peers.
    pub max_backoff_count: u8,
    /// Basic nodes to connect to.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub basic_nodes: HashSet<NodeRecord>,
    /// How long to ban bad peers.
    #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))]
    pub ban_duration: Duration,
    /// Restrictions on `PeerIds` and Ips.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub ban_list: BanList,
    /// Restrictions on connections.
    pub connection_info: ConnectionsConfig,
    /// How to weigh reputation changes.
    pub reputation_weights: ReputationChangeWeights,
    /// How long to backoff peers that we failed to connect to for non-fatal reasons.
    ///
    /// The backoff duration increases with the number of backoff attempts.
    pub backoff_durations: PeerBackoffDurations,
    /// How long to temporarily ban ips on incoming connection attempts.
    ///
    /// This acts as an IP based rate limit.
    #[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))]
    pub incoming_ip_throttle_duration: Duration,
}
impl Default for PeersConfig {
    fn default() -> Self {
        Self {
            refill_slots_interval: Duration::from_millis(5_000),
            connection_info: Default::default(),
            reputation_weights: Default::default(),
            ban_list: Default::default(),
            // Ban peers for 12h
            ban_duration: Duration::from_secs(60 * 60 * 12),
            backoff_durations: Default::default(),
            trusted_nodes: Default::default(),
            trusted_nodes_only: false,
            // Re-resolve trusted-node DNS entries once per hour
            trusted_nodes_resolution_interval: Duration::from_secs(60 * 60),
            basic_nodes: Default::default(),
            max_backoff_count: 5,
            incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION,
        }
    }
}
impl PeersConfig {
    /// A set of peer ids and IP addresses that we want to never connect to.
    pub fn with_ban_list(mut self, list: BanList) -> Self {
        self.ban_list = list;
        self
    }

    /// Configures how long bad peers stay banned.
    pub const fn with_ban_duration(mut self, duration: Duration) -> Self {
        self.ban_duration = duration;
        self
    }

    /// Configures how often free outbound slots are rechecked.
    pub const fn with_refill_slots_interval(mut self, interval: Duration) -> Self {
        self.refill_slots_interval = interval;
        self
    }

    /// Maximum allowed outbound connections.
    pub const fn with_max_outbound(mut self, limit: usize) -> Self {
        self.connection_info.max_outbound = limit;
        self
    }

    /// Maximum allowed inbound connections; `None` leaves the current value untouched.
    pub const fn with_max_inbound_opt(mut self, max_inbound: Option<usize>) -> Self {
        if let Some(limit) = max_inbound {
            self.connection_info.max_inbound = limit;
        }
        self
    }

    /// Maximum allowed outbound connections; `None` leaves the current value untouched.
    pub const fn with_max_outbound_opt(mut self, max_outbound: Option<usize>) -> Self {
        if let Some(limit) = max_outbound {
            self.connection_info.max_outbound = limit;
        }
        self
    }

    /// Maximum allowed inbound connections.
    pub const fn with_max_inbound(mut self, limit: usize) -> Self {
        self.connection_info.max_inbound = limit;
        self
    }

    /// Maximum allowed concurrent outbound dials.
    pub const fn with_max_concurrent_dials(mut self, limit: usize) -> Self {
        self.connection_info.max_concurrent_outbound_dials = limit;
        self
    }

    /// Nodes to always connect to.
    pub fn with_trusted_nodes(mut self, nodes: Vec<TrustedPeer>) -> Self {
        self.trusted_nodes = nodes;
        self
    }

    /// Connect only to trusted nodes.
    pub const fn with_trusted_nodes_only(mut self, trusted_only: bool) -> Self {
        self.trusted_nodes_only = trusted_only;
        self
    }

    /// Nodes available at launch.
    pub fn with_basic_nodes(mut self, nodes: HashSet<NodeRecord>) -> Self {
        self.basic_nodes = nodes;
        self
    }

    /// Configures the max allowed backoff count.
    pub const fn with_max_backoff_count(mut self, count: u8) -> Self {
        self.max_backoff_count = count;
        self
    }

    /// Configures how to weigh reputation changes.
    pub const fn with_reputation_weights(
        mut self,
        reputation_weights: ReputationChangeWeights,
    ) -> Self {
        self.reputation_weights = reputation_weights;
        self
    }

    /// Configures how long to backoff peers that we failed to connect to for non-fatal reasons.
    pub const fn with_backoff_durations(mut self, durations: PeerBackoffDurations) -> Self {
        self.backoff_durations = durations;
        self
    }

    /// Returns the maximum number of peers, inbound and outbound combined.
    pub const fn max_peers(&self) -> usize {
        self.connection_info.max_outbound + self.connection_info.max_inbound
    }

    /// Reads nodes available at launch from a file. A missing path or missing file is ignored.
    pub fn with_basic_nodes_from_file(
        self,
        optional_file: Option<impl AsRef<Path>>,
    ) -> Result<Self, io::Error> {
        let path = match optional_file {
            Some(p) => p,
            None => return Ok(self),
        };
        // A file that simply does not exist is not an error: there is nothing to load yet.
        let file = match std::fs::File::open(path.as_ref()) {
            Ok(f) => f,
            Err(e) if e.kind() == ErrorKind::NotFound => return Ok(self),
            Err(e) => return Err(e),
        };
        info!(target: "net::peers", file = %path.as_ref().display(), "Loading saved peers");
        let nodes: HashSet<NodeRecord> = serde_json::from_reader(io::BufReader::new(file))?;
        Ok(self.with_basic_nodes(nodes))
    }

    /// Returns settings suitable for tests: short intervals and ban durations.
    #[cfg(any(test, feature = "test-utils"))]
    pub fn test() -> Self {
        Self {
            refill_slots_interval: Duration::from_millis(100),
            backoff_durations: PeerBackoffDurations::test(),
            ban_duration: Duration::from_millis(200),
            ..Default::default()
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/peers/state.rs | crates/net/network-types/src/peers/state.rs | //! State of connection to a peer.
/// Represents the kind of connection established to the peer, if any.
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)]
pub enum PeerConnectionState {
    /// Not connected currently.
    #[default]
    Idle,
    /// Disconnect of an incoming connection in progress.
    DisconnectingIn,
    /// Disconnect of an outgoing connection in progress.
    DisconnectingOut,
    /// Connected via incoming connection.
    In,
    /// Connected via outgoing connection.
    Out,
    /// Pending outgoing connection.
    PendingOut,
}
// === impl PeerConnectionState ===
impl PeerConnectionState {
    /// Moves an established connection into the corresponding disconnecting state.
    ///
    /// States without an active connection are left unchanged.
    #[inline]
    pub const fn disconnect(&mut self) {
        if matches!(self, Self::In) {
            *self = Self::DisconnectingIn;
        } else if matches!(self, Self::Out) {
            *self = Self::DisconnectingOut;
        }
    }

    /// Returns true if this is the idle state.
    #[inline]
    pub const fn is_idle(&self) -> bool {
        matches!(self, Self::Idle)
    }

    /// Returns true if this is an active incoming connection.
    #[inline]
    pub const fn is_incoming(&self) -> bool {
        matches!(self, Self::In)
    }

    /// Returns whether we're currently connected (or dialing) this peer.
    #[inline]
    pub const fn is_connected(&self) -> bool {
        match self {
            Self::In | Self::Out | Self::PendingOut => true,
            Self::Idle | Self::DisconnectingIn | Self::DisconnectingOut => false,
        }
    }

    /// Returns true if there's currently no connection to that peer.
    #[inline]
    pub const fn is_unconnected(&self) -> bool {
        self.is_idle()
    }

    /// Returns true if there's currently an outbound dial to that peer.
    #[inline]
    pub const fn is_pending_out(&self) -> bool {
        matches!(self, Self::PendingOut)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/peers/kind.rs | crates/net/network-types/src/peers/kind.rs | //! Classification of a peer based on trust.
/// Represents the kind of peer
#[derive(Debug, Clone, Copy, Default, Eq, PartialEq)]
pub enum PeerKind {
/// Basic peer kind.
#[default]
Basic,
/// Static peer, added via JSON-RPC.
Static,
/// Trusted peer.
Trusted,
}
impl PeerKind {
/// Returns `true` if the peer is trusted.
pub const fn is_trusted(&self) -> bool {
matches!(self, Self::Trusted)
}
/// Returns `true` if the peer is static.
pub const fn is_static(&self) -> bool {
matches!(self, Self::Static)
}
/// Returns `true` if the peer is basic.
pub const fn is_basic(&self) -> bool {
matches!(self, Self::Basic)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/peers/mod.rs | crates/net/network-types/src/peers/mod.rs | pub mod addr;
pub mod config;
pub mod kind;
pub mod reputation;
pub mod state;
pub use config::{ConnectionsConfig, PeersConfig};
pub use reputation::{Reputation, ReputationChange, ReputationChangeKind, ReputationChangeWeights};
use alloy_eip2124::ForkId;
use tracing::trace;
use crate::{
is_banned_reputation, PeerAddr, PeerConnectionState, PeerKind, ReputationChangeOutcome,
DEFAULT_REPUTATION,
};
/// Tracks info about a single peer.
#[derive(Debug, Clone)]
pub struct Peer {
/// Where to reach the peer.
pub addr: PeerAddr,
/// Reputation of the peer.
pub reputation: i32,
/// The state of the connection, if any.
pub state: PeerConnectionState,
/// The [`ForkId`] that the peer announced via discovery.
pub fork_id: Option<ForkId>,
/// Whether the entry should be removed after an existing session was terminated.
pub remove_after_disconnect: bool,
/// The kind of peer
pub kind: PeerKind,
/// Whether the peer is currently backed off.
pub backed_off: bool,
/// Counts number of times the peer was backed off due to a severe
/// [`BackoffKind`](crate::BackoffKind).
pub severe_backoff_counter: u8,
}
// === impl Peer ===
impl Peer {
/// Returns a new peer for given [`PeerAddr`].
pub fn new(addr: PeerAddr) -> Self {
Self::with_state(addr, Default::default())
}
/// Returns a new trusted peer for given [`PeerAddr`].
pub fn trusted(addr: PeerAddr) -> Self {
Self { kind: PeerKind::Trusted, ..Self::new(addr) }
}
/// Returns the reputation of the peer
pub const fn reputation(&self) -> i32 {
self.reputation
}
/// Returns a new peer for given [`PeerAddr`] and [`PeerConnectionState`].
pub fn with_state(addr: PeerAddr, state: PeerConnectionState) -> Self {
Self {
addr,
state,
reputation: DEFAULT_REPUTATION,
fork_id: None,
remove_after_disconnect: false,
kind: Default::default(),
backed_off: false,
severe_backoff_counter: 0,
}
}
/// Returns a new peer for given [`PeerAddr`] and [`PeerKind`].
pub fn with_kind(addr: PeerAddr, kind: PeerKind) -> Self {
Self { kind, ..Self::new(addr) }
}
/// Resets the reputation of the peer to the default value. This always returns
/// [`ReputationChangeOutcome::None`].
pub const fn reset_reputation(&mut self) -> ReputationChangeOutcome {
self.reputation = DEFAULT_REPUTATION;
ReputationChangeOutcome::None
}
/// Applies a reputation change to the peer and returns what action should be taken.
pub fn apply_reputation(
&mut self,
reputation: i32,
kind: ReputationChangeKind,
) -> ReputationChangeOutcome {
let previous = self.reputation;
// we add reputation since negative reputation change decrease total reputation
self.reputation = previous.saturating_add(reputation);
trace!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), ?kind, "applied reputation change");
if self.state.is_connected() && self.is_banned() {
self.state.disconnect();
return ReputationChangeOutcome::DisconnectAndBan
}
if self.is_banned() && !is_banned_reputation(previous) {
return ReputationChangeOutcome::Ban
}
if !self.is_banned() && is_banned_reputation(previous) {
return ReputationChangeOutcome::Unban
}
ReputationChangeOutcome::None
}
/// Returns true if the peer's reputation is below the banned threshold.
#[inline]
pub const fn is_banned(&self) -> bool {
is_banned_reputation(self.reputation)
}
/// Returns `true` if peer is banned.
#[inline]
pub const fn is_backed_off(&self) -> bool {
self.backed_off
}
/// Unbans the peer by resetting its reputation
#[inline]
pub const fn unban(&mut self) {
self.reputation = DEFAULT_REPUTATION
}
/// Returns whether this peer is trusted
#[inline]
pub const fn is_trusted(&self) -> bool {
matches!(self.kind, PeerKind::Trusted)
}
/// Returns whether this peer is static
#[inline]
pub const fn is_static(&self) -> bool {
matches!(self.kind, PeerKind::Static)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/peers/addr.rs | crates/net/network-types/src/peers/addr.rs | //! `RLPx` (TCP) and `Discovery` (UDP) sockets of a peer.
use std::net::{IpAddr, SocketAddr};
/// Represents a peer's address information.
///
/// # Fields
///
/// - `tcp`: A `SocketAddr` representing the peer's data transfer address.
/// - `udp`: An optional `SocketAddr` representing the peer's discover address. `None` if the peer
/// is directly connecting to us or the port is the same to `tcp`'s
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct PeerAddr {
tcp: SocketAddr,
udp: Option<SocketAddr>,
}
impl PeerAddr {
/// Returns the peer's TCP address.
pub const fn tcp(&self) -> SocketAddr {
self.tcp
}
/// Returns the peer's UDP address.
pub const fn udp(&self) -> Option<SocketAddr> {
self.udp
}
/// Returns a new `PeerAddr` with the given `tcp` and `udp` addresses.
pub const fn new(tcp: SocketAddr, udp: Option<SocketAddr>) -> Self {
Self { tcp, udp }
}
/// Returns a new `PeerAddr` with a `tcp` address only.
pub const fn from_tcp(tcp: SocketAddr) -> Self {
Self { tcp, udp: None }
}
/// Returns a new `PeerAddr` with the given `tcp` and `udp` ports.
pub fn new_with_ports(ip: IpAddr, tcp_port: u16, udp_port: Option<u16>) -> Self {
let tcp = SocketAddr::new(ip, tcp_port);
let udp = udp_port.map(|port| SocketAddr::new(ip, port));
Self::new(tcp, udp)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-types/src/peers/reputation.rs | crates/net/network-types/src/peers/reputation.rs | //! Peer reputation management
/// The default reputation of a peer
pub const DEFAULT_REPUTATION: Reputation = 0;
/// The minimal unit we're measuring reputation
const REPUTATION_UNIT: i32 = -1024;
/// The reputation value below which new connection from/to peers are rejected.
pub const BANNED_REPUTATION: i32 = 50 * REPUTATION_UNIT;
/// The reputation change to apply to a peer that dropped the connection.
const REMOTE_DISCONNECT_REPUTATION_CHANGE: i32 = 4 * REPUTATION_UNIT;
/// The reputation change to apply to a peer that we failed to connect to.
pub const FAILED_TO_CONNECT_REPUTATION_CHANGE: i32 = 25 * REPUTATION_UNIT;
/// The reputation change to apply to a peer that failed to respond in time.
const TIMEOUT_REPUTATION_CHANGE: i32 = 4 * REPUTATION_UNIT;
/// The reputation change to apply to a peer that sent a bad message.
const BAD_MESSAGE_REPUTATION_CHANGE: i32 = 16 * REPUTATION_UNIT;
/// The reputation change applies to a peer that has sent a transaction (full or hash) that we
/// already know about and have already previously received from that peer.
///
/// Note: this appears to be quite common in practice, so by default this is 0, which doesn't
/// apply any changes to the peer's reputation, effectively ignoring it.
const ALREADY_SEEN_TRANSACTION_REPUTATION_CHANGE: i32 = 0;
/// The reputation change to apply to a peer which violates protocol rules: minimal reputation
const BAD_PROTOCOL_REPUTATION_CHANGE: i32 = i32::MIN;
/// The reputation change to apply to a peer that sent a bad announcement.
// todo: current value is a hint, needs to be set properly
const BAD_ANNOUNCEMENT_REPUTATION_CHANGE: i32 = REPUTATION_UNIT;
/// The maximum reputation change that can be applied to a trusted peer.
/// This is used to prevent a single bad message from a trusted peer to cause a significant change.
/// This gives a trusted peer more leeway when interacting with the node, which is useful for in
/// custom setups. By not setting this to `0` we still allow trusted peer penalization but less than
/// untrusted peers.
pub const MAX_TRUSTED_PEER_REPUTATION_CHANGE: Reputation = 2 * REPUTATION_UNIT;
/// Returns `true` if the given reputation is below the [`BANNED_REPUTATION`] threshold
#[inline]
pub const fn is_banned_reputation(reputation: i32) -> bool {
reputation < BANNED_REPUTATION
}
/// Returns `true` if the given reputation is below the [`FAILED_TO_CONNECT_REPUTATION_CHANGE`]
/// threshold
#[inline]
pub const fn is_connection_failed_reputation(reputation: i32) -> bool {
reputation < FAILED_TO_CONNECT_REPUTATION_CHANGE
}
/// The type that tracks the reputation score.
pub type Reputation = i32;
/// Various kinds of reputation changes.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ReputationChangeKind {
/// Received an unspecific bad message from the peer
BadMessage,
/// Peer sent a bad block.
///
/// Note: this will we only used in pre-merge, pow consensus, since after no more block announcements are sent via devp2p: [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p)
BadBlock,
/// Peer sent a bad transaction message. E.g. Transactions which weren't recoverable.
BadTransactions,
/// Peer sent a bad announcement message, e.g. invalid transaction type for the configured
/// network.
BadAnnouncement,
/// Peer sent a message that included a hash or transaction that we already received from the
/// peer.
///
/// According to the [Eth spec](https://github.com/ethereum/devp2p/blob/master/caps/eth.md):
///
/// > A node should never send a transaction back to a peer that it can determine already knows
/// > of it (either because it was previously sent or because it was informed from this peer
/// > originally). This is usually achieved by remembering a set of transaction hashes recently
/// > relayed by the peer.
AlreadySeenTransaction,
/// Peer failed to respond in time.
Timeout,
/// Peer does not adhere to network protocol rules.
BadProtocol,
/// Failed to establish a connection to the peer.
FailedToConnect,
/// Connection dropped by peer.
Dropped,
/// Reset the reputation to the default value.
Reset,
/// Apply a reputation change by value
Other(Reputation),
}
impl ReputationChangeKind {
/// Returns true if the reputation change is a [`ReputationChangeKind::Reset`].
pub const fn is_reset(&self) -> bool {
matches!(self, Self::Reset)
}
/// Returns true if the reputation change is [`ReputationChangeKind::Dropped`].
pub const fn is_dropped(&self) -> bool {
matches!(self, Self::Dropped)
}
}
/// How the [`ReputationChangeKind`] are weighted.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ReputationChangeWeights {
/// Weight for [`ReputationChangeKind::BadMessage`]
pub bad_message: Reputation,
/// Weight for [`ReputationChangeKind::BadBlock`]
pub bad_block: Reputation,
/// Weight for [`ReputationChangeKind::BadTransactions`]
pub bad_transactions: Reputation,
/// Weight for [`ReputationChangeKind::AlreadySeenTransaction`]
pub already_seen_transactions: Reputation,
/// Weight for [`ReputationChangeKind::Timeout`]
pub timeout: Reputation,
/// Weight for [`ReputationChangeKind::BadProtocol`]
pub bad_protocol: Reputation,
/// Weight for [`ReputationChangeKind::FailedToConnect`]
pub failed_to_connect: Reputation,
/// Weight for [`ReputationChangeKind::Dropped`]
pub dropped: Reputation,
/// Weight for [`ReputationChangeKind::BadAnnouncement`]
pub bad_announcement: Reputation,
}
// === impl ReputationChangeWeights ===
impl ReputationChangeWeights {
/// Creates a new instance that doesn't penalize any kind of reputation change.
pub const fn zero() -> Self {
Self {
bad_block: 0,
bad_transactions: 0,
already_seen_transactions: 0,
bad_message: 0,
timeout: 0,
bad_protocol: 0,
failed_to_connect: 0,
dropped: 0,
bad_announcement: 0,
}
}
/// Returns the quantifiable [`ReputationChange`] for the given [`ReputationChangeKind`] using
/// the configured weights
pub fn change(&self, kind: ReputationChangeKind) -> ReputationChange {
match kind {
ReputationChangeKind::BadMessage => self.bad_message.into(),
ReputationChangeKind::BadBlock => self.bad_block.into(),
ReputationChangeKind::BadTransactions => self.bad_transactions.into(),
ReputationChangeKind::AlreadySeenTransaction => self.already_seen_transactions.into(),
ReputationChangeKind::Timeout => self.timeout.into(),
ReputationChangeKind::BadProtocol => self.bad_protocol.into(),
ReputationChangeKind::FailedToConnect => self.failed_to_connect.into(),
ReputationChangeKind::Dropped => self.dropped.into(),
ReputationChangeKind::Reset => DEFAULT_REPUTATION.into(),
ReputationChangeKind::Other(val) => val.into(),
ReputationChangeKind::BadAnnouncement => self.bad_announcement.into(),
}
}
}
impl Default for ReputationChangeWeights {
fn default() -> Self {
Self {
bad_block: BAD_MESSAGE_REPUTATION_CHANGE,
bad_transactions: BAD_MESSAGE_REPUTATION_CHANGE,
already_seen_transactions: ALREADY_SEEN_TRANSACTION_REPUTATION_CHANGE,
bad_message: BAD_MESSAGE_REPUTATION_CHANGE,
timeout: TIMEOUT_REPUTATION_CHANGE,
bad_protocol: BAD_PROTOCOL_REPUTATION_CHANGE,
failed_to_connect: FAILED_TO_CONNECT_REPUTATION_CHANGE,
dropped: REMOTE_DISCONNECT_REPUTATION_CHANGE,
bad_announcement: BAD_ANNOUNCEMENT_REPUTATION_CHANGE,
}
}
}
/// Represents a change in a peer's reputation.
#[derive(Debug, Copy, Clone, Default)]
pub struct ReputationChange(Reputation);
// === impl ReputationChange ===
impl ReputationChange {
/// Helper type for easier conversion
#[inline]
pub const fn as_i32(self) -> Reputation {
self.0
}
}
impl From<ReputationChange> for Reputation {
fn from(value: ReputationChange) -> Self {
value.0
}
}
impl From<Reputation> for ReputationChange {
fn from(value: Reputation) -> Self {
Self(value)
}
}
/// Outcomes when a reputation change is applied to a peer
#[derive(Debug, Clone, Copy)]
pub enum ReputationChangeOutcome {
/// Nothing to do.
None,
/// Ban the peer.
Ban,
/// Ban and disconnect
DisconnectAndBan,
/// Unban the peer
Unban,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/file_client.rs | crates/net/downloaders/src/file_client.rs | use alloy_consensus::BlockHeader;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256};
use futures::Future;
use itertools::Either;
use reth_consensus::{Consensus, ConsensusError};
use reth_network_p2p::{
bodies::client::{BodiesClient, BodiesFut},
download::DownloadClient,
error::RequestError,
headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest},
priority::Priority,
BlockClient,
};
use reth_network_peers::PeerId;
use reth_primitives_traits::{Block, BlockBody, FullBlock, SealedBlock, SealedHeader};
use std::{collections::HashMap, io, ops::RangeInclusive, path::Path, sync::Arc};
use thiserror::Error;
use tokio::{fs::File, io::AsyncReadExt};
use tokio_stream::StreamExt;
use tokio_util::codec::FramedRead;
use tracing::{debug, trace, warn};
use super::file_codec::BlockFileCodec;
use crate::receipt_file_client::FromReceiptReader;
/// Default byte length of chunk to read from chain file.
///
/// Default is 1 GB.
pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000;
/// Front-end API for fetching chain data from a file.
///
/// Blocks are assumed to be written one after another in a file, as rlp bytes.
///
/// For example, if the file contains 3 blocks, the file is assumed to be encoded as follows:
/// rlp(block1) || rlp(block2) || rlp(block3)
///
/// Blocks are assumed to have populated transactions, so reading headers will also buffer
/// transactions in memory for use in the bodies stage.
///
/// This reads the entire file into memory, so it is not suitable for large files.
#[derive(Debug, Clone)]
pub struct FileClient<B: Block> {
/// The buffered headers retrieved when fetching new bodies.
headers: HashMap<BlockNumber, B::Header>,
/// A mapping between block hash and number.
hash_to_number: HashMap<BlockHash, BlockNumber>,
/// The buffered bodies retrieved when fetching new headers.
bodies: HashMap<BlockHash, B::Body>,
}
/// An error that can occur when constructing and using a [`FileClient`].
#[derive(Debug, Error)]
pub enum FileClientError {
/// An error occurred when validating a header from file.
#[error(transparent)]
Consensus(#[from] ConsensusError),
/// An error occurred when opening or reading the file.
#[error(transparent)]
Io(#[from] std::io::Error),
/// An error occurred when decoding blocks, headers, or rlp headers from the file.
#[error("{0}")]
Rlp(alloy_rlp::Error, Vec<u8>),
/// Custom error message.
#[error("{0}")]
Custom(&'static str),
}
impl From<&'static str> for FileClientError {
fn from(value: &'static str) -> Self {
Self::Custom(value)
}
}
impl<B: FullBlock> FileClient<B> {
/// Create a new file client from a file path.
pub async fn new<P: AsRef<Path>>(
path: P,
consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
) -> Result<Self, FileClientError> {
let file = File::open(path).await?;
Self::from_file(file, consensus).await
}
/// Initialize the [`FileClient`] with a file directly.
pub(crate) async fn from_file(
mut file: File,
consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
) -> Result<Self, FileClientError> {
// get file len from metadata before reading
let metadata = file.metadata().await?;
let file_len = metadata.len();
let mut reader = vec![];
file.read_to_end(&mut reader).await?;
Ok(FileClientBuilder { consensus, parent_header: None }
.build(&reader[..], file_len)
.await?
.file_client)
}
/// Get the tip hash of the chain.
pub fn tip(&self) -> Option<B256> {
self.headers.get(&self.max_block()?).map(|h| h.hash_slow())
}
/// Get the start hash of the chain.
pub fn start(&self) -> Option<B256> {
self.headers.get(&self.min_block()?).map(|h| h.hash_slow())
}
/// Returns the highest block number of this client has or `None` if empty
pub fn max_block(&self) -> Option<u64> {
self.headers.keys().max().copied()
}
/// Returns the lowest block number of this client has or `None` if empty
pub fn min_block(&self) -> Option<u64> {
self.headers.keys().min().copied()
}
/// Clones and returns the highest header of this client has or `None` if empty. Seals header
/// before returning.
pub fn tip_header(&self) -> Option<SealedHeader<B::Header>> {
self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal_slow(h.clone()))
}
/// Returns true if all blocks are canonical (no gaps)
pub fn has_canonical_blocks(&self) -> bool {
if self.headers.is_empty() {
return true
}
let mut nums = self.headers.keys().copied().collect::<Vec<_>>();
nums.sort_unstable();
let mut iter = nums.into_iter();
let mut lowest = iter.next().expect("not empty");
for next in iter {
if next != lowest + 1 {
return false
}
lowest = next;
}
true
}
/// Use the provided bodies as the file client's block body buffer.
pub fn with_bodies(mut self, bodies: HashMap<BlockHash, B::Body>) -> Self {
self.bodies = bodies;
self
}
/// Use the provided headers as the file client's block body buffer.
pub fn with_headers(mut self, headers: HashMap<BlockNumber, B::Header>) -> Self {
self.headers = headers;
for (number, header) in &self.headers {
self.hash_to_number.insert(header.hash_slow(), *number);
}
self
}
/// Returns the current number of headers in the client.
pub fn headers_len(&self) -> usize {
self.headers.len()
}
/// Returns the current number of bodies in the client.
pub fn bodies_len(&self) -> usize {
self.bodies.len()
}
/// Returns an iterator over headers in the client.
pub fn headers_iter(&self) -> impl Iterator<Item = &B::Header> {
self.headers.values()
}
/// Returns a mutable iterator over bodies in the client.
///
/// Panics, if file client headers and bodies are not mapping 1-1.
pub fn bodies_iter_mut(&mut self) -> impl Iterator<Item = (u64, &mut B::Body)> {
let bodies = &mut self.bodies;
let numbers = &self.hash_to_number;
bodies.iter_mut().map(|(hash, body)| (numbers[hash], body))
}
/// Returns the current number of transactions in the client.
pub fn total_transactions(&self) -> usize {
self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions().len())
}
}
struct FileClientBuilder<B: Block> {
pub consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
pub parent_header: Option<SealedHeader<B::Header>>,
}
impl<B: FullBlock<Header: reth_primitives_traits::BlockHeader>> FromReader
for FileClientBuilder<B>
{
type Error = FileClientError;
type Output = FileClient<B>;
/// Initialize the [`FileClient`] from bytes that have been read from file.
fn build<R>(
&self,
reader: R,
num_bytes: u64,
) -> impl Future<Output = Result<DecodedFileChunk<Self::Output>, Self::Error>>
where
R: AsyncReadExt + Unpin,
{
let mut headers = HashMap::default();
let mut hash_to_number = HashMap::default();
let mut bodies = HashMap::default();
// use with_capacity to make sure the internal buffer contains the entire chunk
let mut stream =
FramedRead::with_capacity(reader, BlockFileCodec::<B>::default(), num_bytes as usize);
trace!(target: "downloaders::file",
target_num_bytes=num_bytes,
capacity=stream.read_buffer().capacity(),
"init decode stream"
);
let mut remaining_bytes = vec![];
let mut log_interval = 0;
let mut log_interval_start_block = 0;
let mut parent_header = self.parent_header.clone();
async move {
while let Some(block_res) = stream.next().await {
let block = match block_res {
Ok(block) => block,
Err(FileClientError::Rlp(err, bytes)) => {
trace!(target: "downloaders::file",
%err,
bytes_len=bytes.len(),
"partial block returned from decoding chunk"
);
remaining_bytes = bytes;
break
}
Err(err) => return Err(err),
};
let block = SealedBlock::seal_slow(block);
// Validate standalone header
self.consensus.validate_header(block.sealed_header())?;
if let Some(parent) = &parent_header {
self.consensus.validate_header_against_parent(block.sealed_header(), parent)?;
parent_header = Some(block.sealed_header().clone());
}
// Validate block against header
self.consensus.validate_block_pre_execution(&block)?;
// add to the internal maps
let block_hash = block.hash();
let block_number = block.number();
let (header, body) = block.split_sealed_header_body();
headers.insert(block_number, header.unseal());
hash_to_number.insert(block_hash, block_number);
bodies.insert(block_hash, body);
if log_interval == 0 {
trace!(target: "downloaders::file",
block_number,
"read first block"
);
log_interval_start_block = block_number;
} else if log_interval % 100_000 == 0 {
trace!(target: "downloaders::file",
blocks=?log_interval_start_block..=block_number,
"read blocks from file"
);
log_interval_start_block = block_number + 1;
}
log_interval += 1;
}
trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client");
Ok(DecodedFileChunk {
file_client: FileClient { headers, hash_to_number, bodies },
remaining_bytes,
highest_block: None,
})
}
}
}
impl<B: FullBlock> HeadersClient for FileClient<B> {
type Header = B::Header;
type Output = HeadersFut<B::Header>;
fn get_headers_with_priority(
&self,
request: HeadersRequest,
_priority: Priority,
) -> Self::Output {
// this just searches the buffer, and fails if it can't find the header
let mut headers = Vec::new();
trace!(target: "downloaders::file", request=?request, "Getting headers");
let start_num = match request.start {
BlockHashOrNumber::Hash(hash) => match self.hash_to_number.get(&hash) {
Some(num) => *num,
None => {
warn!(%hash, "Could not find starting block number for requested header hash");
return Box::pin(async move { Err(RequestError::BadResponse) })
}
},
BlockHashOrNumber::Number(num) => num,
};
let range = if request.limit == 1 {
Either::Left(start_num..start_num + 1)
} else {
match request.direction {
HeadersDirection::Rising => Either::Left(start_num..start_num + request.limit),
HeadersDirection::Falling => {
Either::Right((start_num - request.limit + 1..=start_num).rev())
}
}
};
trace!(target: "downloaders::file", range=?range, "Getting headers with range");
for block_number in range {
match self.headers.get(&block_number).cloned() {
Some(header) => headers.push(header),
None => {
warn!(number=%block_number, "Could not find header");
return Box::pin(async move { Err(RequestError::BadResponse) })
}
}
}
Box::pin(async move { Ok((PeerId::default(), headers).into()) })
}
}
impl<B: FullBlock> BodiesClient for FileClient<B> {
type Body = B::Body;
type Output = BodiesFut<B::Body>;
fn get_block_bodies_with_priority_and_range_hint(
&self,
hashes: Vec<B256>,
_priority: Priority,
_range_hint: Option<RangeInclusive<u64>>,
) -> Self::Output {
// this just searches the buffer, and fails if it can't find the block
let mut bodies = Vec::new();
// check if any are an error
// could unwrap here
for hash in hashes {
match self.bodies.get(&hash).cloned() {
Some(body) => bodies.push(body),
None => return Box::pin(async move { Err(RequestError::BadResponse) }),
}
}
Box::pin(async move { Ok((PeerId::default(), bodies).into()) })
}
}
impl<B: FullBlock> DownloadClient for FileClient<B> {
fn report_bad_message(&self, _peer_id: PeerId) {
trace!("Reported a bad message on a file client, the file may be corrupted or invalid");
// noop
}
fn num_connected_peers(&self) -> usize {
// no such thing as connected peers when we are just using a file
1
}
}
impl<B: FullBlock> BlockClient for FileClient<B> {
type Block = B;
}
/// Chunks file into several [`FileClient`]s.
#[derive(Debug)]
pub struct ChunkedFileReader {
/// File to read from.
file: File,
/// Current file byte length.
file_byte_len: u64,
/// Bytes that have been read.
chunk: Vec<u8>,
/// Max bytes per chunk.
chunk_byte_len: u64,
/// Optionally, tracks highest decoded block number. Needed when decoding data that maps * to 1
/// with block number
highest_block: Option<u64>,
}
impl ChunkedFileReader {
/// Returns the remaining file length.
pub const fn file_len(&self) -> u64 {
self.file_byte_len
}
/// Opens the file to import from given path. Returns a new instance. If no chunk byte length
/// is passed, chunks have [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`] (one static file).
pub async fn new<P: AsRef<Path>>(
path: P,
chunk_byte_len: Option<u64>,
) -> Result<Self, FileClientError> {
let file = File::open(path).await?;
let chunk_byte_len = chunk_byte_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE);
Self::from_file(file, chunk_byte_len).await
}
/// Opens the file to import from given path. Returns a new instance.
pub async fn from_file(file: File, chunk_byte_len: u64) -> Result<Self, FileClientError> {
// get file len from metadata before reading
let metadata = file.metadata().await?;
let file_byte_len = metadata.len();
Ok(Self { file, file_byte_len, chunk: vec![], chunk_byte_len, highest_block: None })
}
/// Calculates the number of bytes to read from the chain file. Returns a tuple of the chunk
/// length and the remaining file length.
const fn chunk_len(&self) -> u64 {
let Self { chunk_byte_len, file_byte_len, .. } = *self;
let file_byte_len = file_byte_len + self.chunk.len() as u64;
if chunk_byte_len > file_byte_len {
// last chunk
file_byte_len
} else {
chunk_byte_len
}
}
/// Reads bytes from file and buffers as next chunk to decode. Returns byte length of next
/// chunk to read.
async fn read_next_chunk(&mut self) -> Result<Option<u64>, io::Error> {
if self.file_byte_len == 0 && self.chunk.is_empty() {
// eof
return Ok(None)
}
let chunk_target_len = self.chunk_len();
let old_bytes_len = self.chunk.len() as u64;
// calculate reserved space in chunk
let new_read_bytes_target_len = chunk_target_len - old_bytes_len;
// read new bytes from file
let prev_read_bytes_len = self.chunk.len();
self.chunk.extend(std::iter::repeat_n(0, new_read_bytes_target_len as usize));
let reader = &mut self.chunk[prev_read_bytes_len..];
// actual bytes that have been read
let new_read_bytes_len = self.file.read_exact(reader).await? as u64;
let next_chunk_byte_len = self.chunk.len();
// update remaining file length
self.file_byte_len -= new_read_bytes_len;
debug!(target: "downloaders::file",
max_chunk_byte_len=self.chunk_byte_len,
prev_read_bytes_len,
new_read_bytes_target_len,
new_read_bytes_len,
next_chunk_byte_len,
remaining_file_byte_len=self.file_byte_len,
"new bytes were read from file"
);
Ok(Some(next_chunk_byte_len as u64))
}
/// Read next chunk from file. Returns [`FileClient`] containing decoded chunk.
pub async fn next_chunk<B: FullBlock>(
&mut self,
consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
parent_header: Option<SealedHeader<B::Header>>,
) -> Result<Option<FileClient<B>>, FileClientError> {
let Some(next_chunk_byte_len) = self.read_next_chunk().await? else { return Ok(None) };
// make new file client from chunk
let DecodedFileChunk { file_client, remaining_bytes, .. } =
FileClientBuilder { consensus, parent_header }
.build(&self.chunk[..], next_chunk_byte_len)
.await?;
// save left over bytes
self.chunk = remaining_bytes;
Ok(Some(file_client))
}
/// Read next chunk from file. Returns [`FileClient`] containing decoded chunk.
pub async fn next_receipts_chunk<T>(&mut self) -> Result<Option<T>, T::Error>
where
T: FromReceiptReader,
{
let Some(next_chunk_byte_len) = self.read_next_chunk().await? else { return Ok(None) };
// make new file client from chunk
let DecodedFileChunk { file_client, remaining_bytes, highest_block } =
T::from_receipt_reader(&self.chunk[..], next_chunk_byte_len, self.highest_block)
.await?;
// save left over bytes
self.chunk = remaining_bytes;
// update highest block
self.highest_block = highest_block;
Ok(Some(file_client))
}
}
/// Constructs a file client from a reader.
pub trait FromReader {
/// Error returned by file client type.
type Error: From<io::Error>;
/// Output returned by file client type.
type Output;
/// Returns a file client
fn build<R>(
&self,
reader: R,
num_bytes: u64,
) -> impl Future<Output = Result<DecodedFileChunk<Self::Output>, Self::Error>>
where
Self: Sized,
R: AsyncReadExt + Unpin;
}
/// Output from decoding a file chunk with [`FromReader::build`].
#[derive(Debug)]
pub struct DecodedFileChunk<T> {
/// File client, i.e. the decoded part of chunk.
pub file_client: T,
/// Remaining bytes that have not been decoded, e.g. a partial block or a partial receipt.
pub remaining_bytes: Vec<u8>,
/// Highest block of decoded chunk. This is needed when decoding data that maps * to 1 with
/// block number, like receipts.
pub highest_block: Option<u64>,
}
// Integration-style tests: build `FileClient`s over (temporary) files and drive the real
// header/bodies downloaders against them.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        bodies::{
            bodies::BodiesDownloaderBuilder,
            test_utils::{insert_headers, zip_blocks},
        },
        headers::{reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header},
        test_utils::{generate_bodies, generate_bodies_file},
    };
    use assert_matches::assert_matches;
    use futures_util::stream::StreamExt;
    use rand::Rng;
    use reth_consensus::{noop::NoopConsensus, test_utils::TestConsensus};
    use reth_ethereum_primitives::Block;
    use reth_network_p2p::{
        bodies::downloader::BodyDownloader,
        headers::downloader::{HeaderDownloader, SyncTarget},
    };
    use reth_provider::test_utils::create_test_provider_factory;
    use std::sync::Arc;
    #[tokio::test]
    async fn streams_bodies_from_buffer() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=19);
        insert_headers(factory.db_ref().db(), &headers);
        // create an empty file
        let file = tempfile::tempfile().unwrap();
        // bodies are served from the client's in-memory buffer, not the (empty) file
        let client: Arc<FileClient<Block>> = Arc::new(
            FileClient::from_file(file.into(), NoopConsensus::arc())
                .await
                .unwrap()
                .with_bodies(bodies.clone()),
        );
        let mut downloader = BodiesDownloaderBuilder::default().build::<Block, _, _>(
            client.clone(),
            Arc::new(TestConsensus::default()),
            factory,
        );
        downloader.set_download_range(0..=19).expect("failed to set download range");
        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
        );
    }
    #[tokio::test]
    async fn download_headers_at_fork_head() {
        reth_tracing::init_test_tracing();
        // 4-header chain built oldest-first: p3 is the ancestor, p0 the tip
        let p3 = SealedHeader::default();
        let p2 = child_header(&p3);
        let p1 = child_header(&p2);
        let p0 = child_header(&p1);
        let file = tempfile::tempfile().unwrap();
        let client: Arc<FileClient<Block>> = Arc::new(
            FileClient::from_file(file.into(), NoopConsensus::arc()).await.unwrap().with_headers(
                HashMap::from([
                    (0u64, p0.clone_header()),
                    (1, p1.clone_header()),
                    (2, p2.clone_header()),
                    (3, p3.clone_header()),
                ]),
            ),
        );
        let mut downloader = ReverseHeadersDownloaderBuilder::default()
            .stream_batch_size(3)
            .request_limit(3)
            .build(Arc::clone(&client), Arc::new(TestConsensus::default()));
        downloader.update_local_head(p3.clone());
        downloader.update_sync_target(SyncTarget::Tip(p0.hash()));
        let headers = downloader.next().await.unwrap();
        // headers come back tip-first; the local head (p3) itself is excluded
        assert_eq!(headers, Ok(vec![p0, p1, p2]));
        assert!(downloader.next().await.is_none());
        assert!(downloader.next().await.is_none());
    }
    #[tokio::test]
    async fn test_download_headers_from_file() {
        reth_tracing::init_test_tracing();
        // Generate some random blocks
        let (file, headers, _) = generate_bodies_file(0..=19).await;
        // now try to read them back
        let client: Arc<FileClient<Block>> =
            Arc::new(FileClient::from_file(file, NoopConsensus::arc()).await.unwrap());
        // construct headers downloader and use first header
        let mut header_downloader = ReverseHeadersDownloaderBuilder::default()
            .build(Arc::clone(&client), Arc::new(TestConsensus::default()));
        header_downloader.update_local_head(headers.first().unwrap().clone());
        header_downloader.update_sync_target(SyncTarget::Tip(headers.last().unwrap().hash()));
        // get headers first
        let mut downloaded_headers = header_downloader.next().await.unwrap().unwrap();
        // reverse to make sure it's in the right order before comparing
        downloaded_headers.reverse();
        // the first header is not included in the response
        assert_eq!(downloaded_headers, headers[1..]);
    }
    #[tokio::test]
    async fn test_download_bodies_from_file() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (file, headers, mut bodies) = generate_bodies_file(0..=19).await;
        // now try to read them back
        let client: Arc<FileClient<Block>> =
            Arc::new(FileClient::from_file(file, NoopConsensus::arc()).await.unwrap());
        // insert headers in db for the bodies downloader
        insert_headers(factory.db_ref().db(), &headers);
        let mut downloader = BodiesDownloaderBuilder::default().build::<Block, _, _>(
            client.clone(),
            Arc::new(TestConsensus::default()),
            factory,
        );
        downloader.set_download_range(0..=19).expect("failed to set download range");
        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
        );
    }
    #[tokio::test]
    async fn test_chunk_download_headers_from_file() {
        reth_tracing::init_test_tracing();
        // Generate some random blocks
        let (file, headers, _) = generate_bodies_file(0..=14).await;
        // calculate min for chunk byte length range, pick a lower bound that guarantees at least
        // one block will be read
        let chunk_byte_len = rand::rng().random_range(2000..=10_000);
        trace!(target: "downloaders::file::test", chunk_byte_len);
        // init reader
        let mut reader = ChunkedFileReader::from_file(file, chunk_byte_len as u64).await.unwrap();
        let mut downloaded_headers: Vec<SealedHeader> = vec![];
        let mut local_header = headers.first().unwrap().clone();
        // test: drive a fresh downloader per chunk, carrying the local head across chunks
        while let Some(client) =
            reader.next_chunk::<Block>(NoopConsensus::arc(), None).await.unwrap()
        {
            let sync_target = client.tip_header().unwrap();
            let sync_target_hash = sync_target.hash();
            // construct headers downloader and use first header
            let mut header_downloader = ReverseHeadersDownloaderBuilder::default()
                .build(Arc::clone(&Arc::new(client)), Arc::new(TestConsensus::default()));
            header_downloader.update_local_head(local_header.clone());
            header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash));
            // get headers first
            let mut downloaded_headers_chunk = header_downloader.next().await.unwrap().unwrap();
            // export new local header to outer scope
            local_header = sync_target;
            // reverse to make sure it's in the right order before comparing
            downloaded_headers_chunk.reverse();
            downloaded_headers.extend_from_slice(&downloaded_headers_chunk);
        }
        // the first header is not included in the response
        assert_eq!(headers[1..], downloaded_headers);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/lib.rs | crates/net/downloaders/src/lib.rs | //! Implements the downloader algorithms.
//!
//! ## Feature Flags
//!
//! - `test-utils`: Export utilities for testing
#![doc(
    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
    issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// The collection of algorithms for downloading block bodies.
pub mod bodies;
/// The collection of algorithms for downloading block headers.
pub mod headers;
/// Common downloader metrics.
pub mod metrics;
/// Module managing file-based data retrieval and buffering.
///
/// Contains [`FileClient`](file_client::FileClient) to read block data from files,
/// efficiently buffering headers and bodies for retrieval.
pub mod file_client;
/// Module managing file-based data retrieval and buffering of receipts.
///
/// Contains [`ReceiptFileClient`](receipt_file_client::ReceiptFileClient) to read receipt data from
/// files, efficiently buffering receipts for retrieval.
pub mod receipt_file_client;
/// Module with a codec for reading and encoding block bodies in files.
///
/// Enables decoding and encoding `Block` types within file contexts.
pub mod file_codec;
/// Shared downloader test helpers; compiled only for tests or with the `test-utils` feature.
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
// Re-export the chunk type and error shared by the file-based clients.
pub use file_client::{DecodedFileChunk, FileClientError};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/receipt_file_client.rs | crates/net/downloaders/src/receipt_file_client.rs | use std::{fmt, io};
use futures::Future;
use tokio::io::AsyncReadExt;
use tokio_stream::StreamExt;
use tokio_util::codec::{Decoder, FramedRead};
use tracing::{trace, warn};
use crate::{DecodedFileChunk, FileClientError};
/// Helper trait implemented for [`Decoder`] that decodes the receipt type.
///
/// The decoded item is an `Option` so a single frame can also encode a block with no
/// transactions (`None`).
pub trait ReceiptDecoder: Decoder<Item = Option<ReceiptWithBlockNumber<Self::Receipt>>> {
    /// The receipt type being decoded.
    type Receipt;
}
// Blanket impl: any decoder whose item is `Option<ReceiptWithBlockNumber<R>>` is automatically a
// `ReceiptDecoder` with `Receipt = R`; callers never implement this trait by hand.
impl<T, R> ReceiptDecoder for T
where
    T: Decoder<Item = Option<ReceiptWithBlockNumber<R>>>,
{
    type Receipt = R;
}
/// File client for reading RLP encoded receipts from file. Receipts in file must be in sequential
/// order w.r.t. block number.
#[derive(Debug)]
pub struct ReceiptFileClient<D: ReceiptDecoder> {
    /// The buffered receipts, read from file, as nested lists. One list per block number.
    ///
    /// Blocks without transactions are represented as empty inner lists.
    pub receipts: Vec<Vec<D::Receipt>>,
    /// First (lowest) block number read from file.
    pub first_block: u64,
    /// Total number of receipts. Count of elements in receipts flattened.
    pub total_receipts: usize,
}
/// Constructs a file client from a reader and decoder.
pub trait FromReceiptReader {
    /// Error returned by file client type.
    type Error: From<io::Error>;
    /// Returns a file client
    ///
    /// * `reader` - source of encoded receipts
    /// * `num_bytes` - expected chunk size, used to size the decode buffer
    /// * `prev_chunk_highest_block` - highest block decoded from the previous chunk, if any; used
    ///   to number leading empty blocks when a file is read in chunks
    fn from_receipt_reader<B>(
        reader: B,
        num_bytes: u64,
        prev_chunk_highest_block: Option<u64>,
    ) -> impl Future<Output = Result<DecodedFileChunk<Self>, Self::Error>>
    where
        Self: Sized,
        B: AsyncReadExt + Unpin;
}
impl<D> FromReceiptReader for ReceiptFileClient<D>
where
    D: ReceiptDecoder<Error = FileClientError> + fmt::Debug + Default,
{
    type Error = D::Error;
    /// Initialize the [`ReceiptFileClient`] from bytes that have been read from file. Caution! If
    /// first block has no transactions, it's assumed to be the genesis block.
    ///
    /// Decodes frames from `reader` until the stream is exhausted or a partial (cut-off) receipt
    /// is hit, grouping receipts into one list per block. A decoded `None` frame marks a block
    /// with no transactions.
    fn from_receipt_reader<B>(
        reader: B,
        num_bytes: u64,
        prev_chunk_highest_block: Option<u64>,
    ) -> impl Future<Output = Result<DecodedFileChunk<Self>, Self::Error>>
    where
        B: AsyncReadExt + Unpin,
    {
        let mut receipts = Vec::default();
        // use with_capacity to make sure the internal buffer contains the entire chunk
        let mut stream = FramedRead::with_capacity(reader, D::default(), num_bytes as usize);
        trace!(target: "downloaders::file",
            target_num_bytes=num_bytes,
            capacity=stream.read_buffer().capacity(),
            codec=?D::default(),
            "init decode stream"
        );
        let mut remaining_bytes = vec![];
        let mut log_interval = 0;
        let mut log_interval_start_block = 0;
        // number of the block whose receipts are currently being accumulated
        let mut block_number = 0;
        let mut total_receipts = 0;
        let mut receipts_for_block = vec![];
        // lowest block number seen in this chunk; `None` until the first frame is decoded
        let mut first_block = None;
        async move {
            while let Some(receipt_res) = stream.next().await {
                let receipt = match receipt_res {
                    Ok(receipt) => receipt,
                    Err(FileClientError::Rlp(err, bytes)) => {
                        trace!(target: "downloaders::file",
                            %err,
                            bytes_len=bytes.len(),
                            "partial receipt returned from decoding chunk"
                        );
                        // a receipt was cut off at the chunk boundary; hand the leftover bytes
                        // back to the caller and stop decoding this chunk
                        remaining_bytes = bytes;
                        break
                    }
                    Err(err) => return Err(err),
                };
                match receipt {
                    Some(ReceiptWithBlockNumber { receipt, number }) => {
                        // receipts must arrive in ascending block order; drop regressions
                        if block_number > number {
                            warn!(target: "downloaders::file", previous_block_number = block_number, "skipping receipt from a lower block: {number}");
                            continue
                        }
                        total_receipts += 1;
                        if first_block.is_none() {
                            first_block = Some(number);
                            block_number = number;
                        }
                        if block_number == number {
                            receipts_for_block.push(receipt);
                        } else {
                            receipts.push(receipts_for_block);
                            // next block
                            block_number = number;
                            receipts_for_block = vec![receipt];
                        }
                    }
                    None => {
                        match first_block {
                            Some(num) => {
                                // if there was a block number before this, push receipts for that
                                // block
                                receipts.push(receipts_for_block);
                                // block with no txns
                                // (an empty block carries no number in the data, so derive it from
                                // the first block plus the count of block lists collected so far)
                                block_number = num + receipts.len() as u64;
                            }
                            None => {
                                // this is the first block and it's empty
                                if let Some(highest_block) = prev_chunk_highest_block {
                                    // this is a chunked read and this is not the first chunk
                                    block_number = highest_block + 1;
                                } else {
                                    // this is not a chunked read or this is the first chunk. assume
                                    // it's the genesis block
                                    block_number = 0;
                                }
                                first_block = Some(block_number);
                            }
                        }
                        receipts_for_block = vec![];
                    }
                }
                if log_interval == 0 {
                    trace!(target: "downloaders::file",
                        block_number,
                        total_receipts,
                        "read first receipt"
                    );
                    log_interval_start_block = block_number;
                } else if log_interval % 100_000 == 0 {
                    trace!(target: "downloaders::file",
                        blocks=?log_interval_start_block..=block_number,
                        total_receipts,
                        "read receipts from file"
                    );
                    log_interval_start_block = block_number + 1;
                }
                log_interval += 1;
            }
            trace!(target: "downloaders::file",
                blocks=?log_interval_start_block..=block_number,
                total_receipts,
                "read receipts from file"
            );
            // we need to push the last receipts
            receipts.push(receipts_for_block);
            trace!(target: "downloaders::file",
                blocks = receipts.len(),
                total_receipts,
                "Initialized receipt file client"
            );
            Ok(DecodedFileChunk {
                file_client: Self {
                    receipts,
                    first_block: first_block.unwrap_or_default(),
                    total_receipts,
                },
                remaining_bytes,
                highest_block: Some(block_number),
            })
        }
    }
}
/// Receipt with block number.
///
/// Pairs a decoded receipt with the number of the block that contains it.
#[derive(Debug, PartialEq, Eq)]
pub struct ReceiptWithBlockNumber<R> {
    /// Receipt.
    pub receipt: R,
    /// Block number.
    pub number: u64,
}
// Unit tests for the receipt file client, using a mock flat receipt export format.
#[cfg(test)]
mod test {
    use alloy_primitives::{
        address, b256,
        bytes::{Buf, BytesMut},
        hex, Bytes, Log, LogData,
    };
    use alloy_rlp::{Decodable, RlpDecodable};
    use reth_ethereum_primitives::{Receipt, TxType};
    use reth_tracing::init_test_tracing;
    use tokio_util::codec::Decoder;
    use super::{FromReceiptReader, ReceiptFileClient, ReceiptWithBlockNumber};
    use crate::{DecodedFileChunk, FileClientError};
    // Flat receipt representation as it appears in the exported file.
    #[derive(Debug, PartialEq, Eq, RlpDecodable)]
    struct MockReceipt {
        tx_type: u8,
        status: u64,
        cumulative_gas_used: u64,
        logs: Vec<Log>,
        block_number: u64,
    }
    // `#[rlp(trailing)]` makes the inner receipt optional: an empty RLP list (`0xc0`) decodes to
    // `None`, which marks a block with no transactions.
    #[derive(Debug, PartialEq, Eq, RlpDecodable)]
    #[rlp(trailing)]
    struct MockReceiptContainer(Option<MockReceipt>);
    impl TryFrom<MockReceipt> for ReceiptWithBlockNumber<Receipt> {
        type Error = FileClientError;
        fn try_from(exported_receipt: MockReceipt) -> Result<Self, Self::Error> {
            let MockReceipt { tx_type, status, cumulative_gas_used, logs, block_number: number } =
                exported_receipt;
            let receipt = Receipt {
                tx_type: TxType::try_from(tx_type.to_be_bytes()[0])
                    .map_err(|err| FileClientError::Rlp(err.into(), vec![tx_type]))?,
                success: status != 0,
                cumulative_gas_used,
                logs,
            };
            Ok(Self { receipt, number })
        }
    }
    #[derive(Debug, Default)]
    struct MockReceiptFileCodec;
    impl Decoder for MockReceiptFileCodec {
        type Item = Option<ReceiptWithBlockNumber<Receipt>>;
        type Error = FileClientError;
        // Decodes one container frame from the front of `src` and converts it to the real receipt
        // type; advances `src` past exactly the bytes consumed.
        fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
            if src.is_empty() {
                return Ok(None)
            }
            let buf_slice = &mut src.as_ref();
            let receipt = MockReceiptContainer::decode(buf_slice)
                .map_err(|err| Self::Error::Rlp(err, src.to_vec()))?
                .0;
            src.advance(src.len() - buf_slice.len());
            Ok(Some(receipt.map(|receipt| receipt.try_into()).transpose()?))
        }
    }
    /// No receipts for genesis block
    const MOCK_RECEIPT_BLOCK_NO_TRANSACTIONS: &[u8] = &hex!("c0");
    const MOCK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!(
        "f901a4f901a1800183031843f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac6027ba00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ba000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb683480008001"
    );
    const MOCK_RECEIPT_ENCODED_BLOCK_2: &[u8] = &hex!(
        "f90106f9010380018301c60df8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68da000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234ea000000000000000000000000000000000000000000000007eda7867e0c7d480008002"
    );
    const MOCK_RECEIPT_ENCODED_BLOCK_3: &[u8] = &hex!(
        "f90106f9010380018301c60df8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68da000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aef842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234ea000000000000000000000000000000000000000000000007ed8842f06277480008003"
    );
    fn mock_receipt_1() -> MockReceipt {
        let receipt = receipt_block_1();
        MockReceipt {
            tx_type: receipt.receipt.tx_type as u8,
            status: receipt.receipt.success as u64,
            cumulative_gas_used: receipt.receipt.cumulative_gas_used,
            logs: receipt.receipt.logs,
            block_number: 1,
        }
    }
    fn mock_receipt_2() -> MockReceipt {
        let receipt = receipt_block_2();
        MockReceipt {
            tx_type: receipt.receipt.tx_type as u8,
            status: receipt.receipt.success as u64,
            cumulative_gas_used: receipt.receipt.cumulative_gas_used,
            logs: receipt.receipt.logs,
            block_number: 2,
        }
    }
    fn mock_receipt_3() -> MockReceipt {
        let receipt = receipt_block_3();
        MockReceipt {
            tx_type: receipt.receipt.tx_type as u8,
            status: receipt.receipt.success as u64,
            cumulative_gas_used: receipt.receipt.cumulative_gas_used,
            logs: receipt.receipt.logs,
            block_number: 3,
        }
    }
    // Expected decoded receipt for `MOCK_RECEIPT_ENCODED_BLOCK_1`.
    fn receipt_block_1() -> ReceiptWithBlockNumber<Receipt> {
        let log_1 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac6027b"),
                    b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
                    b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"),
                ],
                Bytes::from(hex!(
                    "00000000000000000000000000000000000000000000000000000000618d8837"
                )),
            )
            .unwrap(),
        };
        let log_2 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68b"),
                    b256!("0x00000000000000000000000000000000000000000000000000000000d0e3ebf0"),
                    b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
                    b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"),
                ],
                Bytes::default(),
            )
            .unwrap(),
        };
        let log_3 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
                    b256!("0x00000000000000000000000000000000000000000000007edc6ca0bb68348000"),
                ],
                Bytes::default(),
            )
            .unwrap(),
        };
        // feature must not be brought into scope
        let mut receipt = Receipt {
            tx_type: TxType::Legacy,
            success: true,
            cumulative_gas_used: 202819,
            logs: vec![],
        };
        receipt.logs = vec![log_1, log_2, log_3];
        ReceiptWithBlockNumber { receipt, number: 1 }
    }
    // Expected decoded receipt for `MOCK_RECEIPT_ENCODED_BLOCK_2`.
    fn receipt_block_2() -> ReceiptWithBlockNumber<Receipt> {
        let log_1 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68d"),
                    b256!("0x00000000000000000000000000000000000000000000000000000000d0ea0e40"),
                    b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
                    b256!("0x000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240"),
                ],
                Bytes::default(),
            )
            .unwrap(),
        };
        let log_2 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234e"),
                    b256!("0x00000000000000000000000000000000000000000000007eda7867e0c7d48000"),
                ],
                Bytes::default(),
            )
            .unwrap(),
        };
        let mut receipt = Receipt {
            tx_type: TxType::Legacy,
            success: true,
            cumulative_gas_used: 116237,
            logs: vec![],
        };
        receipt.logs = vec![log_1, log_2];
        ReceiptWithBlockNumber { receipt, number: 2 }
    }
    // Expected decoded receipt for `MOCK_RECEIPT_ENCODED_BLOCK_3`.
    fn receipt_block_3() -> ReceiptWithBlockNumber<Receipt> {
        let log_1 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68d"),
                    b256!("0x00000000000000000000000000000000000000000000000000000000d101e54b"),
                    b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
                    b256!("0x000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99"),
                ],
                Bytes::default(),
            )
            .unwrap(),
        };
        let log_2 = Log {
            address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850ae"),
            data: LogData::new(
                vec![
                    b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234e"),
                    b256!("0x00000000000000000000000000000000000000000000007ed8842f0627748000"),
                ],
                Bytes::default(),
            )
            .unwrap(),
        };
        let mut receipt = Receipt {
            tx_type: TxType::Legacy,
            success: true,
            cumulative_gas_used: 116237,
            ..Default::default()
        };
        receipt.logs = vec![log_1, log_2];
        ReceiptWithBlockNumber { receipt, number: 3 }
    }
    #[test]
    fn decode_mock_receipt() {
        let receipt1 = mock_receipt_1();
        let decoded1 = MockReceiptContainer::decode(&mut &MOCK_RECEIPT_ENCODED_BLOCK_1[..])
            .unwrap()
            .0
            .unwrap();
        assert_eq!(receipt1, decoded1);
        let receipt2 = mock_receipt_2();
        let decoded2 = MockReceiptContainer::decode(&mut &MOCK_RECEIPT_ENCODED_BLOCK_2[..])
            .unwrap()
            .0
            .unwrap();
        assert_eq!(receipt2, decoded2);
        let receipt3 = mock_receipt_3();
        let decoded3 = MockReceiptContainer::decode(&mut &MOCK_RECEIPT_ENCODED_BLOCK_3[..])
            .unwrap()
            .0
            .unwrap();
        assert_eq!(receipt3, decoded3);
    }
    #[test]
    fn receipts_codec() {
        // rig
        let mut receipt_1_to_3 = MOCK_RECEIPT_ENCODED_BLOCK_1.to_vec();
        receipt_1_to_3.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_2);
        receipt_1_to_3.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_3);
        let encoded = &mut BytesMut::from(&receipt_1_to_3[..]);
        let mut codec = MockReceiptFileCodec;
        // test: each `decode` call consumes exactly one receipt frame, in order
        let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
        assert_eq!(receipt_block_1(), first_decoded_receipt);
        let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
        assert_eq!(receipt_block_2(), second_decoded_receipt);
        let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
        assert_eq!(receipt_block_3(), third_decoded_receipt);
    }
    #[tokio::test]
    async fn receipt_file_client_ovm_codec() {
        init_test_tracing();
        // genesis block has no hack receipts
        let mut encoded_receipts = MOCK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec();
        // one receipt each for block 1 and 2
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_1);
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_2);
        // trailing block with no transactions (block 3)
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_BLOCK_NO_TRANSACTIONS);
        let encoded_byte_len = encoded_receipts.len() as u64;
        let reader = &mut &encoded_receipts[..];
        let DecodedFileChunk {
            file_client: ReceiptFileClient { receipts, first_block, total_receipts, .. },
            ..
        } = ReceiptFileClient::<MockReceiptFileCodec>::from_receipt_reader(
            reader,
            encoded_byte_len,
            None,
        )
        .await
        .unwrap();
        // 2 non-empty receipt objects
        assert_eq!(2, total_receipts);
        assert_eq!(0, first_block);
        assert!(receipts[0].is_empty());
        assert_eq!(receipt_block_1().receipt, receipts[1][0].clone());
        assert_eq!(receipt_block_2().receipt, receipts[2][0].clone());
        assert!(receipts[3].is_empty());
    }
    #[tokio::test]
    async fn no_receipts_middle_block() {
        init_test_tracing();
        // genesis block has no hack receipts
        let mut encoded_receipts = MOCK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec();
        // one receipt for block 1
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_1);
        // no receipt for block 2
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_BLOCK_NO_TRANSACTIONS);
        // one receipt for block 3
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_3);
        let encoded_byte_len = encoded_receipts.len() as u64;
        let reader = &mut &encoded_receipts[..];
        let DecodedFileChunk {
            file_client: ReceiptFileClient { receipts, first_block, total_receipts, .. },
            ..
        } = ReceiptFileClient::<MockReceiptFileCodec>::from_receipt_reader(
            reader,
            encoded_byte_len,
            None,
        )
        .await
        .unwrap();
        // 2 non-empty receipt objects
        assert_eq!(2, total_receipts);
        assert_eq!(0, first_block);
        assert!(receipts[0].is_empty());
        assert_eq!(receipt_block_1().receipt, receipts[1][0].clone());
        assert!(receipts[2].is_empty());
        assert_eq!(receipt_block_3().receipt, receipts[3][0].clone());
    }
    #[tokio::test]
    async fn two_receipts_same_block() {
        init_test_tracing();
        // genesis block has no hack receipts
        let mut encoded_receipts = MOCK_RECEIPT_BLOCK_NO_TRANSACTIONS.to_vec();
        // one receipt for block 1
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_1);
        // two receipts for block 2
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_2);
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_2);
        // one receipt for block 3
        encoded_receipts.extend_from_slice(MOCK_RECEIPT_ENCODED_BLOCK_3);
        let encoded_byte_len = encoded_receipts.len() as u64;
        let reader = &mut &encoded_receipts[..];
        let DecodedFileChunk {
            file_client: ReceiptFileClient { receipts, first_block, total_receipts, .. },
            ..
        } = ReceiptFileClient::<MockReceiptFileCodec>::from_receipt_reader(
            reader,
            encoded_byte_len,
            None,
        )
        .await
        .unwrap();
        // 4 non-empty receipt objects
        assert_eq!(4, total_receipts);
        assert_eq!(0, first_block);
        assert!(receipts[0].is_empty());
        assert_eq!(receipt_block_1().receipt, receipts[1][0].clone());
        assert_eq!(receipt_block_2().receipt, receipts[2][0].clone());
        assert_eq!(receipt_block_2().receipt, receipts[2][1].clone());
        assert_eq!(receipt_block_3().receipt, receipts[3][0].clone());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/file_codec.rs | crates/net/downloaders/src/file_codec.rs | //! Codec for reading raw block bodies from a file.
use crate::file_client::FileClientError;
use alloy_primitives::bytes::{Buf, BytesMut};
use alloy_rlp::{Decodable, Encodable};
use tokio_util::codec::{Decoder, Encoder};
/// Codec for reading raw block bodies from a file.
///
/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the
/// framed reader has capacity for the entire block file. Otherwise, the decoder will return
/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP headers can only be
/// decoded if the internal buffer is large enough to contain the entire block body.
///
/// Without ensuring the framed reader has capacity for the entire file, a block body is likely to
/// fall across two read buffers, the decoder will not be able to decode the header, which will
/// cause it to fail.
///
/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set
/// the capacity of the framed reader to the size of the file.
pub(crate) struct BlockFileCodec<B>(std::marker::PhantomData<B>);
impl<B> Default for BlockFileCodec<B> {
    // Manual impl: the codec is stateless, and deriving `Default` would add an unnecessary
    // `B: Default` bound.
    fn default() -> Self {
        Self(std::marker::PhantomData)
    }
}
impl<B: Decodable> Decoder for BlockFileCodec<B> {
    type Item = B;
    type Error = FileClientError;
    /// Decodes one RLP-encoded body from the front of `src`.
    ///
    /// Returns `Ok(None)` once the buffer is empty. On a decode failure the whole remaining
    /// buffer is captured in the error payload so the caller can recover it.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if src.is_empty() {
            return Ok(None)
        }
        // decode against an immutable view; `rest` ends up pointing past the consumed bytes
        let mut rest = src.as_ref();
        let decoded = match B::decode(&mut rest) {
            Ok(body) => body,
            Err(err) => return Err(FileClientError::Rlp(err, src.to_vec())),
        };
        // drop exactly the bytes the decoder consumed from the real buffer
        let consumed = src.len() - rest.len();
        src.advance(consumed);
        Ok(Some(decoded))
    }
}
impl<B: Encodable> Encoder<B> for BlockFileCodec<B> {
    type Error = FileClientError;
    /// RLP-encodes `item` and appends the bytes to `dst`; encoding itself cannot fail.
    fn encode(&mut self, item: B, dst: &mut BytesMut) -> Result<(), Self::Error> {
        item.encode(dst);
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/metrics.rs | crates/net/downloaders/src/metrics.rs | use reth_metrics::{
metrics::{Counter, Gauge},
Metrics,
};
use reth_network_p2p::error::DownloadError;
/// Common body downloader metrics.
///
/// These metrics will be initialized with the `downloaders.bodies` scope.
/// ```
/// use reth_downloaders::metrics::BodyDownloaderMetrics;
/// use reth_network_p2p::error::DownloadError;
///
/// // Initialize metrics.
/// let metrics = BodyDownloaderMetrics::default();
/// // Increment `downloaders.bodies.timeout_errors` counter by 1.
/// metrics.increment_errors(&DownloadError::Timeout);
/// ```
#[derive(Clone, Metrics)]
#[metrics(scope = "downloaders.bodies")]
pub struct BodyDownloaderMetrics {
    /// The number of items that were successfully sent to the poller (stage)
    pub total_flushed: Counter,
    /// Number of items that were successfully downloaded
    pub total_downloaded: Counter,
    /// The number of requests (can contain more than 1 item) currently in-flight.
    pub in_flight_requests: Gauge,
    /// The number of responses (can contain more than 1 item) in the internal buffer of the
    /// downloader.
    pub buffered_responses: Gauge,
    /// The number of blocks in the internal buffer of the downloader.
    ///
    /// These are bodies that have been received, but cannot be committed yet because they're
    /// not contiguous
    pub buffered_blocks: Gauge,
    /// Total amount of memory used by the buffered blocks in bytes
    pub buffered_blocks_size_bytes: Gauge,
    /// The number of blocks that are contiguous and are queued for insertion into the db.
    pub queued_blocks: Gauge,
    /// The number of out-of-order requests sent by the downloader.
    ///
    /// The consumer of the download stream is able to re-request data (bodies) in case
    /// it encountered a recoverable error (e.g. during insertion).
    /// Out-of-order requests happen when the new download range start for bodies downloader
    /// is less than the last block number returned from the stream.
    pub out_of_order_requests: Counter,
    /// Number of timeout errors while requesting items
    pub timeout_errors: Counter,
    /// Number of validation errors while requesting items
    pub validation_errors: Counter,
    /// Number of unexpected errors while requesting items
    pub unexpected_errors: Counter,
}
impl BodyDownloaderMetrics {
    /// Increment errors counter.
    ///
    /// Routes the error to the counter matching its kind: timeouts, body-validation failures, or
    /// anything else as "unexpected".
    pub fn increment_errors(&self, error: &DownloadError) {
        let counter = match error {
            DownloadError::Timeout => &self.timeout_errors,
            DownloadError::BodyValidation { .. } => &self.validation_errors,
            _ => &self.unexpected_errors,
        };
        counter.increment(1);
    }
}
/// Metrics for an individual response, i.e. the size in bytes, and length (number of bodies) in the
/// response.
///
/// NOTE: both values are [`Gauge`]s, i.e. they reflect the most recently observed response rather
/// than a running total.
///
/// These metrics will be initialized with the `downloaders.bodies.response` scope.
#[derive(Clone, Metrics)]
#[metrics(scope = "downloaders.bodies.response")]
pub struct ResponseMetrics {
    /// The size (in bytes) of an individual bodies response received by the downloader.
    pub response_size_bytes: Gauge,
    /// The number of bodies in an individual bodies response received by the downloader.
    pub response_length: Gauge,
}
/// Common header downloader metrics.
///
/// These metrics will be initialized with the `downloaders.headers` scope.
/// ```
/// use reth_downloaders::metrics::HeaderDownloaderMetrics;
/// use reth_network_p2p::error::DownloadError;
///
/// // Initialize metrics.
/// let metrics = HeaderDownloaderMetrics::default();
/// // Increment `downloaders.headers.timeout_errors` counter by 1.
/// metrics.increment_errors(&DownloadError::Timeout);
/// ```
#[derive(Clone, Metrics)]
#[metrics(scope = "downloaders.headers")]
pub struct HeaderDownloaderMetrics {
    /// The number of items that were successfully sent to the poller (stage)
    pub total_flushed: Counter,
    /// Number of items that were successfully downloaded
    pub total_downloaded: Counter,
    /// The number of requests (can contain more than 1 item) currently in-flight.
    pub in_flight_requests: Gauge,
    /// The number of responses (can contain more than 1 item) in the internal buffer of the
    /// downloader.
    pub buffered_responses: Gauge,
    /// The number of blocks in the internal buffer of the downloader.
    ///
    /// These are bodies that have been received, but cannot be committed yet because they're
    /// not contiguous
    pub buffered_blocks: Gauge,
    /// Total amount of memory used by the buffered blocks in bytes
    pub buffered_blocks_size_bytes: Gauge,
    /// The number of blocks that are contiguous and are queued for insertion into the db.
    pub queued_blocks: Gauge,
    /// The number of out-of-order requests sent by the downloader.
    ///
    /// The consumer of the download stream is able to re-request data (headers) in case
    /// it encountered a recoverable error (e.g. during insertion).
    /// Out-of-order requests happen when the headers downloader `SyncTarget::Tip`
    /// hash is different from the previous sync target hash.
    pub out_of_order_requests: Counter,
    /// Number of timeout errors while requesting items
    pub timeout_errors: Counter,
    /// Number of validation errors while requesting items
    pub validation_errors: Counter,
    /// Number of unexpected errors while requesting items
    pub unexpected_errors: Counter,
}
impl HeaderDownloaderMetrics {
    /// Increment errors counter.
    ///
    /// Buckets the given [`DownloadError`] into the matching error counter;
    /// anything that is neither a timeout nor a header-validation failure is
    /// counted as unexpected.
    pub fn increment_errors(&self, error: &DownloadError) {
        // Select the counter that corresponds to the error kind, then bump it once.
        let counter = match error {
            DownloadError::Timeout => &self.timeout_errors,
            DownloadError::HeaderValidation { .. } => &self.validation_errors,
            _ => &self.unexpected_errors,
        };
        counter.increment(1);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/noop.rs | crates/net/downloaders/src/bodies/noop.rs | use alloy_primitives::BlockNumber;
use futures::Stream;
use reth_network_p2p::{
bodies::{downloader::BodyDownloader, response::BlockResponse},
error::{DownloadError, DownloadResult},
};
use reth_primitives_traits::Block;
use std::{fmt::Debug, ops::RangeInclusive};
/// A [`BodyDownloader`] implementation that does nothing.
///
/// Useful as a placeholder where a downloader is required but bodies are never
/// actually fetched (polling it panics, see the `Stream` impl).
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct NoopBodiesDownloader<B> {
    // Marker tying the no-op downloader to a concrete block type.
    _block: std::marker::PhantomData<B>,
}
impl<B: Block + 'static> BodyDownloader for NoopBodiesDownloader<B> {
    type Block = B;
    /// Accepts any range and discards it; the no-op downloader never requests anything.
    fn set_download_range(&mut self, _: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
        Ok(())
    }
}
impl<B: Block + 'static> Stream for NoopBodiesDownloader<B> {
    type Item = Result<Vec<BlockResponse<B>>, DownloadError>;
    /// Always panics: the no-op downloader is a placeholder and must never be
    /// driven as a stream.
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        _: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        panic!("NoopBodiesDownloader shouldn't be polled.")
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/test_utils.rs | crates/net/downloaders/src/bodies/test_utils.rs | //! Test helper impls for generating bodies
#![allow(dead_code)]
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use reth_db::DatabaseEnv;
use reth_db_api::{database::Database, tables, transaction::DbTxMut};
use reth_ethereum_primitives::BlockBody;
use reth_network_p2p::bodies::response::BlockResponse;
use reth_primitives_traits::{Block, SealedBlock, SealedHeader};
use std::collections::HashMap;
/// Pairs each sealed header with its downloaded body, producing the
/// [`BlockResponse`]s the bodies stage expects.
///
/// Headers that are empty become [`BlockResponse::Empty`]; all others are
/// combined with the body removed from `bodies` (keyed by header hash) into
/// [`BlockResponse::Full`].
///
/// # Panics
/// Panics if a header has no corresponding entry in `bodies`.
pub(crate) fn zip_blocks<'a, B: Block>(
    headers: impl Iterator<Item = &'a SealedHeader<B::Header>>,
    bodies: &mut HashMap<B256, B::Body>,
) -> Vec<BlockResponse<B>> {
    // `headers` is already an `Iterator`, so map over it directly
    // (the previous `.into_iter()` call was a redundant identity).
    headers
        .map(|header| {
            // Remove (not just look up) so each body is consumed exactly once.
            let body = bodies.remove(&header.hash()).expect("body exists");
            if header.is_empty() {
                BlockResponse::Empty(header.clone())
            } else {
                BlockResponse::Full(SealedBlock::from_sealed_parts(header.clone(), body))
            }
        })
        .collect()
}
/// Builds full blocks by joining each sealed header with its body, removing the
/// body from `bodies` (keyed by header hash) as it goes.
///
/// # Panics
/// Panics if a header has no matching entry in `bodies`.
pub(crate) fn create_raw_bodies(
    headers: impl IntoIterator<Item = SealedHeader>,
    bodies: &mut HashMap<B256, BlockBody>,
) -> Vec<reth_ethereum_primitives::Block> {
    let mut blocks = Vec::new();
    for header in headers {
        let body = bodies.remove(&header.hash()).expect("body exists");
        blocks.push(body.into_block(header.unseal()));
    }
    blocks
}
/// Writes the given sealed headers into the test database in a single
/// transaction, populating both the canonical-hash index and the headers table.
#[inline]
pub(crate) fn insert_headers(db: &DatabaseEnv, headers: &[SealedHeader]) {
    db.update(|tx| {
        for header in headers {
            tx.put::<tables::CanonicalHeaders>(header.number, header.hash()).unwrap();
            tx.put::<tables::Headers>(header.number, header.clone_header()).unwrap();
        }
    })
    .expect("failed to commit")
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/bodies.rs | crates/net/downloaders/src/bodies/bodies.rs | use super::queue::BodiesRequestQueue;
use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics};
use alloy_consensus::BlockHeader;
use alloy_primitives::BlockNumber;
use futures::Stream;
use futures_util::StreamExt;
use reth_config::BodiesConfig;
use reth_consensus::{Consensus, ConsensusError};
use reth_network_p2p::{
bodies::{
client::BodiesClient,
downloader::{BodyDownloader, BodyDownloaderResult},
response::BlockResponse,
},
error::{DownloadError, DownloadResult},
};
use reth_primitives_traits::{size::InMemorySize, Block, SealedHeader};
use reth_storage_api::HeaderProvider;
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use std::{
cmp::Ordering,
collections::BinaryHeap,
fmt::Debug,
mem,
ops::RangeInclusive,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tracing::info;
/// Downloads bodies in batches.
///
/// All blocks in a batch are fetched at the same time.
///
/// Buffered responses are kept in a min-heap (see `OrderedBodiesResponse`) so
/// bodies are handed out ordered by block number.
#[must_use = "Stream does nothing unless polled"]
#[derive(Debug)]
pub struct BodiesDownloader<
    B: Block,
    C: BodiesClient<Body = B::Body>,
    Provider: HeaderProvider<Header = B::Header>,
> {
    /// The bodies client
    client: Arc<C>,
    /// The consensus client
    consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
    /// The database handle
    provider: Provider,
    /// The maximum number of non-empty blocks per one request
    request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    stream_batch_size: usize,
    /// The allowed range for number of concurrent requests.
    concurrent_requests_range: RangeInclusive<usize>,
    /// Maximum number of bytes of received blocks to buffer internally.
    max_buffered_blocks_size_bytes: usize,
    /// Current estimated size of buffered blocks in bytes.
    buffered_blocks_size_bytes: usize,
    /// The range of block numbers for body download.
    download_range: RangeInclusive<BlockNumber>,
    /// The latest block number returned.
    latest_queued_block_number: Option<BlockNumber>,
    /// Requests in progress
    in_progress_queue: BodiesRequestQueue<B, C>,
    /// Buffered responses
    buffered_responses: BinaryHeap<OrderedBodiesResponse<B>>,
    /// Queued body responses that can be returned for insertion into the database.
    queued_bodies: Vec<BlockResponse<B>>,
    /// The bodies downloader metrics.
    metrics: BodyDownloaderMetrics,
}
impl<B, C, Provider> BodiesDownloader<B, C, Provider>
where
    B: Block,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    /// Returns the next contiguous request.
    ///
    /// Starts right after the last block number already requested, or at the
    /// beginning of the download range if nothing has been requested yet.
    fn next_headers_request(&self) -> DownloadResult<Option<Vec<SealedHeader<Provider::Header>>>> {
        let start_at = match self.in_progress_queue.last_requested_block_number {
            Some(num) => num + 1,
            None => *self.download_range.start(),
        };
        // as the range is inclusive, we need to add 1 to the end.
        let items_left = (self.download_range.end() + 1).saturating_sub(start_at);
        let limit = items_left.min(self.request_limit);
        self.query_headers(start_at..=*self.download_range.end(), limit)
    }
    /// Retrieve a batch of headers from the database starting from the provided block number.
    ///
    /// This method is going to return the batch as soon as one of the conditions below
    /// is fulfilled:
    /// 1. The number of non-empty headers in the batch equals requested.
    /// 2. The total number of headers in the batch (both empty and non-empty) is greater than
    ///    or equal to the stream batch size.
    /// 3. Downloader reached the end of the range
    ///
    /// NOTE: The batches returned have a variable length.
    fn query_headers(
        &self,
        range: RangeInclusive<BlockNumber>,
        max_non_empty: u64,
    ) -> DownloadResult<Option<Vec<SealedHeader<B::Header>>>> {
        if range.is_empty() || max_non_empty == 0 {
            return Ok(None)
        }
        // Collect headers while
        // 1. Current block number is in range
        // 2. The number of non empty headers is less than maximum
        // 3. The total number of headers is less than the stream batch size (this is only
        //    relevant if the range consists entirely of empty headers)
        let mut collected = 0;
        let mut non_empty_headers = 0;
        let headers = self.provider.sealed_headers_while(range.clone(), |header| {
            let should_take = range.contains(&header.number()) &&
                non_empty_headers < max_non_empty &&
                collected < self.stream_batch_size;
            if should_take {
                collected += 1;
                if !header.is_empty() {
                    non_empty_headers += 1;
                }
                true
            } else {
                false
            }
        })?;
        // An empty result is treated as "nothing to request" (None).
        Ok(Some(headers).filter(|h| !h.is_empty()))
    }
    /// Get the next expected block number for queueing.
    const fn next_expected_block_number(&self) -> BlockNumber {
        match self.latest_queued_block_number {
            Some(num) => num + 1,
            None => *self.download_range.start(),
        }
    }
    /// Max requests to handle at the same time
    ///
    /// This depends on the number of active peers but will always be
    /// `min_concurrent_requests..max_concurrent_requests`
    #[inline]
    fn concurrent_request_limit(&self) -> usize {
        let num_peers = self.client.num_connected_peers();
        let max_requests = num_peers.max(*self.concurrent_requests_range.start());
        // if we're only connected to a few peers, we keep it low
        if num_peers < *self.concurrent_requests_range.start() {
            return max_requests
        }
        max_requests.min(*self.concurrent_requests_range.end())
    }
    /// Returns true if the size of buffered blocks is lower than the configured maximum
    const fn has_buffer_capacity(&self) -> bool {
        self.buffered_blocks_size_bytes < self.max_buffered_blocks_size_bytes
    }
    /// Check if the stream is terminated: nothing left to request and all
    /// internal buffers/queues are empty.
    fn is_terminated(&self) -> bool {
        // There is nothing to request if the range is empty
        let nothing_to_request = self.download_range.is_empty() ||
            // or all blocks have already been requested.
            self.in_progress_queue
                .last_requested_block_number.is_some_and(|last| last == *self.download_range.end());
        nothing_to_request &&
            self.in_progress_queue.is_empty() &&
            self.buffered_responses.is_empty() &&
            self.queued_bodies.is_empty()
    }
    /// Clear all download related data.
    ///
    /// Should be invoked upon encountering fatal error.
    fn clear(&mut self) {
        // An empty range (1..=0) marks the downloader as having nothing to do.
        self.download_range = RangeInclusive::new(1, 0);
        self.latest_queued_block_number.take();
        self.in_progress_queue.clear();
        self.queued_bodies = Vec::new();
        self.buffered_responses = BinaryHeap::new();
        self.buffered_blocks_size_bytes = 0;
        // reset metrics
        self.metrics.in_flight_requests.set(0.);
        self.metrics.buffered_responses.set(0.);
        self.metrics.buffered_blocks.set(0.);
        self.metrics.buffered_blocks_size_bytes.set(0.);
        self.metrics.queued_blocks.set(0.);
    }
    /// Queues bodies and sets the latest queued block number
    ///
    /// # Panics
    /// Panics if `bodies` is empty.
    fn queue_bodies(&mut self, bodies: Vec<BlockResponse<B>>) {
        self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number());
        self.queued_bodies.extend(bodies);
        self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
    }
    /// Removes the next response from the buffer, keeping the byte-size
    /// accounting and metrics in sync.
    fn pop_buffered_response(&mut self) -> Option<OrderedBodiesResponse<B>> {
        let resp = self.buffered_responses.pop()?;
        self.metrics.buffered_responses.decrement(1.);
        self.buffered_blocks_size_bytes -= resp.size();
        self.metrics.buffered_blocks.decrement(resp.len() as f64);
        self.metrics.buffered_blocks_size_bytes.set(self.buffered_blocks_size_bytes as f64);
        Some(resp)
    }
    /// Adds a new response to the internal buffer
    fn buffer_bodies_response(&mut self, response: Vec<BlockResponse<B>>) {
        // take into account capacity
        let size = response.iter().map(BlockResponse::size).sum::<usize>() +
            response.capacity() * mem::size_of::<BlockResponse<B>>();
        let response = OrderedBodiesResponse { resp: response, size };
        let response_len = response.len();
        self.buffered_blocks_size_bytes += size;
        self.buffered_responses.push(response);
        self.metrics.buffered_blocks.increment(response_len as f64);
        self.metrics.buffered_blocks_size_bytes.set(self.buffered_blocks_size_bytes as f64);
        self.metrics.buffered_responses.set(self.buffered_responses.len() as f64);
    }
    /// Returns a response if its first block number matches the next expected.
    fn try_next_buffered(&mut self) -> Option<Vec<BlockResponse<B>>> {
        if let Some(next) = self.buffered_responses.peek() {
            let expected = self.next_expected_block_number();
            let next_block_range = next.block_range();
            if next_block_range.contains(&expected) {
                return self.pop_buffered_response().map(|buffered| {
                    // Trim to blocks that are >= expected and still inside the
                    // current download range.
                    buffered
                        .resp
                        .into_iter()
                        .skip_while(|b| b.block_number() < expected)
                        .take_while(|b| self.download_range.contains(&b.block_number()))
                        .collect()
                })
            }
            // Drop buffered response since we passed that range
            if *next_block_range.end() < expected {
                self.pop_buffered_response();
            }
        }
        None
    }
    /// Returns the next batch of block bodies that can be returned if we have enough buffered
    /// bodies
    fn try_split_next_batch(&mut self) -> Option<Vec<BlockResponse<B>>> {
        if self.queued_bodies.len() >= self.stream_batch_size {
            let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::<Vec<_>>();
            self.queued_bodies.shrink_to_fit();
            self.metrics.total_flushed.increment(next_batch.len() as u64);
            self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
            return Some(next_batch)
        }
        None
    }
    /// Check if a new request can be submitted, it implements back pressure to prevent overwhelming
    /// the system and causing memory overload.
    ///
    /// Returns true if a new request can be submitted
    fn can_submit_new_request(&self) -> bool {
        // requests are issued in order but not necessarily finished in order, so the queued bodies
        // can grow large if a certain request is slow, so we limit the followup requests if the
        // queued bodies grew too large
        self.queued_bodies.len() < 4 * self.stream_batch_size &&
            self.has_buffer_capacity() &&
            self.in_progress_queue.len() < self.concurrent_request_limit()
    }
}
impl<B, C, Provider> BodiesDownloader<B, C, Provider>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    /// Spawns the downloader task via [`tokio::task::spawn`]
    pub fn into_task(self) -> TaskDownloader<B> {
        // Delegate to the generic spawner variant with the default tokio executor.
        self.into_task_with(&TokioTaskExecutor::default())
    }
    /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner.
    pub fn into_task_with<S>(self, spawner: &S) -> TaskDownloader<B>
    where
        S: TaskSpawner,
    {
        TaskDownloader::spawn_with(self, spawner)
    }
}
impl<B, C, Provider> BodyDownloader for BodiesDownloader<B, C, Provider>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    type Block = B;
    /// Set a new download range (inclusive).
    ///
    /// This method will drain all queued bodies, filter out ones outside the range and put them
    /// back into the buffer.
    /// If there are any bodies between the range start and last queued body that have not been
    /// downloaded or are not in progress, they will be re-requested.
    ///
    /// NOTE(review): the range parameter is `RangeInclusive`, so the previous
    /// "(exclusive)" wording was wrong. Also, the implementation below resets all
    /// state via `clear` on a non-consecutive range rather than draining and
    /// re-buffering — confirm the description above against the actual behavior.
    fn set_download_range(&mut self, range: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
        // Check if the range is valid.
        if range.is_empty() {
            tracing::error!(target: "downloaders::bodies", ?range, "Bodies download range is invalid (empty)");
            return Err(DownloadError::InvalidBodyRange { range })
        }
        // Check if the provided range is the subset of the existing range.
        let is_current_range_subset = self.download_range.contains(range.start()) &&
            *range.end() == *self.download_range.end();
        if is_current_range_subset {
            tracing::trace!(target: "downloaders::bodies", ?range, "Download range already in progress");
            // The current range already includes requested.
            return Ok(())
        }
        // Check if the provided range is the next expected range.
        let count = *range.end() - *range.start() + 1; // range is inclusive
        let is_next_consecutive_range = *range.start() == *self.download_range.end() + 1;
        if is_next_consecutive_range {
            // New range received.
            tracing::trace!(target: "downloaders::bodies", ?range, "New download range set");
            info!(target: "downloaders::bodies", count, ?range, "Downloading bodies");
            self.download_range = range;
            return Ok(())
        }
        // The block range is reset. This can happen either after unwind or after the bodies were
        // written by external services (e.g. BlockchainTree).
        tracing::trace!(target: "downloaders::bodies", ?range, prev_range = ?self.download_range, "Download range reset");
        info!(target: "downloaders::bodies", count, ?range, "Downloading bodies");
        self.clear();
        self.download_range = range;
        Ok(())
    }
}
impl<B, C, Provider> Stream for BodiesDownloader<B, C, Provider>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    type Item = BodyDownloaderResult<B>;
    /// Drives the download state machine: polls in-flight requests, submits new
    /// ones while back pressure allows, and yields contiguous batches of bodies.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        if this.is_terminated() {
            return Poll::Ready(None)
        }
        // Submit new requests and poll any in progress
        loop {
            // Yield next batch if ready
            if let Some(next_batch) = this.try_split_next_batch() {
                return Poll::Ready(Some(Ok(next_batch)))
            }
            // Poll requests
            while let Poll::Ready(Some(response)) = this.in_progress_queue.poll_next_unpin(cx) {
                this.metrics.in_flight_requests.decrement(1.);
                match response {
                    Ok(response) => {
                        this.buffer_bodies_response(response);
                    }
                    Err(error) => {
                        // A failed request is fatal for this range: reset everything.
                        tracing::debug!(target: "downloaders::bodies", %error, "Request failed");
                        this.clear();
                        return Poll::Ready(Some(Err(error)))
                    }
                };
            }
            // Loop exit condition
            let mut new_request_submitted = false;
            // Submit new requests
            'inner: while this.can_submit_new_request() {
                match this.next_headers_request() {
                    Ok(Some(request)) => {
                        this.metrics.in_flight_requests.increment(1.);
                        this.in_progress_queue.push_new_request(
                            Arc::clone(&this.client),
                            Arc::clone(&this.consensus),
                            request,
                        );
                        new_request_submitted = true;
                    }
                    Ok(None) => break 'inner,
                    Err(error) => {
                        tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request");
                        this.clear();
                        return Poll::Ready(Some(Err(error)))
                    }
                };
            }
            // Move contiguous buffered responses into the output queue.
            while let Some(buf_response) = this.try_next_buffered() {
                this.queue_bodies(buf_response);
            }
            // shrink the buffer so that it doesn't grow indefinitely
            this.buffered_responses.shrink_to_fit();
            if !new_request_submitted {
                break
            }
        }
        // All requests are handled, stream is finished
        if this.in_progress_queue.is_empty() {
            if this.queued_bodies.is_empty() {
                return Poll::Ready(None)
            }
            // Flush a final (possibly partial) batch.
            let batch_size = this.stream_batch_size.min(this.queued_bodies.len());
            let next_batch = this.queued_bodies.drain(..batch_size).collect::<Vec<_>>();
            this.queued_bodies.shrink_to_fit();
            this.metrics.total_flushed.increment(next_batch.len() as u64);
            this.metrics.queued_blocks.set(this.queued_bodies.len() as f64);
            return Poll::Ready(Some(Ok(next_batch)))
        }
        Poll::Pending
    }
}
/// A bodies response tagged with its estimated total size, ordered by its first
/// block number (see the `Ord` impl below) so the downloader's `BinaryHeap`
/// yields the lowest-numbered response first.
#[derive(Debug)]
struct OrderedBodiesResponse<B: Block> {
    // The block responses contained in this response.
    resp: Vec<BlockResponse<B>>,
    /// The total size of the response in bytes
    size: usize,
}
impl<B: Block> OrderedBodiesResponse<B> {
    /// Returns the number of block responses contained in this response.
    #[inline]
    const fn len(&self) -> usize {
        self.resp.len()
    }
    /// Returns the size of the response in bytes
    ///
    /// See [`BlockResponse::size`]
    #[inline]
    const fn size(&self) -> usize {
        self.size
    }
}
impl<B: Block> OrderedBodiesResponse<B> {
    /// Returns the block number of the first element
    ///
    /// # Panics
    /// If the response vec is empty.
    fn first_block_number(&self) -> u64 {
        self.resp.first().expect("is not empty").block_number()
    }
    /// Returns the range of the block numbers in the response
    ///
    /// # Panics
    /// If the response vec is empty.
    fn block_range(&self) -> RangeInclusive<u64> {
        let last = self.resp.last().expect("is not empty").block_number();
        self.first_block_number()..=last
    }
}
/// Equality considers only the first block number, consistent with the `Ord`
/// impl below that the downloader's `BinaryHeap` relies on.
impl<B: Block> PartialEq for OrderedBodiesResponse<B> {
    fn eq(&self, other: &Self) -> bool {
        self.first_block_number() == other.first_block_number()
    }
}
impl<B: Block> Eq for OrderedBodiesResponse<B> {}
impl<B: Block> PartialOrd for OrderedBodiesResponse<B> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<B: Block> Ord for OrderedBodiesResponse<B> {
    /// Ordering is reversed so that `BinaryHeap` (a max-heap) pops the response
    /// with the *lowest* first block number first.
    fn cmp(&self, other: &Self) -> Ordering {
        self.first_block_number().cmp(&other.first_block_number()).reverse()
    }
}
/// Builder for [`BodiesDownloader`].
///
/// Construct via [`Default`] or [`BodiesDownloaderBuilder::new`], tweak with the
/// `with_*` setters, then call `build`.
#[derive(Debug, Clone)]
pub struct BodiesDownloaderBuilder {
    /// The batch size of non-empty blocks per one request
    pub request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    pub stream_batch_size: usize,
    /// Maximum number of bytes of received bodies to buffer internally.
    pub max_buffered_blocks_size_bytes: usize,
    /// The maximum number of requests to send concurrently.
    pub concurrent_requests_range: RangeInclusive<usize>,
}
impl BodiesDownloaderBuilder {
    /// Creates a new [`BodiesDownloaderBuilder`] with configurations based on the provided
    /// [`BodiesConfig`].
    pub fn new(config: BodiesConfig) -> Self {
        // Build the allowed concurrency window from the config bounds.
        let concurrency = config.downloader_min_concurrent_requests..=
            config.downloader_max_concurrent_requests;
        Self::default()
            .with_request_limit(config.downloader_request_limit)
            .with_stream_batch_size(config.downloader_stream_batch_size)
            .with_max_buffered_blocks_size_bytes(config.downloader_max_buffered_blocks_size_bytes)
            .with_concurrent_requests_range(concurrency)
    }
}
impl Default for BodiesDownloaderBuilder {
    /// Default configuration: 200 non-empty blocks per request, stream batches
    /// of 1000 blocks, ~2GB of buffered bodies and 5..=100 concurrent requests.
    fn default() -> Self {
        Self {
            request_limit: 200,
            stream_batch_size: 1_000,
            max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
            concurrent_requests_range: 5..=100,
        }
    }
}
impl BodiesDownloaderBuilder {
    /// Set request batch size on the downloader.
    pub const fn with_request_limit(mut self, request_limit: u64) -> Self {
        self.request_limit = request_limit;
        self
    }
    /// Set stream batch size on the downloader.
    pub const fn with_stream_batch_size(mut self, stream_batch_size: usize) -> Self {
        self.stream_batch_size = stream_batch_size;
        self
    }
    /// Set concurrent requests range on the downloader.
    pub const fn with_concurrent_requests_range(
        mut self,
        concurrent_requests_range: RangeInclusive<usize>,
    ) -> Self {
        self.concurrent_requests_range = concurrent_requests_range;
        self
    }
    /// Set max buffered block bytes on the downloader.
    pub const fn with_max_buffered_blocks_size_bytes(
        mut self,
        max_buffered_blocks_size_bytes: usize,
    ) -> Self {
        self.max_buffered_blocks_size_bytes = max_buffered_blocks_size_bytes;
        self
    }
    /// Consume self and return the concurrent downloader.
    pub fn build<B, C, Provider>(
        self,
        client: C,
        consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
        provider: Provider,
    ) -> BodiesDownloader<B, C, Provider>
    where
        B: Block,
        C: BodiesClient<Body = B::Body> + 'static,
        Provider: HeaderProvider<Header = B::Header>,
    {
        let Self {
            request_limit,
            stream_batch_size,
            concurrent_requests_range,
            max_buffered_blocks_size_bytes,
        } = self;
        let metrics = BodyDownloaderMetrics::default();
        let in_progress_queue = BodiesRequestQueue::new(metrics.clone());
        BodiesDownloader {
            client: Arc::new(client),
            consensus,
            provider,
            request_limit,
            stream_batch_size,
            max_buffered_blocks_size_bytes,
            concurrent_requests_range,
            in_progress_queue,
            metrics,
            // Start with an empty range (1..=0); `set_download_range` must be
            // called before the downloader does any work.
            download_range: RangeInclusive::new(1, 0),
            latest_queued_block_number: None,
            buffered_responses: Default::default(),
            queued_bodies: Default::default(),
            buffered_blocks_size_bytes: 0,
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
bodies::test_utils::{insert_headers, zip_blocks},
test_utils::{generate_bodies, TestBodiesClient},
};
use alloy_primitives::B256;
use assert_matches::assert_matches;
use reth_chainspec::MAINNET;
use reth_consensus::test_utils::TestConsensus;
use reth_db::test_utils::{create_test_rw_db, create_test_static_files_dir};
use reth_provider::{
providers::StaticFileProvider, test_utils::MockNodeTypesWithDB, ProviderFactory,
};
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use std::collections::HashMap;
// Check that the blocks are emitted in order of block number, not in order of
// first-downloaded
#[tokio::test]
async fn streams_bodies_in_order() {
// Generate some random blocks
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=19);
insert_headers(db.db(), &headers);
let client = Arc::new(
TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
);
let (_static_dir, static_dir_path) = create_test_static_files_dir();
let mut downloader = BodiesDownloaderBuilder::default()
.build::<reth_ethereum_primitives::Block, _, _>(
client.clone(),
Arc::new(TestConsensus::default()),
ProviderFactory::<MockNodeTypesWithDB>::new(
db,
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
);
downloader.set_download_range(0..=19).expect("failed to set download range");
assert_matches!(
downloader.next().await,
Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
);
assert_eq!(client.times_requested(), 1);
}
// Check that the number of times requested equals to the number of headers divided by request
// limit.
#[tokio::test]
async fn requests_correct_number_of_times() {
// Generate some random blocks
let db = create_test_rw_db();
let mut rng = generators::rng();
let blocks = random_block_range(
&mut rng,
0..=199,
BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..2, ..Default::default() },
);
let headers = blocks.iter().map(|block| block.clone_sealed_header()).collect::<Vec<_>>();
let bodies = blocks
.into_iter()
.map(|block| (block.hash(), block.into_body()))
.collect::<HashMap<_, _>>();
insert_headers(db.db(), &headers);
let request_limit = 10;
let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
let (_static_dir, static_dir_path) = create_test_static_files_dir();
let mut downloader = BodiesDownloaderBuilder::default()
.with_request_limit(request_limit)
.build::<reth_ethereum_primitives::Block, _, _>(
client.clone(),
Arc::new(TestConsensus::default()),
ProviderFactory::<MockNodeTypesWithDB>::new(
db,
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
);
downloader.set_download_range(0..=199).expect("failed to set download range");
let _ = downloader.collect::<Vec<_>>().await;
assert_eq!(client.times_requested(), 20);
}
// Check that bodies are returned in correct order
// after resetting the download range multiple times.
#[tokio::test]
async fn streams_bodies_in_order_after_range_reset() {
// Generate some random blocks
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=99);
insert_headers(db.db(), &headers);
let stream_batch_size = 20;
let request_limit = 10;
let client = Arc::new(
TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
);
let (_static_dir, static_dir_path) = create_test_static_files_dir();
let mut downloader = BodiesDownloaderBuilder::default()
.with_stream_batch_size(stream_batch_size)
.with_request_limit(request_limit)
.build::<reth_ethereum_primitives::Block, _, _>(
client.clone(),
Arc::new(TestConsensus::default()),
ProviderFactory::<MockNodeTypesWithDB>::new(
db,
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
);
let mut range_start = 0;
while range_start < 100 {
downloader.set_download_range(range_start..=99).expect("failed to set download range");
assert_matches!(
downloader.next().await,
Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().skip(range_start as usize).take(stream_batch_size), &mut bodies))
);
assert!(downloader.latest_queued_block_number >= Some(range_start));
range_start += stream_batch_size as u64;
}
}
// Check that the downloader picks up the new range and downloads bodies after previous range
// was completed.
#[tokio::test]
async fn can_download_new_range_after_termination() {
// Generate some random blocks
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=199);
insert_headers(db.db(), &headers);
let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
let (_static_dir, static_dir_path) = create_test_static_files_dir();
let mut downloader = BodiesDownloaderBuilder::default()
.with_stream_batch_size(100)
.build::<reth_ethereum_primitives::Block, _, _>(
client.clone(),
Arc::new(TestConsensus::default()),
ProviderFactory::<MockNodeTypesWithDB>::new(
db,
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
);
// Set and download the first range
downloader.set_download_range(0..=99).expect("failed to set download range");
assert_matches!(
downloader.next().await,
Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().take(100), &mut bodies))
);
// Check that the stream is terminated
assert!(downloader.next().await.is_none());
// Set and download the second range
downloader.set_download_range(100..=199).expect("failed to set download range");
assert_matches!(
downloader.next().await,
Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().skip(100), &mut bodies))
);
}
// Check that the downloader continues after the size limit is reached.
#[tokio::test]
async fn can_download_after_exceeding_limit() {
// Generate some random blocks
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=199);
insert_headers(db.db(), &headers);
let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
let (_static_dir, static_dir_path) = create_test_static_files_dir();
// Set the max buffered block size to 1 byte, to make sure that every response exceeds the
// limit
let mut downloader = BodiesDownloaderBuilder::default()
.with_stream_batch_size(10)
.with_request_limit(1)
.with_max_buffered_blocks_size_bytes(1)
.build::<reth_ethereum_primitives::Block, _, _>(
client.clone(),
Arc::new(TestConsensus::default()),
ProviderFactory::<MockNodeTypesWithDB>::new(
db,
MAINNET.clone(),
StaticFileProvider::read_write(static_dir_path).unwrap(),
),
);
// Set and download the entire range
downloader.set_download_range(0..=199).expect("failed to set download range");
let mut header = 0;
while let Some(Ok(resp)) = downloader.next().await {
assert_eq!(resp, zip_blocks(headers.iter().skip(header).take(resp.len()), &mut bodies));
header += resp.len();
}
}
// Check that the downloader can tolerate a few completely empty responses
#[tokio::test]
async fn can_tolerate_empty_responses() {
// Generate some random blocks
let db = create_test_rw_db();
let (headers, mut bodies) = generate_bodies(0..=99);
insert_headers(db.db(), &headers);
// respond with empty bodies for every other request.
let client = Arc::new(
TestBodiesClient::default().with_bodies(bodies.clone()).with_empty_responses(2),
);
let (_static_dir, static_dir_path) = create_test_static_files_dir();
let mut downloader = BodiesDownloaderBuilder::default()
.with_request_limit(3)
.with_stream_batch_size(100)
.build::<reth_ethereum_primitives::Block, _, _>(
client.clone(),
Arc::new(TestConsensus::default()),
ProviderFactory::<MockNodeTypesWithDB>::new(
db,
MAINNET.clone(),
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/mod.rs | crates/net/downloaders/src/bodies/mod.rs | /// A naive concurrent downloader.
#[expect(clippy::module_inception)]
pub mod bodies;
/// A body downloader that does nothing. Useful to build unwind-only pipelines.
pub mod noop;
/// A downloader implementation that spawns a downloader to a task
pub mod task;
// Internal (non-public) request queue used by the concurrent downloader.
mod queue;
// Internal (non-public) single-request implementation.
mod request;
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/task.rs | crates/net/downloaders/src/bodies/task.rs | use alloy_primitives::BlockNumber;
use futures::Stream;
use futures_util::{FutureExt, StreamExt};
use pin_project::pin_project;
use reth_network_p2p::{
bodies::downloader::{BodyDownloader, BodyDownloaderResult},
error::DownloadResult,
};
use reth_primitives_traits::Block;
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use std::{
fmt::Debug,
future::Future,
ops::RangeInclusive,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::{mpsc, mpsc::UnboundedSender};
use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};
use tokio_util::sync::PollSender;
/// The maximum number of [`BodyDownloaderResult`]s to hold in the buffer.
///
/// This bounds the `mpsc` channel between the spawned downloader task and the
/// [`TaskDownloader`] handle, applying backpressure to the task when the consumer
/// falls behind.
pub const BODIES_TASK_BUFFER_SIZE: usize = 4;
/// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task.
#[derive(Debug)]
#[pin_project]
pub struct TaskDownloader<B: Block> {
    /// Receives download results produced by the spawned downloader task.
    #[pin]
    from_downloader: ReceiverStream<BodyDownloaderResult<B>>,
    /// Sends new download ranges to the spawned downloader task.
    to_downloader: UnboundedSender<RangeInclusive<BlockNumber>>,
}
impl<B: Block + 'static> TaskDownloader<B> {
    /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a
    /// [`TaskDownloader`] connected to that task.
    ///
    /// # Panics
    ///
    /// This method panics if called outside of a Tokio runtime.
    ///
    /// # Example
    ///
    /// ```
    /// use reth_consensus::{Consensus, ConsensusError};
    /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader};
    /// use reth_network_p2p::bodies::client::BodiesClient;
    /// use reth_primitives_traits::{Block, InMemorySize};
    /// use reth_storage_api::HeaderProvider;
    /// use std::{fmt::Debug, sync::Arc};
    ///
    /// fn t<
    ///     B: Block + 'static,
    ///     C: BodiesClient<Body = B::Body> + 'static,
    ///     Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
    /// >(
    ///     client: Arc<C>,
    ///     consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
    ///     provider: Provider,
    /// ) {
    ///     let downloader =
    ///         BodiesDownloaderBuilder::default().build::<B, _, _>(client, consensus, provider);
    ///     let downloader = TaskDownloader::spawn(downloader);
    /// }
    /// ```
    pub fn spawn<T>(downloader: T) -> Self
    where
        T: BodyDownloader<Block = B> + 'static,
    {
        Self::spawn_with(downloader, &TokioTaskExecutor::default())
    }

    /// Spawns the given `downloader` via the given [`TaskSpawner`] and returns a
    /// [`TaskDownloader`] connected to that task.
    pub fn spawn_with<T, S>(downloader: T, spawner: &S) -> Self
    where
        T: BodyDownloader<Block = B> + 'static,
        S: TaskSpawner,
    {
        // Unbounded channel carrying download-range updates from this handle to the task.
        let (range_tx, range_rx) = mpsc::unbounded_channel();
        // Bounded channel carrying download results back from the task.
        let (result_tx, result_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE);

        let task = SpawnedDownloader {
            bodies_tx: PollSender::new(result_tx),
            updates: UnboundedReceiverStream::new(range_rx),
            downloader,
        };
        spawner.spawn(task.boxed());

        Self { from_downloader: ReceiverStream::new(result_rx), to_downloader: range_tx }
    }
}
impl<B: Block + 'static> BodyDownloader for TaskDownloader<B> {
    type Block = B;

    /// Forwards the range to the spawned downloader task.
    fn set_download_range(&mut self, range: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
        // A failed send means the task side has shut down; the send result is
        // deliberately discarded here, matching the original best-effort behavior.
        self.to_downloader.send(range).ok();
        Ok(())
    }
}
impl<B: Block + 'static> Stream for TaskDownloader<B> {
    type Item = BodyDownloaderResult<B>;

    /// Delegates to the receiver half connected to the spawned downloader task.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.project();
        this.from_downloader.poll_next(cx)
    }
}
/// A [`BodyDownloader`] that runs on its own task
struct SpawnedDownloader<T: BodyDownloader> {
    /// Incoming download-range updates from the [`TaskDownloader`] handle.
    updates: UnboundedReceiverStream<RangeInclusive<BlockNumber>>,
    /// Outgoing download results back to the [`TaskDownloader`] handle.
    bodies_tx: PollSender<BodyDownloaderResult<T::Block>>,
    /// The wrapped downloader driven by this future.
    downloader: T,
}
impl<T: BodyDownloader> Future for SpawnedDownloader<T> {
    type Output = ();

    /// Event loop of the spawned task: applies incoming range updates to the wrapped
    /// downloader and forwards its results (or range-setting errors) to the
    /// [`TaskDownloader`] handle. Resolves once the handle is dropped (either channel
    /// closes).
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        loop {
            // First drain all pending range updates before polling the downloader.
            while let Poll::Ready(update) = this.updates.poll_next_unpin(cx) {
                if let Some(range) = update {
                    if let Err(err) = this.downloader.set_download_range(range) {
                        tracing::error!(target: "downloaders::bodies", %err, "Failed to set bodies download range");
                        // Clone the sender ensure its availability. See [PollSender::clone].
                        let mut bodies_tx = this.bodies_tx.clone();
                        let forward_error_result = ready!(bodies_tx.poll_reserve(cx))
                            .and_then(|_| bodies_tx.send_item(Err(err)));
                        if forward_error_result.is_err() {
                            // channel closed, this means [TaskDownloader] was dropped,
                            // so we can also exit
                            return Poll::Ready(())
                        }
                    }
                } else {
                    // channel closed, this means [TaskDownloader] was dropped, so we can also
                    // exit
                    return Poll::Ready(())
                }
            }
            // Reserve a result slot first so a ready item is never dropped if the
            // receiver side applies backpressure.
            match ready!(this.bodies_tx.poll_reserve(cx)) {
                Ok(()) => match ready!(this.downloader.poll_next_unpin(cx)) {
                    Some(bodies) => {
                        if this.bodies_tx.send_item(bodies).is_err() {
                            // channel closed, this means [TaskDownloader] was dropped, so we can
                            // also exit
                            return Poll::Ready(())
                        }
                    }
                    // The downloader stream is exhausted; stay pending so new range
                    // updates can still wake this task.
                    None => return Poll::Pending,
                },
                Err(_) => {
                    // channel closed, this means [TaskDownloader] was dropped, so we can also
                    // exit
                    return Poll::Ready(())
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        bodies::{
            bodies::BodiesDownloaderBuilder,
            test_utils::{insert_headers, zip_blocks},
        },
        test_utils::{generate_bodies, TestBodiesClient},
    };
    use assert_matches::assert_matches;
    use reth_consensus::test_utils::TestConsensus;
    use reth_network_p2p::error::DownloadError;
    use reth_provider::test_utils::create_test_provider_factory;
    use std::sync::Arc;

    /// A download running on a spawned task should yield the same zipped blocks as a
    /// direct download, with a single request being issued.
    #[tokio::test(flavor = "multi_thread")]
    async fn download_one_by_one_on_task() {
        reth_tracing::init_test_tracing();
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=19);
        insert_headers(factory.db_ref().db(), &headers);
        let client = Arc::new(
            TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
        );
        let downloader = BodiesDownloaderBuilder::default()
            .build::<reth_ethereum_primitives::Block, _, _>(
                client.clone(),
                Arc::new(TestConsensus::default()),
                factory,
            );
        let mut downloader = TaskDownloader::spawn(downloader);
        downloader.set_download_range(0..=19).expect("failed to set download range");
        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
        );
        assert_eq!(client.times_requested(), 1);
    }

    /// An invalid (reversed) range set on the handle must surface as an error item on
    /// the stream, forwarded from the spawned task.
    #[tokio::test(flavor = "multi_thread")]
    #[expect(clippy::reversed_empty_ranges)]
    async fn set_download_range_error_returned() {
        reth_tracing::init_test_tracing();
        let factory = create_test_provider_factory();
        let downloader = BodiesDownloaderBuilder::default()
            .build::<reth_ethereum_primitives::Block, _, _>(
                Arc::new(TestBodiesClient::default()),
                Arc::new(TestConsensus::default()),
                factory,
            );
        let mut downloader = TaskDownloader::spawn(downloader);
        downloader.set_download_range(1..=0).expect("failed to set download range");
        assert_matches!(downloader.next().await, Some(Err(DownloadError::InvalidBodyRange { .. })));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/request.rs | crates/net/downloaders/src/bodies/request.rs | use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics};
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use futures::{Future, FutureExt};
use reth_consensus::{Consensus, ConsensusError};
use reth_network_p2p::{
bodies::{client::BodiesClient, response::BlockResponse},
error::{DownloadError, DownloadResult},
priority::Priority,
};
use reth_network_peers::{PeerId, WithPeerId};
use reth_primitives_traits::{Block, GotExpected, InMemorySize, SealedBlock, SealedHeader};
use std::{
collections::VecDeque,
mem,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
/// Body request implemented as a [Future].
///
/// The future will poll the underlying request until fulfilled.
/// If the response arrived with insufficient number of bodies, the future
/// will issue another request until all bodies are collected.
///
/// It then proceeds to verify the downloaded bodies. In case of a validation error,
/// the future will start over.
///
/// The future will filter out any empty headers (see [`alloy_consensus::Header::is_empty`]) from
/// the request. If [`BodiesRequestFuture`] was initialized with all empty headers, no request will
/// be dispatched and they will be immediately returned upon polling.
///
/// NB: This assumes that peers respond with bodies in the order that they were requested.
/// This is a reasonable assumption to make as that's [what Geth
/// does](https://github.com/ethereum/go-ethereum/blob/f53ff0ff4a68ffc56004ab1d5cc244bcb64d3277/les/server_requests.go#L245).
/// All errors regarding the response cause the peer to get penalized, meaning that adversaries
/// that try to give us bodies that do not match the requested order are going to be penalized
/// and eventually disconnected.
pub(crate) struct BodiesRequestFuture<B: Block, C: BodiesClient<Body = B::Body>> {
    /// Client used to dispatch body requests.
    client: Arc<C>,
    /// Consensus implementation used for pre-execution block validation.
    consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
    /// Downloader-wide metrics.
    metrics: BodyDownloaderMetrics,
    /// Metrics for individual responses. This can be used to observe how the size (in bytes) of
    /// responses change while bodies are being downloaded.
    response_metrics: ResponseMetrics,
    /// Headers to download. The collection is shrunk as responses are buffered.
    pending_headers: VecDeque<SealedHeader<B::Header>>,
    /// Internal buffer for all blocks
    buffer: Vec<BlockResponse<B>>,
    /// The in-flight request to the client, if any.
    fut: Option<C::Output>,
    /// Tracks how many bodies we requested in the last request.
    last_request_len: Option<usize>,
}
impl<B, C> BodiesRequestFuture<B, C>
where
    B: Block,
    C: BodiesClient<Body = B::Body> + 'static,
{
    /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request.
    pub(crate) fn new(
        client: Arc<C>,
        consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
        metrics: BodyDownloaderMetrics,
    ) -> Self {
        Self {
            client,
            consensus,
            metrics,
            response_metrics: Default::default(),
            pending_headers: Default::default(),
            buffer: Default::default(),
            last_request_len: None,
            fut: None,
        }
    }

    /// Sets the headers to download and dispatches the first request if there is
    /// anything non-empty to fetch.
    pub(crate) fn with_headers(mut self, headers: Vec<SealedHeader<B::Header>>) -> Self {
        self.buffer.reserve_exact(headers.len());
        self.pending_headers = VecDeque::from(headers);
        // Submit the request only if there are any headers to download.
        // Otherwise, the future will immediately be resolved.
        if let Some(req) = self.next_request() {
            self.submit_request(req, Priority::Normal);
        }
        self
    }

    /// Records the error, reports the responsible peer (if known) and resubmits the
    /// remaining hashes with high priority.
    fn on_error(&mut self, error: DownloadError, peer_id: Option<PeerId>) {
        self.metrics.increment_errors(&error);
        tracing::debug!(target: "downloaders::bodies", ?peer_id, %error, "Error requesting bodies");
        if let Some(peer_id) = peer_id {
            self.client.report_bad_message(peer_id);
        }
        // on_error is only reachable while headers are still pending, so a next
        // request must exist.
        self.submit_request(
            self.next_request().expect("existing hashes to resubmit"),
            Priority::High,
        );
    }

    /// Retrieve header hashes for the next request.
    ///
    /// Empty headers are skipped since they have no body to fetch. Returns `None`
    /// if every pending header is empty.
    fn next_request(&self) -> Option<Vec<B256>> {
        let mut hashes =
            self.pending_headers.iter().filter(|h| !h.is_empty()).map(|h| h.hash()).peekable();
        hashes.peek().is_some().then(|| hashes.collect())
    }

    /// Submit the request with the given priority.
    ///
    /// Stores the request length so the next response can be bounds-checked against it.
    fn submit_request(&mut self, req: Vec<B256>, priority: Priority) {
        tracing::trace!(target: "downloaders::bodies", request_len = req.len(), "Requesting bodies");
        let client = Arc::clone(&self.client);
        self.last_request_len = Some(req.len());
        self.fut = Some(client.get_block_bodies_with_priority(req, priority));
    }

    /// Process block response.
    /// Returns an error if the response is invalid.
    fn on_block_response(&mut self, response: WithPeerId<Vec<B::Body>>) -> DownloadResult<()>
    where
        B::Body: InMemorySize,
    {
        let (peer_id, bodies) = response.split();
        let request_len = self.last_request_len.unwrap_or_default();
        let response_len = bodies.len();
        tracing::trace!(target: "downloaders::bodies", request_len, response_len, ?peer_id, "Received bodies");
        // Increment total downloaded metric
        self.metrics.total_downloaded.increment(response_len as u64);
        // TODO: Malicious peers often return a single block even if it does not exceed the soft
        // response limit (2MB). This could be penalized by checking if this block and the
        // next one exceed the soft response limit, if not then peer either does not have the next
        // block or deliberately sent a single block.
        if bodies.is_empty() {
            return Err(DownloadError::EmptyResponse)
        }
        // A peer must never return more bodies than were requested.
        if response_len > request_len {
            return Err(DownloadError::TooManyBodies(GotExpected {
                got: response_len,
                expected: request_len,
            }))
        }
        // Buffer block responses
        self.try_buffer_blocks(bodies)?;
        // Submit next request if any
        if let Some(req) = self.next_request() {
            self.submit_request(req, Priority::High);
        } else {
            self.fut = None;
        }
        Ok(())
    }

    /// Attempt to buffer body responses. Returns an error if body response fails validation.
    /// Every body preceding the failed one will be buffered.
    ///
    /// This method removes headers from the internal collection.
    /// If the response fails validation, then the header will be put back.
    fn try_buffer_blocks(&mut self, bodies: Vec<C::Body>) -> DownloadResult<()>
    where
        C::Body: InMemorySize,
    {
        let bodies_capacity = bodies.capacity();
        let bodies_len = bodies.len();
        let mut bodies = bodies.into_iter().peekable();
        // Account for the response vector's own allocation when reporting size.
        let mut total_size = bodies_capacity * mem::size_of::<C::Body>();
        while bodies.peek().is_some() {
            let next_header = match self.pending_headers.pop_front() {
                Some(header) => header,
                None => return Ok(()), // no more headers
            };
            if next_header.is_empty() {
                // Empty headers do not consume a body from the response.
                // increment empty block body metric
                total_size += mem::size_of::<C::Body>();
                self.buffer.push(BlockResponse::Empty(next_header));
            } else {
                let next_body = bodies.next().unwrap();
                // increment full block body metric
                total_size += next_body.size();
                let block = SealedBlock::from_sealed_parts(next_header, next_body);
                if let Err(error) = self.consensus.validate_block_pre_execution(&block) {
                    // Body is invalid, put the header back and return an error
                    let hash = block.hash();
                    let number = block.number();
                    self.pending_headers.push_front(block.into_sealed_header());
                    return Err(DownloadError::BodyValidation {
                        hash,
                        number,
                        error: Box::new(error),
                    })
                }
                self.buffer.push(BlockResponse::Full(block));
            }
        }
        // Increment per-response metric
        self.response_metrics.response_size_bytes.set(total_size as f64);
        self.response_metrics.response_length.set(bodies_len as f64);
        Ok(())
    }
}
impl<B, C> Future for BodiesRequestFuture<B, C>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
{
    type Output = DownloadResult<Vec<BlockResponse<B>>>;

    /// Drives the request to completion.
    ///
    /// Resolves with all buffered [`BlockResponse`]s once every pending header has been
    /// matched. Request failures are retried internally via `on_error`; only a closed
    /// client channel resolves this future with an error.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        loop {
            // Everything matched: hand out the accumulated buffer.
            if this.pending_headers.is_empty() {
                return Poll::Ready(Ok(std::mem::take(&mut this.buffer)))
            }
            // Check if there is a pending requests. It might not exist if all
            // headers are empty and there is nothing to download.
            if let Some(fut) = this.fut.as_mut() {
                match ready!(fut.poll_unpin(cx)) {
                    Ok(response) => {
                        let peer_id = response.peer_id();
                        if let Err(error) = this.on_block_response(response) {
                            this.on_error(error, Some(peer_id));
                        }
                    }
                    Err(error) => {
                        if error.is_channel_closed() {
                            // The client is shutting down; retrying is pointless.
                            return Poll::Ready(Err(error.into()))
                        }
                        this.on_error(error.into(), None);
                    }
                }
            }
            // Buffer any empty headers
            while this.pending_headers.front().is_some_and(|h| h.is_empty()) {
                let header = this.pending_headers.pop_front().unwrap();
                this.buffer.push(BlockResponse::Empty(header));
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        bodies::test_utils::zip_blocks,
        test_utils::{generate_bodies, TestBodiesClient},
    };
    use reth_consensus::test_utils::TestConsensus;
    use reth_ethereum_primitives::Block;
    use reth_testing_utils::{generators, generators::random_header_range};

    /// Check if future returns empty bodies without dispatching any requests.
    #[tokio::test]
    async fn request_returns_empty_bodies() {
        let mut rng = generators::rng();
        let headers = random_header_range(&mut rng, 0..20, B256::ZERO);
        let client = Arc::new(TestBodiesClient::default());
        let fut = BodiesRequestFuture::<Block, _>::new(
            client.clone(),
            Arc::new(TestConsensus::default()),
            BodyDownloaderMetrics::default(),
        )
        .with_headers(headers.clone());
        // Every header is expected to resolve to `BlockResponse::Empty` and the
        // client must never have been asked for bodies.
        assert_eq!(
            fut.await.unwrap(),
            headers.into_iter().map(BlockResponse::Empty).collect::<Vec<_>>()
        );
        assert_eq!(client.times_requested(), 0);
    }

    /// Check that the request future keeps resubmitting requests until all requested
    /// bodies have been received, when the client serves partial (batched) responses.
    #[tokio::test]
    async fn request_submits_until_fulfilled() {
        // Generate some random blocks
        let (headers, mut bodies) = generate_bodies(0..=19);
        let batch_size = 2;
        let client = Arc::new(
            TestBodiesClient::default().with_bodies(bodies.clone()).with_max_batch_size(batch_size),
        );
        let fut = BodiesRequestFuture::<Block, _>::new(
            client.clone(),
            Arc::new(TestConsensus::default()),
            BodyDownloaderMetrics::default(),
        )
        .with_headers(headers.clone());
        assert_eq!(fut.await.unwrap(), zip_blocks(headers.iter(), &mut bodies));
        assert_eq!(
            client.times_requested(),
            // div_ceil: non-empty headers served `batch_size` (= 2) at a time
            (headers.into_iter().filter(|h| !h.is_empty()).count() as u64).div_ceil(2)
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/bodies/queue.rs | crates/net/downloaders/src/bodies/queue.rs | use super::request::BodiesRequestFuture;
use crate::metrics::BodyDownloaderMetrics;
use alloy_consensus::BlockHeader;
use alloy_primitives::BlockNumber;
use futures::{stream::FuturesUnordered, Stream};
use futures_util::StreamExt;
use reth_consensus::{Consensus, ConsensusError};
use reth_network_p2p::{
bodies::{client::BodiesClient, response::BlockResponse},
error::DownloadResult,
};
use reth_primitives_traits::{Block, SealedHeader};
use std::{
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
/// The wrapper around [`FuturesUnordered`] that keeps information
/// about the blocks currently being requested.
#[derive(Debug)]
pub(crate) struct BodiesRequestQueue<B: Block, C: BodiesClient<Body = B::Body>> {
    /// Inner body request queue.
    inner: FuturesUnordered<BodiesRequestFuture<B, C>>,
    /// The downloader metrics.
    metrics: BodyDownloaderMetrics,
    /// Last requested block number (the highest seen across all pushed requests).
    pub(crate) last_requested_block_number: Option<BlockNumber>,
}
impl<B, C> BodiesRequestQueue<B, C>
where
B: Block,
C: BodiesClient<Body = B::Body> + 'static,
{
/// Create new instance of request queue.
pub(crate) fn new(metrics: BodyDownloaderMetrics) -> Self {
Self { metrics, inner: Default::default(), last_requested_block_number: None }
}
/// Returns `true` if the queue is empty.
pub(crate) fn is_empty(&self) -> bool {
self.inner.is_empty()
}
/// Returns the number of queued requests.
pub(crate) fn len(&self) -> usize {
self.inner.len()
}
/// Clears the inner queue and related data.
pub(crate) fn clear(&mut self) {
self.inner.clear();
self.last_requested_block_number.take();
}
/// Add new request to the queue.
/// Expects a sorted list of headers.
pub(crate) fn push_new_request(
&mut self,
client: Arc<C>,
consensus: Arc<dyn Consensus<B, Error = ConsensusError>>,
request: Vec<SealedHeader<B::Header>>,
) {
// Set last max requested block number
self.last_requested_block_number = request
.last()
.map(|last| match self.last_requested_block_number {
Some(num) => last.number().max(num),
None => last.number(),
})
.or(self.last_requested_block_number);
// Create request and push into the queue.
self.inner.push(
BodiesRequestFuture::new(client, consensus, self.metrics.clone()).with_headers(request),
)
}
}
impl<B, C> Stream for BodiesRequestQueue<B, C>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
{
    type Item = DownloadResult<Vec<BlockResponse<B>>>;

    /// Polls the inner queue, yielding completed requests in completion order.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        this.inner.poll_next_unpin(cx)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/headers/noop.rs | crates/net/downloaders/src/headers/noop.rs | use alloy_primitives::Sealable;
use futures::Stream;
use reth_network_p2p::headers::{
downloader::{HeaderDownloader, SyncTarget},
error::HeadersDownloaderError,
};
use reth_primitives_traits::SealedHeader;
use std::fmt::Debug;
/// A [`HeaderDownloader`] implementation that does nothing.
///
/// All configuration methods are no-ops, and polling it as a stream panics by design.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct NoopHeaderDownloader<H>(std::marker::PhantomData<H>);
impl<H: Sealable + Debug + Send + Sync + Unpin + 'static> HeaderDownloader
    for NoopHeaderDownloader<H>
{
    type Header = H;

    // All hooks are intentional no-ops: this downloader never downloads anything.
    fn update_local_head(&mut self, _: SealedHeader<H>) {}
    fn update_sync_target(&mut self, _: SyncTarget) {}
    fn set_batch_size(&mut self, _: usize) {}
}
impl<H: Sealable> Stream for NoopHeaderDownloader<H> {
    type Item = Result<Vec<SealedHeader<H>>, HeadersDownloaderError<H>>;

    /// Always panics: the noop downloader is a placeholder that must never be
    /// driven as a stream.
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        _: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        panic!("NoopHeaderDownloader shouldn't be polled.")
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/headers/reverse_headers.rs | crates/net/downloaders/src/headers/reverse_headers.rs | //! A headers downloader that can handle multiple requests concurrently.
use super::task::TaskDownloader;
use crate::metrics::HeaderDownloaderMetrics;
use alloy_consensus::BlockHeader;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{BlockNumber, Sealable, B256};
use futures::{stream::Stream, FutureExt};
use futures_util::{stream::FuturesUnordered, StreamExt};
use rayon::prelude::*;
use reth_config::config::HeadersConfig;
use reth_consensus::HeaderValidator;
use reth_network_p2p::{
error::{DownloadError, DownloadResult, PeerRequestResult},
headers::{
client::{HeadersClient, HeadersRequest},
downloader::{validate_header_download, HeaderDownloader, SyncTarget},
error::{HeadersDownloaderError, HeadersDownloaderResult},
},
priority::Priority,
};
use reth_network_peers::PeerId;
use reth_primitives_traits::{GotExpected, SealedHeader};
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use std::{
cmp::{Ordering, Reverse},
collections::{binary_heap::PeekMut, BinaryHeap},
future::Future,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
use thiserror::Error;
use tracing::{debug, error, trace};
/// A heuristic that is used to determine the number of requests that should be prepared for a peer.
/// This should ensure that there are always requests lined up for peers to handle while the
/// downloader is yielding a next batch of headers that is being committed to the database.
///
/// The number of connected peers is multiplied by this value to derive the dynamic
/// concurrent request target (see `concurrent_request_limit`).
const REQUESTS_PER_PEER_MULTIPLIER: usize = 5;
/// Wrapper for internal downloader errors.
#[derive(Error, Debug)]
enum ReverseHeadersDownloaderError<H: Sealable> {
    /// Error originating from the downloader itself (e.g. a detached head).
    #[error(transparent)]
    Downloader(#[from] HeadersDownloaderError<H>),
    /// Error produced while handling an individual headers response.
    #[error(transparent)]
    Response(#[from] Box<HeadersResponseError>),
}
impl<H: Sealable> From<HeadersResponseError> for ReverseHeadersDownloaderError<H> {
    /// Wraps the response error in a box inside the [`Self::Response`] variant.
    fn from(value: HeadersResponseError) -> Self {
        Self::Response(value.into())
    }
}
/// Downloads headers concurrently.
///
/// This [`HeaderDownloader`] downloads headers using the configured [`HeadersClient`].
/// Headers can be requested by hash or block number and take a `limit` parameter. This downloader
/// tries to fill the gap between the local head of the node and the chain tip by issuing multiple
/// requests at a time but yielding them in batches on [`Stream::poll_next`].
///
/// **Note:** This downloader downloads in reverse, see also
/// [`reth_network_p2p::headers::client::HeadersDirection`], this means the batches of headers that
/// this downloader yields will start at the chain tip and move towards the local head: falling
/// block numbers.
#[must_use = "Stream does nothing unless polled"]
#[derive(Debug)]
pub struct ReverseHeadersDownloader<H: HeadersClient> {
    /// Consensus client used to validate headers
    consensus: Arc<dyn HeaderValidator<H::Header>>,
    /// Client used to download headers.
    client: Arc<H>,
    /// The local head of the chain.
    local_head: Option<SealedHeader<H::Header>>,
    /// Block we want to close the gap to.
    sync_target: Option<SyncTargetBlock>,
    /// The block number to use for requests.
    ///
    /// Decreases as requests are issued, since the download proceeds in reverse.
    next_request_block_number: u64,
    /// Keeps track of the block we need to validate next.
    lowest_validated_header: Option<SealedHeader<H::Header>>,
    /// Tip block number to start validating from (in reverse)
    next_chain_tip_block_number: u64,
    /// The batch size per one request
    request_limit: u64,
    /// Minimum amount of requests to handle concurrently.
    min_concurrent_requests: usize,
    /// Maximum amount of requests to handle concurrently.
    max_concurrent_requests: usize,
    /// The number of block headers to return at once
    stream_batch_size: usize,
    /// Maximum amount of received headers to buffer internally.
    max_buffered_responses: usize,
    /// Contains the request to retrieve the headers for the sync target
    ///
    /// This will give us the block number of the `sync_target`, after which we can send multiple
    /// requests at a time.
    sync_target_request: Option<HeadersRequestFuture<H::Output>>,
    /// requests in progress
    in_progress_queue: FuturesUnordered<HeadersRequestFuture<H::Output>>,
    /// Buffered, unvalidated responses
    buffered_responses: BinaryHeap<OrderedHeadersResponse<H::Header>>,
    /// Buffered, _sorted_ and validated headers ready to be returned.
    ///
    /// Note: headers are sorted from high to low
    queued_validated_headers: Vec<SealedHeader<H::Header>>,
    /// Header downloader metrics.
    metrics: HeaderDownloaderMetrics,
}
// === impl ReverseHeadersDownloader ===
impl<H> ReverseHeadersDownloader<H>
where
H: HeadersClient<Header: reth_primitives_traits::BlockHeader> + 'static,
{
/// Convenience method to create a [`ReverseHeadersDownloaderBuilder`] without importing it
pub fn builder() -> ReverseHeadersDownloaderBuilder {
ReverseHeadersDownloaderBuilder::default()
}
/// Returns the block number the local node is at.
#[inline]
fn local_block_number(&self) -> Option<BlockNumber> {
self.local_head.as_ref().map(|h| h.number())
}
/// Returns the existing local head block number
///
/// # Panics
///
/// If the local head has not been set.
#[inline]
fn existing_local_block_number(&self) -> BlockNumber {
self.local_head.as_ref().expect("is initialized").number()
}
/// Returns the existing sync target.
///
/// # Panics
///
/// If the sync target has never been set.
#[inline]
fn existing_sync_target(&self) -> SyncTargetBlock {
self.sync_target.as_ref().expect("is initialized").clone()
}
/// Max requests to handle at the same time
///
/// This depends on the number of active peers but will always be
/// `min_concurrent_requests..max_concurrent_requests`
#[inline]
fn concurrent_request_limit(&self) -> usize {
let num_peers = self.client.num_connected_peers();
// we try to keep more requests than available peers active so that there's always a
// followup request available for a peer
let dynamic_target = num_peers * REQUESTS_PER_PEER_MULTIPLIER;
let max_dynamic = dynamic_target.max(self.min_concurrent_requests);
// If only a few peers are connected we keep it low
if num_peers < self.min_concurrent_requests {
return max_dynamic
}
max_dynamic.min(self.max_concurrent_requests)
}
/// Returns the next header request
///
/// This will advance the current block towards the local head.
///
/// Returns `None` if no more requests are required.
fn next_request(&mut self) -> Option<HeadersRequest> {
if let Some(local_head) = self.local_block_number() {
if self.next_request_block_number > local_head {
let request = calc_next_request(
local_head,
self.next_request_block_number,
self.request_limit,
);
// need to shift the tracked request block number based on the number of requested
// headers so follow-up requests will use that as start.
self.next_request_block_number -= request.limit;
return Some(request)
}
}
None
}
/// Returns the next header to use for validation.
///
/// Since this downloader downloads blocks with falling block number, this will return the
/// lowest (in terms of block number) validated header.
///
/// This is either the last `queued_validated_headers`, or if has been drained entirely the
/// `lowest_validated_header`.
///
/// This only returns `None` if we haven't fetched the initial chain tip yet.
fn lowest_validated_header(&self) -> Option<&SealedHeader<H::Header>> {
self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref())
}
/// Resets the request trackers and clears the sync target.
///
/// This ensures the downloader will restart after a new sync target has been set.
fn reset(&mut self) {
debug!(target: "downloaders::headers", "Resetting headers downloader");
self.next_request_block_number = 0;
self.next_chain_tip_block_number = 0;
self.sync_target.take();
}
/// Validate that the received header matches the expected sync target.
fn validate_sync_target(
&self,
header: &SealedHeader<H::Header>,
request: HeadersRequest,
peer_id: PeerId,
) -> Result<(), Box<HeadersResponseError>> {
match self.existing_sync_target() {
SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. }
if header.hash() != hash =>
{
Err(Box::new(HeadersResponseError {
request,
peer_id: Some(peer_id),
error: DownloadError::InvalidTip(
GotExpected { got: header.hash(), expected: hash }.into(),
),
}))
}
SyncTargetBlock::Number(number) if header.number() != number => {
Err(Box::new(HeadersResponseError {
request,
peer_id: Some(peer_id),
error: DownloadError::InvalidTipNumber(GotExpected {
got: header.number(),
expected: number,
}),
}))
}
_ => Ok(()),
}
}
/// Processes the next headers in line.
///
/// This will validate all headers and insert them into the validated buffer.
///
/// Returns an error if the given headers are invalid.
///
/// Caution: this expects the `headers` to be sorted with _falling_ block numbers
fn process_next_headers(
&mut self,
request: HeadersRequest,
headers: Vec<H::Header>,
peer_id: PeerId,
) -> Result<(), ReverseHeadersDownloaderError<H::Header>> {
let mut validated = Vec::with_capacity(headers.len());
let sealed_headers =
headers.into_par_iter().map(SealedHeader::seal_slow).collect::<Vec<_>>();
for parent in sealed_headers {
// Validate that the header is the parent header of the last validated header.
if let Some(validated_header) =
validated.last().or_else(|| self.lowest_validated_header())
{
if let Err(error) = self.validate(validated_header, &parent) {
trace!(target: "downloaders::headers", %error ,"Failed to validate header");
return Err(
HeadersResponseError { request, peer_id: Some(peer_id), error }.into()
)
}
} else {
self.validate_sync_target(&parent, request.clone(), peer_id)?;
}
validated.push(parent);
}
// If the last (smallest) validated header attaches to the local head, validate it.
if let Some((last_header, head)) = validated
.last_mut()
.zip(self.local_head.as_ref())
.filter(|(last, head)| last.number() == head.number() + 1)
{
// Every header must be valid on its own
if let Err(error) = self.consensus.validate_header(&*last_header) {
trace!(target: "downloaders::headers", %error, "Failed to validate header");
return Err(HeadersResponseError {
request,
peer_id: Some(peer_id),
error: DownloadError::HeaderValidation {
hash: head.hash(),
number: head.number(),
error: Box::new(error),
},
}
.into())
}
// If the header is valid on its own, but not against its parent, we return it as
// detached head error.
// In stage sync this will trigger an unwind because this means that the local head
// is not part of the chain the sync target is on. In other words, the downloader was
// unable to connect the sync target with the local head because the sync target and
// the local head or on different chains.
if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) {
let local_head = head.clone();
// Replace the last header with a detached variant
error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain");
// Reset trackers so that we can start over the next time the sync target is
// updated.
// The expected event flow when that happens is that the node will unwind the local
// chain and restart the downloader.
self.reset();
return Err(HeadersDownloaderError::DetachedHead {
local_head: Box::new(local_head),
header: Box::new(last_header.clone()),
error: Box::new(error),
}
.into())
}
}
// update tracked block info (falling block number)
self.next_chain_tip_block_number =
validated.last().expect("exists").number().saturating_sub(1);
self.queued_validated_headers.extend(validated);
Ok(())
}
/// Updates the state based on the given `target_block_number`
///
/// There are three different outcomes:
/// * This is the first time this is called: current `sync_target` block is still `None`. In
/// which case we're initializing the request trackers to `next_block`
/// * The `target_block_number` is _higher_ than the current target. In which case we start
/// over with a new range
/// * The `target_block_number` is _lower_ than the current target or the _same_. In which case
/// we don't need to update the request trackers but need to ensure already buffered headers
/// are _not_ higher than the new `target_block_number`.
fn on_block_number_update(&mut self, target_block_number: u64, next_block: u64) {
// Update the trackers
if let Some(old_target) =
self.sync_target.as_mut().and_then(|t| t.replace_number(target_block_number))
{
if target_block_number > old_target {
// the new target is higher than the old target we need to update the
// request tracker and reset everything
self.next_request_block_number = next_block;
self.next_chain_tip_block_number = next_block;
self.clear();
} else {
// ensure already validated headers are in range
let skip = self
.queued_validated_headers
.iter()
.take_while(|last| last.number() > target_block_number)
.count();
// removes all headers that are higher than current target
self.queued_validated_headers.drain(..skip);
}
} else {
// this occurs on the initial sync target request
self.next_request_block_number = next_block;
self.next_chain_tip_block_number = next_block;
}
}
    /// Handles the response for the request for the sync target
    ///
    /// On success this seals the highest returned header, verifies it against the tracked sync
    /// target (by hash or number), resets the request trackers to the target's parent and then
    /// tries to drain any buffered responses that were blocked on it.
    fn on_sync_target_outcome(
        &mut self,
        response: HeadersRequestOutcome<H::Header>,
    ) -> Result<(), ReverseHeadersDownloaderError<H::Header>> {
        let sync_target = self.existing_sync_target();
        let HeadersRequestOutcome { request, outcome } = response;
        match outcome {
            Ok(res) => {
                let (peer_id, mut headers) = res.split();

                // update total downloaded metric
                self.metrics.total_downloaded.increment(headers.len() as u64);

                // sort headers from highest to lowest block number
                headers.sort_unstable_by_key(|h| Reverse(h.number()));

                // an empty response for the tip request is always a bad response
                if headers.is_empty() {
                    return Err(HeadersResponseError {
                        request,
                        peer_id: Some(peer_id),
                        error: DownloadError::EmptyResponse,
                    }
                    .into())
                }

                // take the highest header of the response as the tip candidate
                let header = headers.swap_remove(0);
                let target = SealedHeader::seal_slow(header);

                // the received tip must match the tracked sync target, by hash or by number
                match sync_target {
                    SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => {
                        if target.hash() != hash {
                            return Err(HeadersResponseError {
                                request,
                                peer_id: Some(peer_id),
                                error: DownloadError::InvalidTip(
                                    GotExpected { got: target.hash(), expected: hash }.into(),
                                ),
                            }
                            .into())
                        }
                    }
                    SyncTargetBlock::Number(number) => {
                        if target.number() != number {
                            return Err(HeadersResponseError {
                                request,
                                peer_id: Some(peer_id),
                                error: DownloadError::InvalidTipNumber(GotExpected {
                                    got: target.number(),
                                    expected: number,
                                }),
                            }
                            .into())
                        }
                    }
                }

                trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number(), "Received sync target");

                // This is the next block we need to start issuing requests from
                let parent_block_number = target.number().saturating_sub(1);
                self.on_block_number_update(target.number(), parent_block_number);

                self.queued_validated_headers.push(target);

                // try to validate all buffered responses blocked by this successful response
                self.try_validate_buffered()
                    .map(Err::<(), ReverseHeadersDownloaderError<H::Header>>)
                    .transpose()?;

                Ok(())
            }
            Err(err) => {
                Err(HeadersResponseError { request, peer_id: None, error: err.into() }.into())
            }
        }
    }
    /// Invoked when we received a response
    ///
    /// A valid response either continues the currently tracked range (and is processed
    /// immediately) or is buffered until the gap down to it has been filled.
    fn on_headers_outcome(
        &mut self,
        response: HeadersRequestOutcome<H::Header>,
    ) -> Result<(), ReverseHeadersDownloaderError<H::Header>> {
        let requested_block_number = response.block_number();
        let HeadersRequestOutcome { request, outcome } = response;

        match outcome {
            Ok(res) => {
                let (peer_id, mut headers) = res.split();

                // update total downloaded metric
                self.metrics.total_downloaded.increment(headers.len() as u64);
                trace!(target: "downloaders::headers", len=%headers.len(), "Received headers response");

                if headers.is_empty() {
                    return Err(HeadersResponseError {
                        request,
                        peer_id: Some(peer_id),
                        error: DownloadError::EmptyResponse,
                    }
                    .into())
                }

                // a partial response is a bad response: the peer must deliver the full range
                if (headers.len() as u64) != request.limit {
                    return Err(HeadersResponseError {
                        peer_id: Some(peer_id),
                        error: DownloadError::HeadersResponseTooShort(GotExpected {
                            got: headers.len() as u64,
                            expected: request.limit,
                        }),
                        request,
                    }
                    .into())
                }

                // sort headers from highest to lowest block number
                headers.sort_unstable_by_key(|h| Reverse(h.number()));

                // validate the response
                let highest = &headers[0];

                trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number(), "Validating non-empty headers response");

                // the highest header must be the block the request started at
                if highest.number() != requested_block_number {
                    return Err(HeadersResponseError {
                        request,
                        peer_id: Some(peer_id),
                        error: DownloadError::HeadersResponseStartBlockMismatch(GotExpected {
                            got: highest.number(),
                            expected: requested_block_number,
                        }),
                    }
                    .into())
                }

                // check if the response is the next expected
                if highest.number() == self.next_chain_tip_block_number {
                    // is next response, validate it
                    self.process_next_headers(request, headers, peer_id)?;

                    // try to validate all buffered responses blocked by this successful response
                    self.try_validate_buffered()
                        .map(Err::<(), ReverseHeadersDownloaderError<H::Header>>)
                        .transpose()?;
                } else if highest.number() > self.existing_local_block_number() {
                    self.metrics.buffered_responses.increment(1.);
                    // can't validate yet: buffer until the gap down to this response is filled
                    self.buffered_responses.push(OrderedHeadersResponse {
                        headers,
                        request,
                        peer_id,
                    })
                }
                // responses at or below the local head are silently dropped

                Ok(())
            }
            // most likely a noop, because this error
            // would've been handled by the fetcher internally
            Err(err) => {
                trace!(target: "downloaders::headers", %err, "Response error");
                Err(HeadersResponseError { request, peer_id: None, error: err.into() }.into())
            }
        }
    }
fn penalize_peer(&self, peer_id: Option<PeerId>, error: &DownloadError) {
// Penalize the peer for bad response
if let Some(peer_id) = peer_id {
trace!(target: "downloaders::headers", ?peer_id, %error, "Penalizing peer");
self.client.report_bad_message(peer_id);
}
}
/// Handles the error of a bad response
///
/// This will re-submit the request.
fn on_headers_error(&self, err: Box<HeadersResponseError>) {
let HeadersResponseError { request, peer_id, error } = *err;
self.penalize_peer(peer_id, &error);
// Update error metric
self.metrics.increment_errors(&error);
// Re-submit the request
self.submit_request(request, Priority::High);
}
    /// Attempts to validate the buffered responses
    ///
    /// Returns an error if the next expected response was popped, but failed validation.
    fn try_validate_buffered(&mut self) -> Option<ReverseHeadersDownloaderError<H::Header>> {
        loop {
            // Check to see if we've already received the next value
            let next_response = self.buffered_responses.peek_mut()?;
            let next_block_number = next_response.block_number();
            match next_block_number.cmp(&self.next_chain_tip_block_number) {
                // the top buffered response is still below the next expected block, so nothing
                // can be validated yet
                Ordering::Less => return None,
                Ordering::Equal => {
                    // this is the next expected response: pop it off the heap and validate it
                    let OrderedHeadersResponse { headers, request, peer_id } =
                        PeekMut::pop(next_response);
                    self.metrics.buffered_responses.decrement(1.);

                    if let Err(err) = self.process_next_headers(request, headers, peer_id) {
                        return Some(err)
                    }
                }
                Ordering::Greater => {
                    // the buffered response starts above the next expected block: discard it
                    self.metrics.buffered_responses.decrement(1.);
                    PeekMut::pop(next_response);
                }
            }
        }
    }
    /// Returns the request for the `sync_target` header.
    ///
    /// This is always a single-header request (`limit: 1`) starting at `start`.
    const fn get_sync_target_request(&self, start: BlockHashOrNumber) -> HeadersRequest {
        HeadersRequest::falling(start, 1)
    }
/// Starts a request future
fn submit_request(&self, request: HeadersRequest, priority: Priority) {
trace!(target: "downloaders::headers", ?request, "Submitting headers request");
self.in_progress_queue.push(self.request_fut(request, priority));
self.metrics.in_flight_requests.increment(1.);
}
fn request_fut(
&self,
request: HeadersRequest,
priority: Priority,
) -> HeadersRequestFuture<H::Output> {
let client = Arc::clone(&self.client);
HeadersRequestFuture {
request: Some(request.clone()),
fut: client.get_headers_with_priority(request, priority),
}
}
    /// Validate whether the header is valid in relation to its parent
    ///
    /// Delegates to [`validate_header_download`] using the configured consensus implementation.
    fn validate(
        &self,
        header: &SealedHeader<H::Header>,
        parent: &SealedHeader<H::Header>,
    ) -> DownloadResult<()> {
        validate_header_download(&self.consensus, header, parent)
    }
/// Clears all requests/responses.
fn clear(&mut self) {
self.lowest_validated_header.take();
self.queued_validated_headers = Vec::new();
self.buffered_responses = BinaryHeap::new();
self.in_progress_queue.clear();
self.metrics.in_flight_requests.set(0.);
self.metrics.buffered_responses.set(0.);
}
/// Splits off the next batch of headers
fn split_next_batch(&mut self) -> Vec<SealedHeader<H::Header>> {
let batch_size = self.stream_batch_size.min(self.queued_validated_headers.len());
let mut rem = self.queued_validated_headers.split_off(batch_size);
std::mem::swap(&mut rem, &mut self.queued_validated_headers);
// If the downloader consumer does not flush headers at the same rate that the downloader
// queues them, then the `queued_validated_headers` buffer can grow unbounded.
//
// The semantics of `split_off` state that the capacity of the original buffer is
// unchanged, so queued_validated_headers will then have only `batch_size` elements, and
// its original capacity. Because `rem` is initially populated with elements `[batch_size,
// len)` of `queued_validated_headers`, it will have a capacity of at least `len -
// batch_size`, and the total memory allocated by the two buffers will be around double the
// original size of `queued_validated_headers`.
//
// These are then mem::swapped, leaving `rem` with a large capacity, but small length.
//
// To prevent these allocations from leaking to the consumer, we shrink the capacity of the
// new buffer. The total memory allocated should then be not much more than the original
// size of `queued_validated_headers`.
rem.shrink_to_fit();
rem
}
}
impl<H> ReverseHeadersDownloader<H>
where
    H: HeadersClient,
    Self: HeaderDownloader + 'static,
{
    /// Spawns the downloader task via [`tokio::task::spawn`]
    pub fn into_task(self) -> TaskDownloader<<Self as HeaderDownloader>::Header> {
        TaskDownloader::spawn_with(self, &TokioTaskExecutor::default())
    }

    /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given `spawner`.
    pub fn into_task_with<S>(
        self,
        spawner: &S,
    ) -> TaskDownloader<<Self as HeaderDownloader>::Header>
    where
        S: TaskSpawner,
    {
        TaskDownloader::spawn_with(self, spawner)
    }
}
impl<H> HeaderDownloader for ReverseHeadersDownloader<H>
where
    H: HeadersClient<Header: reth_primitives_traits::BlockHeader> + 'static,
{
    type Header = H::Header;

    fn update_local_head(&mut self, head: SealedHeader<H::Header>) {
        // ensure we're only yielding headers that are in range and follow the current local head.
        while self
            .queued_validated_headers
            .last()
            .is_some_and(|last| last.number() <= head.number())
        {
            // headers are sorted high to low
            self.queued_validated_headers.pop();
        }
        trace!(
            target: "downloaders::headers",
            head=?head.num_hash(),
            "Updating local head"
        );
        // update the local head
        self.local_head = Some(head);
    }

    /// If the given target is different from the current target, we need to update the sync target
    fn update_sync_target(&mut self, target: SyncTarget) {
        let current_tip = self.sync_target.as_ref().and_then(|t| t.hash());
        trace!(
            target: "downloaders::headers",
            sync_target=?target,
            current_tip=?current_tip,
            "Updating sync target"
        );
        match target {
            SyncTarget::Tip(tip) => {
                if Some(tip) != current_tip {
                    trace!(target: "downloaders::headers", current=?current_tip, new=?tip, "Update sync target");
                    let new_sync_target = SyncTargetBlock::from_hash(tip);

                    // if the new sync target is the next queued request we don't need to re-start
                    // the target update
                    if let Some(target_number) = self
                        .queued_validated_headers
                        .first()
                        .filter(|h| h.hash() == tip)
                        .map(|h| h.number())
                    {
                        self.sync_target = Some(new_sync_target.with_number(target_number));
                        return
                    }

                    // otherwise the tip header must first be fetched from the network
                    trace!(target: "downloaders::headers", new=?target, "Request new sync target");
                    self.metrics.out_of_order_requests.increment(1);
                    self.sync_target = Some(new_sync_target);
                    self.sync_target_request = Some(
                        self.request_fut(self.get_sync_target_request(tip.into()), Priority::High),
                    );
                }
            }
            SyncTarget::Gap(existing) => {
                let target = existing.parent;
                if Some(target) != current_tip {
                    // there could be a sync target request in progress
                    self.sync_target_request.take();
                    // If the target has changed, update the request pointers based on the new
                    // targeted block number
                    let parent_block_number = existing.block.number.saturating_sub(1);
                    trace!(target: "downloaders::headers", current=?current_tip, new=?target, %parent_block_number, "Updated sync target");
                    // Update the sync target hash
                    self.sync_target = match self.sync_target.take() {
                        Some(sync_target) => Some(sync_target.with_hash(target)),
                        None => Some(SyncTargetBlock::from_hash(target)),
                    };
                    self.on_block_number_update(parent_block_number, parent_block_number);
                }
            }
            SyncTarget::TipNum(num) => {
                let current_tip_num = self.sync_target.as_ref().and_then(|t| t.number());
                if Some(num) != current_tip_num {
                    trace!(target: "downloaders::headers", %num, "Updating sync target based on num");
                    // just update the sync target
                    self.sync_target = Some(SyncTargetBlock::from_number(num));
                    self.sync_target_request = Some(
                        self.request_fut(self.get_sync_target_request(num.into()), Priority::High),
                    );
                }
            }
        }
    }

    fn set_batch_size(&mut self, batch_size: usize) {
        self.stream_batch_size = batch_size;
    }
}
impl<H> Stream for ReverseHeadersDownloader<H>
where
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/headers/test_utils.rs | crates/net/downloaders/src/headers/test_utils.rs | //! Test helper impls for generating bodies
#![allow(dead_code)]
use reth_primitives_traits::SealedHeader;
/// Returns a new [`SealedHeader`] that's the child header of the given `parent`.
pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader {
    let mut child = parent.as_ref().clone();
    child.number += 1;
    // Use the sealed header's cached hash instead of re-hashing the parent via `hash_slow`.
    child.parent_hash = parent.hash();
    SealedHeader::seal_slow(child)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/headers/mod.rs | crates/net/downloaders/src/headers/mod.rs | /// A Linear downloader implementation.
pub mod reverse_headers;
/// A header downloader that does nothing. Useful to build unwind-only pipelines.
pub mod noop;
/// A downloader implementation that spawns a downloader to a task
pub mod task;
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/headers/task.rs | crates/net/downloaders/src/headers/task.rs | use alloy_primitives::Sealable;
use futures::{FutureExt, Stream};
use futures_util::StreamExt;
use pin_project::pin_project;
use reth_network_p2p::headers::{
downloader::{HeaderDownloader, SyncTarget},
error::HeadersDownloaderResult,
};
use reth_primitives_traits::SealedHeader;
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use std::{
fmt::Debug,
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::{mpsc, mpsc::UnboundedSender};
use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream};
use tokio_util::sync::PollSender;
/// The maximum number of header results to hold in the buffer.
pub const HEADERS_TASK_BUFFER_SIZE: usize = 8;

/// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task.
#[derive(Debug)]
#[pin_project]
pub struct TaskDownloader<H: Sealable> {
    /// Receives downloaded header batches from the spawned task.
    #[pin]
    from_downloader: ReceiverStream<HeadersDownloaderResult<Vec<SealedHeader<H>>, H>>,
    /// Sends commands (head/target/batch-size updates) to the spawned task.
    to_downloader: UnboundedSender<DownloaderUpdates<H>>,
}
// === impl TaskDownloader ===

impl<H: Sealable + Send + Sync + Unpin + 'static> TaskDownloader<H> {
    /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`]
    /// that's connected to that task.
    ///
    /// # Panics
    ///
    /// This method panics if called outside of a Tokio runtime
    ///
    /// # Example
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader;
    /// # use reth_downloaders::headers::task::TaskDownloader;
    /// # use reth_consensus::HeaderValidator;
    /// # use reth_network_p2p::headers::client::HeadersClient;
    /// # use reth_primitives_traits::BlockHeader;
    /// # fn t<H: HeadersClient<Header: BlockHeader> + 'static>(consensus:Arc<dyn HeaderValidator<H::Header>>, client: Arc<H>) {
    /// let downloader = ReverseHeadersDownloader::<H>::builder().build(
    ///     client,
    ///     consensus
    /// );
    /// let downloader = TaskDownloader::spawn(downloader);
    /// # }
    /// ```
    pub fn spawn<T>(downloader: T) -> Self
    where
        T: HeaderDownloader<Header = H> + 'static,
    {
        Self::spawn_with(downloader, &TokioTaskExecutor::default())
    }

    /// Spawns the given `downloader` via the given [`TaskSpawner`] returns a [`TaskDownloader`]
    /// that's connected to that task.
    pub fn spawn_with<T, S>(downloader: T, spawner: &S) -> Self
    where
        T: HeaderDownloader<Header = H> + 'static,
        S: TaskSpawner,
    {
        // bounded channel carrying downloaded header batches from the spawned task back here
        let (headers_tx, headers_rx) = mpsc::channel(HEADERS_TASK_BUFFER_SIZE);
        // unbounded channel carrying commands into the spawned task
        let (to_downloader, updates_rx) = mpsc::unbounded_channel();

        let downloader = SpawnedDownloader {
            headers_tx: PollSender::new(headers_tx),
            updates: UnboundedReceiverStream::new(updates_rx),
            downloader,
        };
        spawner.spawn(downloader.boxed());

        Self { from_downloader: ReceiverStream::new(headers_rx), to_downloader }
    }
}
impl<H: Sealable + Debug + Send + Sync + Unpin + 'static> HeaderDownloader for TaskDownloader<H> {
    type Header = H;

    fn update_sync_gap(&mut self, head: SealedHeader<H>, target: SyncTarget) {
        // best-effort send: a closed channel (task gone) is intentionally ignored
        let _ = self.to_downloader.send(DownloaderUpdates::UpdateSyncGap(head, target));
    }

    fn update_local_head(&mut self, head: SealedHeader<H>) {
        // best-effort send: a closed channel (task gone) is intentionally ignored
        let _ = self.to_downloader.send(DownloaderUpdates::UpdateLocalHead(head));
    }

    fn update_sync_target(&mut self, target: SyncTarget) {
        // best-effort send: a closed channel (task gone) is intentionally ignored
        let _ = self.to_downloader.send(DownloaderUpdates::UpdateSyncTarget(target));
    }

    fn set_batch_size(&mut self, limit: usize) {
        // best-effort send: a closed channel (task gone) is intentionally ignored
        let _ = self.to_downloader.send(DownloaderUpdates::SetBatchSize(limit));
    }
}
impl<H: Sealable> Stream for TaskDownloader<H> {
    type Item = HeadersDownloaderResult<Vec<SealedHeader<H>>, H>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // yield whatever the spawned downloader task has produced
        self.project().from_downloader.poll_next(cx)
    }
}
/// A [`HeaderDownloader`] that runs on its own task
#[expect(clippy::complexity)]
struct SpawnedDownloader<T: HeaderDownloader> {
    /// Incoming commands from the [`TaskDownloader`] frontend.
    updates: UnboundedReceiverStream<DownloaderUpdates<T::Header>>,
    /// Outgoing header batches back to the [`TaskDownloader`] frontend.
    headers_tx: PollSender<HeadersDownloaderResult<Vec<SealedHeader<T::Header>>, T::Header>>,
    /// The wrapped downloader driven by this task.
    downloader: T,
}
impl<T: HeaderDownloader> Future for SpawnedDownloader<T> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();

        loop {
            // First drain all pending commands so the downloader's state is up to date before it
            // is polled for the next header batch.
            loop {
                match this.updates.poll_next_unpin(cx) {
                    Poll::Pending => break,
                    Poll::Ready(None) => {
                        // channel closed, this means [TaskDownloader] was dropped, so we can also
                        // exit
                        return Poll::Ready(())
                    }
                    Poll::Ready(Some(update)) => match update {
                        DownloaderUpdates::UpdateSyncGap(head, target) => {
                            this.downloader.update_sync_gap(head, target);
                        }
                        DownloaderUpdates::UpdateLocalHead(head) => {
                            this.downloader.update_local_head(head);
                        }
                        DownloaderUpdates::UpdateSyncTarget(target) => {
                            this.downloader.update_sync_target(target);
                        }
                        DownloaderUpdates::SetBatchSize(limit) => {
                            this.downloader.set_batch_size(limit);
                        }
                    },
                }
            }

            // Reserve capacity on the results channel before polling the downloader so a produced
            // batch is never dropped because the channel is full.
            match ready!(this.headers_tx.poll_reserve(cx)) {
                Ok(()) => {
                    match ready!(this.downloader.poll_next_unpin(cx)) {
                        Some(headers) => {
                            if this.headers_tx.send_item(headers).is_err() {
                                // channel closed, this means [TaskDownloader] was dropped, so we
                                // can also exit
                                return Poll::Ready(())
                            }
                        }
                        // downloader stream yielded `None`: stay pending until woken again
                        None => return Poll::Pending,
                    }
                }
                Err(_) => {
                    // channel closed, this means [TaskDownloader] was dropped, so
                    // we can also exit
                    return Poll::Ready(())
                }
            }
        }
    }
}
/// Commands delegated to the spawned [`HeaderDownloader`]
#[derive(Debug)]
enum DownloaderUpdates<H> {
    /// Dispatched to [`HeaderDownloader::update_sync_gap`].
    UpdateSyncGap(SealedHeader<H>, SyncTarget),
    /// Dispatched to [`HeaderDownloader::update_local_head`].
    UpdateLocalHead(SealedHeader<H>),
    /// Dispatched to [`HeaderDownloader::update_sync_target`].
    UpdateSyncTarget(SyncTarget),
    /// Dispatched to [`HeaderDownloader::set_batch_size`].
    SetBatchSize(usize),
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::headers::{
        reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header,
    };
    use reth_consensus::test_utils::TestConsensus;
    use reth_network_p2p::test_utils::TestHeadersClient;
    use std::sync::Arc;

    #[tokio::test(flavor = "multi_thread")]
    async fn download_one_by_one_on_task() {
        reth_tracing::init_test_tracing();

        // build a four-block chain: p3 <- p2 <- p1 <- p0 (tip)
        let p3 = SealedHeader::default();
        let p2 = child_header(&p3);
        let p1 = child_header(&p2);
        let p0 = child_header(&p1);

        let client = Arc::new(TestHeadersClient::default());
        let downloader = ReverseHeadersDownloaderBuilder::default()
            .stream_batch_size(1)
            .request_limit(1)
            .build(Arc::clone(&client), Arc::new(TestConsensus::default()));
        let mut downloader = TaskDownloader::spawn(downloader);

        downloader.update_local_head(p3.clone());
        downloader.update_sync_target(SyncTarget::Tip(p0.hash()));

        client
            .extend(vec![
                p0.as_ref().clone(),
                p1.as_ref().clone(),
                p2.as_ref().clone(),
                p3.as_ref().clone(),
            ])
            .await;

        // batches of size 1 are yielded highest-first (tip towards local head)
        let headers = downloader.next().await.unwrap();
        assert_eq!(headers, Ok(vec![p0]));
        let headers = downloader.next().await.unwrap();
        assert_eq!(headers, Ok(vec![p1]));
        let headers = downloader.next().await.unwrap();
        assert_eq!(headers, Ok(vec![p2]));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/test_utils/bodies_client.rs | crates/net/downloaders/src/test_utils/bodies_client.rs | use alloy_primitives::B256;
use reth_ethereum_primitives::BlockBody;
use reth_network_p2p::{
bodies::client::{BodiesClient, BodiesFut},
download::DownloadClient,
priority::Priority,
};
use reth_network_peers::PeerId;
use std::{
collections::HashMap,
fmt::Debug,
ops::RangeInclusive,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Duration,
};
use tokio::sync::Mutex;
/// A [`BodiesClient`] for testing.
#[derive(Debug, Default)]
pub struct TestBodiesClient {
    // canned bodies keyed by block hash; entries are removed as they are served
    bodies: Arc<Mutex<HashMap<B256, BlockBody>>>,
    // whether responses are artificially delayed
    should_delay: bool,
    // optional cap on the number of bodies returned per request
    max_batch_size: Option<usize>,
    // number of body requests served so far
    times_requested: AtomicU64,
    // if set to n, every n-th response is empty
    empty_response_mod: Option<u64>,
}
impl TestBodiesClient {
    /// Sets the canned bodies this client serves, keyed by block hash.
    pub(crate) fn with_bodies(mut self, bodies: HashMap<B256, BlockBody>) -> Self {
        self.bodies = Arc::new(Mutex::new(bodies));
        self
    }

    /// Enables or disables the artificial response delay.
    pub(crate) const fn with_should_delay(mut self, should_delay: bool) -> Self {
        self.should_delay = should_delay;
        self
    }

    /// Instructs the client to respond with empty responses some portion of the time. Every
    /// `empty_mod` responses, the client will respond with an empty response.
    pub(crate) const fn with_empty_responses(mut self, empty_mod: u64) -> Self {
        self.empty_response_mod = Some(empty_mod);
        self
    }

    /// Caps the number of bodies returned per request.
    pub(crate) const fn with_max_batch_size(mut self, max_batch_size: usize) -> Self {
        self.max_batch_size = Some(max_batch_size);
        self
    }

    /// Returns how many body requests have been served so far.
    pub(crate) fn times_requested(&self) -> u64 {
        self.times_requested.load(Ordering::Relaxed)
    }

    /// Returns whether or not the client should respond with an empty response.
    ///
    /// This will only return true if `empty_response_mod` is `Some`, and `times_requested %
    /// empty_response_mod == 0`.
    pub(crate) fn should_respond_empty(&self) -> bool {
        self.empty_response_mod
            .is_some_and(|m| self.times_requested.load(Ordering::Relaxed).is_multiple_of(m))
    }
}
impl DownloadClient for TestBodiesClient {
    fn report_bad_message(&self, _peer_id: PeerId) {
        // noop: the test client does not track peer reputation
    }

    fn num_connected_peers(&self) -> usize {
        // the test client is not backed by real peers
        0
    }
}
impl BodiesClient for TestBodiesClient {
    type Body = BlockBody;
    type Output = BodiesFut;

    fn get_block_bodies_with_priority_and_range_hint(
        &self,
        hashes: Vec<B256>,
        _priority: Priority,
        _range_hint: Option<RangeInclusive<u64>>,
    ) -> Self::Output {
        let should_delay = self.should_delay;
        let bodies = self.bodies.clone();
        let max_batch_size = self.max_batch_size;

        // NOTE: the counter is bumped *before* the empty-response check, so with
        // `with_empty_responses(n)` every n-th request (1-indexed) is answered empty.
        self.times_requested.fetch_add(1, Ordering::Relaxed);
        let should_respond_empty = self.should_respond_empty();

        Box::pin(async move {
            if should_respond_empty {
                return Ok((PeerId::default(), vec![]).into())
            }

            if should_delay {
                // pseudo-random delay (0-99 ms) derived from the first byte of the first hash
                tokio::time::sleep(Duration::from_millis((hashes[0][0] % 100) as u64)).await;
            }

            let bodies = &mut *bodies.lock().await;
            Ok((
                PeerId::default(),
                hashes
                    .into_iter()
                    .take(max_batch_size.unwrap_or(usize::MAX))
                    .map(|hash| {
                        // bodies are consumed on delivery: requesting an unknown (or already
                        // delivered) hash panics the test
                        bodies
                            .remove(&hash)
                            .expect("Downloader asked for a block it should not ask for")
                    })
                    .collect(),
            )
            .into())
        })
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/downloaders/src/test_utils/mod.rs | crates/net/downloaders/src/test_utils/mod.rs | //! Test helper impls.
#![allow(dead_code)]
use crate::{bodies::test_utils::create_raw_bodies, file_codec::BlockFileCodec};
use alloy_primitives::B256;
use futures::SinkExt;
use reth_ethereum_primitives::BlockBody;
use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};
use std::{collections::HashMap, io::SeekFrom, ops::RangeInclusive};
use tokio::{fs::File, io::AsyncSeekExt};
use tokio_util::codec::FramedWrite;
mod bodies_client;
pub use bodies_client::TestBodiesClient;
use reth_primitives_traits::SealedHeader;
/// Metrics scope used for testing.
// NOTE(review): not referenced within this module (file is `#![allow(dead_code)]`) —
// presumably consumed by downstream test code; confirm usage.
pub(crate) const TEST_SCOPE: &str = "downloaders.test";
/// Generate a set of bodies and their corresponding block hashes
pub(crate) fn generate_bodies(
    range: RangeInclusive<u64>,
) -> (Vec<SealedHeader>, HashMap<B256, BlockBody>) {
    let mut rng = generators::rng();
    let params =
        BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() };
    let blocks = random_block_range(&mut rng, range, params);

    // split the generated blocks into their headers and hash-indexed bodies
    let headers: Vec<SealedHeader> = blocks.iter().map(|b| b.clone_sealed_header()).collect();
    let bodies: HashMap<B256, BlockBody> =
        blocks.into_iter().map(|b| (b.hash(), b.into_body())).collect();

    (headers, bodies)
}
/// Generate a set of bodies, write them to a temporary file, and return the file along with the
/// bodies and corresponding block hashes
pub(crate) async fn generate_bodies_file(
    range: RangeInclusive<u64>,
) -> (tokio::fs::File, Vec<SealedHeader>, HashMap<B256, BlockBody>) {
    let (headers, bodies) = generate_bodies(range);
    // `create_raw_bodies` consumes bodies, so hand it a scratch copy and keep the original
    let mut bodies_scratch = bodies.clone();
    let raw_block_bodies = create_raw_bodies(headers.iter().cloned(), &mut bodies_scratch);

    let tmp: File = tempfile::tempfile().unwrap().into();
    let mut writer = FramedWrite::new(tmp, BlockFileCodec::default());

    // rlp encode one after the other
    for block in raw_block_bodies {
        writer.feed(block).await.unwrap();
    }
    writer.flush().await.unwrap();

    // get the file back and rewind it so callers read from the start
    let mut file: File = writer.into_inner();
    file.seek(SeekFrom::Start(0)).await.unwrap();

    (file, headers, bodies)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/lib.rs | crates/net/eth-wire/src/lib.rs | //! Implementation of the `eth` wire protocol.
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
//! - `arbitrary`: Adds `proptest` and `arbitrary` support for wire types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod capability;
mod disconnect;
pub mod errors;
pub mod eth_snap_stream;
mod ethstream;
mod hello;
pub mod multiplex;
mod p2pstream;
mod pinger;
pub mod protocol;
/// Handshake logic
pub mod handshake;
#[cfg(test)]
pub mod test_utils;
#[cfg(test)]
pub use tokio_util::codec::{
LengthDelimitedCodec as PassthroughCodec, LengthDelimitedCodecError as PassthroughCodecError,
};
pub use crate::{
disconnect::CanDisconnect,
ethstream::{EthStream, EthStreamInner, UnauthedEthStream, MAX_MESSAGE_SIZE},
hello::{HelloMessage, HelloMessageBuilder, HelloMessageWithProtocols},
p2pstream::{
DisconnectP2P, P2PMessage, P2PMessageID, P2PStream, UnauthedP2PStream, HANDSHAKE_TIMEOUT,
MAX_RESERVED_MESSAGE_ID,
},
Capability, ProtocolVersion,
};
// Re-export wire types
#[doc(inline)]
pub use reth_eth_wire_types::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/multiplex.rs | crates/net/eth-wire/src/multiplex.rs | //! Rlpx protocol multiplexer and satellite stream
//!
//! A Satellite is a Stream that primarily drives a single `RLPx` subprotocol but can also handle
//! additional subprotocols.
//!
//! Most of other subprotocols are "dependent satellite" protocols of "eth" and not a fully standalone protocol, for example "snap", See also [snap protocol](https://github.com/ethereum/devp2p/blob/298d7a77c3bf833641579ecbbb5b13f0311eeeea/caps/snap.md?plain=1#L71)
//! Hence it is expected that the primary protocol is "eth" and the additional protocols are
//! "dependent satellite" protocols.
use std::{
collections::VecDeque,
fmt,
future::Future,
io,
pin::{pin, Pin},
sync::Arc,
task::{ready, Context, Poll},
};
use crate::{
capability::{SharedCapabilities, SharedCapability, UnsupportedCapabilityError},
errors::{EthStreamError, P2PStreamError},
handshake::EthRlpxHandshake,
p2pstream::DisconnectP2P,
CanDisconnect, Capability, DisconnectReason, EthStream, P2PStream, UnifiedStatus,
HANDSHAKE_TIMEOUT,
};
use bytes::{Bytes, BytesMut};
use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt};
use reth_eth_wire_types::NetworkPrimitives;
use reth_ethereum_forks::ForkFilter;
use tokio::sync::{mpsc, mpsc::UnboundedSender};
use tokio_stream::wrappers::UnboundedReceiverStream;
/// A Stream and Sink type that wraps a raw rlpx stream [`P2PStream`] and handles message ID
/// multiplexing.
#[derive(Debug)]
pub struct RlpxProtocolMultiplexer<St> {
    /// Shared multiplexing state: the underlying connection, the installed protocols and the
    /// outgoing message buffer.
    inner: MultiplexInner<St>,
}
impl<St> RlpxProtocolMultiplexer<St> {
    /// Wraps the raw p2p stream
    pub fn new(conn: P2PStream<St>) -> Self {
        Self {
            inner: MultiplexInner {
                conn,
                // no protocols installed and nothing buffered yet
                protocols: Default::default(),
                out_buffer: Default::default(),
            },
        }
    }

    /// Installs a new protocol on top of the raw p2p stream.
    ///
    /// This accepts a closure that receives a [`ProtocolConnection`] that will yield messages for
    /// the given capability.
    pub fn install_protocol<F, Proto>(
        &mut self,
        cap: &Capability,
        f: F,
    ) -> Result<(), UnsupportedCapabilityError>
    where
        F: FnOnce(ProtocolConnection) -> Proto,
        Proto: Stream<Item = BytesMut> + Send + 'static,
    {
        self.inner.install_protocol(cap, f)
    }

    /// Returns the [`SharedCapabilities`] of the underlying raw p2p stream
    pub const fn shared_capabilities(&self) -> &SharedCapabilities {
        self.inner.shared_capabilities()
    }
/// Converts this multiplexer into a [`RlpxSatelliteStream`] with the given primary protocol.
pub fn into_satellite_stream<F, Primary>(
self,
cap: &Capability,
primary: F,
) -> Result<RlpxSatelliteStream<St, Primary>, P2PStreamError>
where
F: FnOnce(ProtocolProxy) -> Primary,
{
let Ok(shared_cap) = self.shared_capabilities().ensure_matching_capability(cap).cloned()
else {
return Err(P2PStreamError::CapabilityNotShared)
};
let (to_primary, from_wire) = mpsc::unbounded_channel();
let (to_wire, from_primary) = mpsc::unbounded_channel();
let proxy = ProtocolProxy {
shared_cap: shared_cap.clone(),
from_wire: UnboundedReceiverStream::new(from_wire),
to_wire,
};
let st = primary(proxy);
Ok(RlpxSatelliteStream {
inner: self.inner,
primary: PrimaryProtocol {
to_primary,
from_primary: UnboundedReceiverStream::new(from_primary),
st,
shared_cap,
},
})
}
/// Converts this multiplexer into a [`RlpxSatelliteStream`] with the given primary protocol.
///
/// Returns an error if the primary protocol is not supported by the remote or the handshake
/// failed.
pub async fn into_satellite_stream_with_handshake<F, Fut, Err, Primary>(
self,
cap: &Capability,
handshake: F,
) -> Result<RlpxSatelliteStream<St, Primary>, Err>
where
F: FnOnce(ProtocolProxy) -> Fut,
Fut: Future<Output = Result<Primary, Err>>,
St: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
P2PStreamError: Into<Err>,
{
self.into_satellite_stream_with_tuple_handshake(cap, move |proxy| async move {
let st = handshake(proxy).await?;
Ok((st, ()))
})
.await
.map(|(st, _)| st)
}
/// Converts this multiplexer into a [`RlpxSatelliteStream`] with the given primary protocol.
///
/// Returns an error if the primary protocol is not supported by the remote or the handshake
/// failed.
///
/// This accepts a closure that does a handshake with the remote peer and returns a tuple of the
/// primary stream and extra data.
///
/// See also [`UnauthedEthStream::handshake`](crate::UnauthedEthStream)
pub async fn into_satellite_stream_with_tuple_handshake<F, Fut, Err, Primary, Extra>(
mut self,
cap: &Capability,
handshake: F,
) -> Result<(RlpxSatelliteStream<St, Primary>, Extra), Err>
where
F: FnOnce(ProtocolProxy) -> Fut,
Fut: Future<Output = Result<(Primary, Extra), Err>>,
St: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
P2PStreamError: Into<Err>,
{
let Ok(shared_cap) = self.shared_capabilities().ensure_matching_capability(cap).cloned()
else {
return Err(P2PStreamError::CapabilityNotShared.into())
};
let (to_primary, from_wire) = mpsc::unbounded_channel();
let (to_wire, mut from_primary) = mpsc::unbounded_channel();
let proxy = ProtocolProxy {
shared_cap: shared_cap.clone(),
from_wire: UnboundedReceiverStream::new(from_wire),
to_wire,
};
let f = handshake(proxy);
let mut f = pin!(f);
// this polls the connection and the primary stream concurrently until the handshake is
// complete
loop {
tokio::select! {
biased;
Some(Ok(msg)) = self.inner.conn.next() => {
// Ensure the message belongs to the primary protocol
let Some(offset) = msg.first().copied()
else {
return Err(P2PStreamError::EmptyProtocolMessage.into())
};
if let Some(cap) = self.shared_capabilities().find_by_relative_offset(offset).cloned() {
if cap == shared_cap {
// delegate to primary
let _ = to_primary.send(msg);
} else {
// delegate to satellite
self.inner.delegate_message(&cap, msg);
}
} else {
return Err(P2PStreamError::UnknownReservedMessageId(offset).into())
}
}
Some(msg) = from_primary.recv() => {
self.inner.conn.send(msg).await.map_err(Into::into)?;
}
// Poll all subprotocols for new messages
msg = ProtocolsPoller::new(&mut self.inner.protocols) => {
self.inner.conn.send(msg.map_err(Into::into)?).await.map_err(Into::into)?;
}
res = &mut f => {
let (st, extra) = res?;
return Ok((RlpxSatelliteStream {
inner: self.inner,
primary: PrimaryProtocol {
to_primary,
from_primary: UnboundedReceiverStream::new(from_primary),
st,
shared_cap,
}
}, extra))
}
}
}
}
/// Converts this multiplexer into a [`RlpxSatelliteStream`] with eth protocol as the given
/// primary protocol and the handshake implementation.
pub async fn into_eth_satellite_stream<N: NetworkPrimitives>(
self,
status: UnifiedStatus,
fork_filter: ForkFilter,
handshake: Arc<dyn EthRlpxHandshake>,
) -> Result<(RlpxSatelliteStream<St, EthStream<ProtocolProxy, N>>, UnifiedStatus), EthStreamError>
where
St: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
{
let eth_cap = self.inner.conn.shared_capabilities().eth_version()?;
self.into_satellite_stream_with_tuple_handshake(&Capability::eth(eth_cap), move |proxy| {
let handshake = handshake.clone();
async move {
let mut unauth = UnauthProxy { inner: proxy };
let their_status = handshake
.handshake(&mut unauth, status, fork_filter, HANDSHAKE_TIMEOUT)
.await?;
let eth_stream = EthStream::new(eth_cap, unauth.into_inner());
Ok((eth_stream, their_status))
}
})
.await
}
}
/// Shared state of the multiplexer: the underlying connection plus all installed
/// satellite protocols and pending outgoing messages.
#[derive(Debug)]
struct MultiplexInner<St> {
    /// The raw p2p stream
    conn: P2PStream<St>,
    /// All the subprotocols that are multiplexed on top of the raw p2p stream
    protocols: Vec<ProtocolStream>,
    /// Buffer for outgoing messages on the wire.
    out_buffer: VecDeque<Bytes>,
}
impl<St> MultiplexInner<St> {
    /// Capabilities negotiated on the underlying p2p connection.
    const fn shared_capabilities(&self) -> &SharedCapabilities {
        self.conn.shared_capabilities()
    }
    /// Routes `msg` to the installed protocol handling `cap`.
    ///
    /// Returns `true` if a matching protocol was found; otherwise the message is dropped
    /// and `false` is returned.
    fn delegate_message(&self, cap: &SharedCapability, msg: BytesMut) -> bool {
        match self.protocols.iter().find(|proto| proto.shared_cap == *cap) {
            Some(proto) => {
                proto.send_raw(msg);
                true
            }
            None => false,
        }
    }
    /// Registers an additional subprotocol stream for `cap` on this multiplexer.
    fn install_protocol<F, Proto>(
        &mut self,
        cap: &Capability,
        f: F,
    ) -> Result<(), UnsupportedCapabilityError>
    where
        F: FnOnce(ProtocolConnection) -> Proto,
        Proto: Stream<Item = BytesMut> + Send + 'static,
    {
        // Resolve the negotiated version of the requested capability; this fails if the
        // remote did not agree to it during the `Hello` exchange.
        let negotiated =
            self.conn.shared_capabilities().ensure_matching_capability(cap).cloned()?;
        let (tx, rx) = mpsc::unbounded_channel();
        let proto_conn = ProtocolConnection { from_wire: UnboundedReceiverStream::new(rx) };
        let satellite = f(proto_conn);
        self.protocols.push(ProtocolStream {
            shared_cap: negotiated,
            to_satellite: tx,
            satellite_st: Box::pin(satellite),
        });
        Ok(())
    }
}
/// Represents a protocol in the multiplexer that is used as the primary protocol.
#[derive(Debug)]
struct PrimaryProtocol<Primary> {
    /// Channel to send messages to the primary protocol.
    to_primary: UnboundedSender<BytesMut>,
    /// Receiver for messages from the primary protocol.
    from_primary: UnboundedReceiverStream<Bytes>,
    /// Shared capability of the primary protocol.
    shared_cap: SharedCapability,
    /// The primary stream (e.g. an `EthStream`) driven by the satellite stream.
    st: Primary,
}
/// A Stream and Sink type that acts as a wrapper around a primary `RLPx` subprotocol (e.g. "eth")
///
/// Only emits and sends _non-empty_ messages
#[derive(Debug)]
pub struct ProtocolProxy {
    /// Negotiated capability of the primary protocol; provides the relative message ID offset.
    shared_cap: SharedCapability,
    /// Receives _non-empty_ messages from the wire
    from_wire: UnboundedReceiverStream<BytesMut>,
    /// Sends _non-empty_ messages to the wire
    to_wire: UnboundedSender<Bytes>,
}
impl ProtocolProxy {
    /// Sends a _non-empty_ message on the wire.
    fn try_send(&self, msg: Bytes) -> Result<(), io::Error> {
        if msg.is_empty() {
            // empty messages carry no message ID and cannot be multiplexed
            return Err(io::ErrorKind::InvalidInput.into())
        }
        let masked = self.mask_msg_id(msg)?;
        self.to_wire.send(masked).map_err(|_| io::ErrorKind::BrokenPipe.into())
    }
    /// Masks the message ID of a message to be sent on the wire.
    #[inline]
    fn mask_msg_id(&self, msg: Bytes) -> Result<Bytes, io::Error> {
        if msg.is_empty() {
            // message must not be empty
            return Err(io::ErrorKind::InvalidInput.into())
        }
        let offset = self.shared_cap.relative_message_id_offset();
        if offset == 0 {
            // no shift required for the first shared capability
            return Ok(msg);
        }
        let mut raw = Vec::from(msg);
        let shifted = raw[0].checked_add(offset).ok_or(io::ErrorKind::InvalidInput)?;
        raw[0] = shifted;
        Ok(raw.into())
    }
    /// Unmasks the message ID of a message received from the wire.
    #[inline]
    fn unmask_id(&self, mut msg: BytesMut) -> Result<BytesMut, io::Error> {
        if msg.is_empty() {
            // message must not be empty
            return Err(io::ErrorKind::InvalidInput.into())
        }
        let offset = self.shared_cap.relative_message_id_offset();
        msg[0] = msg[0].checked_sub(offset).ok_or(io::ErrorKind::InvalidInput)?;
        Ok(msg)
    }
}
impl Stream for ProtocolProxy {
    type Item = Result<BytesMut, io::Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // unmask the relative message ID of every message received from the wire
        match ready!(this.from_wire.poll_next_unpin(cx)) {
            Some(raw) => Poll::Ready(Some(this.unmask_id(raw))),
            None => Poll::Ready(None),
        }
    }
}
impl Sink<Bytes> for ProtocolProxy {
    type Error = io::Error;
    fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // the backing channel is unbounded, so the sink is always ready
        Poll::Ready(Ok(()))
    }
    fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> {
        self.get_mut().try_send(item)
    }
    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // messages are handed to the multiplexer immediately; there is nothing to flush here
        Poll::Ready(Ok(()))
    }
    fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // the channel closes when the sender is dropped; nothing to do here
        Poll::Ready(Ok(()))
    }
}
impl CanDisconnect<Bytes> for ProtocolProxy {
    fn disconnect(
        &mut self,
        _reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = Result<(), <Self as Sink<Bytes>>::Error>> + Send + '_>> {
        // TODO handle disconnects: the proxy currently has no way to signal the underlying
        // `P2PStream`, so the reason is dropped and this is a no-op
        Box::pin(async move { Ok(()) })
    }
}
/// Adapter so the injected `EthRlpxHandshake` can run over a multiplexed `ProtocolProxy`
/// using the same error type expectations (`P2PStreamError`).
#[derive(Debug)]
struct UnauthProxy {
    /// The wrapped proxy; all I/O is delegated to it, only the error type is converted.
    inner: ProtocolProxy,
}
impl UnauthProxy {
    /// Unwraps the adapter and returns the underlying [`ProtocolProxy`].
    fn into_inner(self) -> ProtocolProxy {
        self.inner
    }
}
impl Stream for UnauthProxy {
type Item = Result<BytesMut, P2PStreamError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.inner.poll_next_unpin(cx).map(|opt| opt.map(|res| res.map_err(P2PStreamError::from)))
}
}
impl Sink<Bytes> for UnauthProxy {
    type Error = P2PStreamError;
    // Every method is a plain delegation to the inner proxy with the `io::Error`
    // converted into `P2PStreamError` via the existing `From` impl.
    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready_unpin(cx).map_err(Into::into)
    }
    fn start_send(mut self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> {
        self.inner.start_send_unpin(item).map_err(Into::into)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_flush_unpin(cx).map_err(Into::into)
    }
    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_close_unpin(cx).map_err(Into::into)
    }
}
impl CanDisconnect<Bytes> for UnauthProxy {
    // Delegates the disconnect to the wrapped proxy and converts the error type.
    fn disconnect(
        &mut self,
        reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = Result<(), <Self as Sink<Bytes>>::Error>> + Send + '_>> {
        // build the inner future first so the async block captures only the future,
        // not a borrow of `self.inner`
        let fut = self.inner.disconnect(reason);
        Box::pin(async move { fut.await.map_err(P2PStreamError::from) })
    }
}
/// A connection channel to receive _non-empty_ messages for the negotiated protocol.
///
/// This is a [Stream] that returns raw bytes of the received messages for this protocol.
#[derive(Debug)]
pub struct ProtocolConnection {
    /// Receiver half fed by the multiplexer with already-unmasked protocol messages.
    from_wire: UnboundedReceiverStream<BytesMut>,
}
impl Stream for ProtocolConnection {
    type Item = BytesMut;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // plain pass-through to the underlying channel receiver
        self.get_mut().from_wire.poll_next_unpin(cx)
    }
}
/// A Stream and Sink type that acts as a wrapper around a primary `RLPx` subprotocol (e.g. "eth")
/// [`EthStream`] and can also handle additional subprotocols.
#[derive(Debug)]
pub struct RlpxSatelliteStream<St, Primary> {
    /// Shared multiplexing state (wire connection, satellite protocols, out buffer).
    inner: MultiplexInner<St>,
    /// The primary protocol stream and its channels to/from the multiplexer.
    primary: PrimaryProtocol<Primary>,
}
impl<St, Primary> RlpxSatelliteStream<St, Primary> {
    /// Installs a new protocol on top of the raw p2p stream.
    ///
    /// This accepts a closure that receives a [`ProtocolConnection`] that will yield messages for
    /// the given capability.
    ///
    /// Returns an error if the capability was not negotiated with the remote peer.
    pub fn install_protocol<F, Proto>(
        &mut self,
        cap: &Capability,
        f: F,
    ) -> Result<(), UnsupportedCapabilityError>
    where
        F: FnOnce(ProtocolConnection) -> Proto,
        Proto: Stream<Item = BytesMut> + Send + 'static,
    {
        self.inner.install_protocol(cap, f)
    }
    /// Returns the primary protocol.
    #[inline]
    pub const fn primary(&self) -> &Primary {
        &self.primary.st
    }
    /// Returns mutable access to the primary protocol.
    #[inline]
    pub const fn primary_mut(&mut self) -> &mut Primary {
        &mut self.primary.st
    }
    /// Returns the underlying [`P2PStream`].
    #[inline]
    pub const fn inner(&self) -> &P2PStream<St> {
        &self.inner.conn
    }
    /// Returns mutable access to the underlying [`P2PStream`].
    #[inline]
    pub const fn inner_mut(&mut self) -> &mut P2PStream<St> {
        &mut self.inner.conn
    }
    /// Consumes this type and returns the wrapped [`P2PStream`].
    ///
    /// Note: any installed satellite protocols and buffered outgoing messages are dropped.
    #[inline]
    pub fn into_inner(self) -> P2PStream<St> {
        self.inner.conn
    }
}
impl<St, Primary, PrimaryErr> Stream for RlpxSatelliteStream<St, Primary>
where
    St: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
    Primary: TryStream<Error = PrimaryErr> + Unpin,
    P2PStreamError: Into<PrimaryErr>,
{
    type Item = Result<Primary::Ok, Primary::Error>;
    // Drives the entire multiplexed connection in phases: drain the primary stream,
    // flush buffered outgoing messages, advance the primary's and the satellites'
    // outgoing channels, then demultiplex incoming wire messages. Only items of the
    // primary protocol are yielded.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        loop {
            // first drain the primary stream
            if let Poll::Ready(Some(msg)) = this.primary.st.try_poll_next_unpin(cx) {
                return Poll::Ready(Some(msg))
            }
            // flush as much of the out buffer to the wire as the sink accepts
            let mut conn_ready = true;
            loop {
                match this.inner.conn.poll_ready_unpin(cx) {
                    Poll::Ready(Ok(())) => {
                        if let Some(msg) = this.inner.out_buffer.pop_front() {
                            if let Err(err) = this.inner.conn.start_send_unpin(msg) {
                                return Poll::Ready(Some(Err(err.into())))
                            }
                        } else {
                            break
                        }
                    }
                    Poll::Ready(Err(err)) => {
                        // the sink failed; try to disconnect gracefully before surfacing the error
                        if let Err(disconnect_err) =
                            this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested)
                        {
                            return Poll::Ready(Some(Err(disconnect_err.into())))
                        }
                        return Poll::Ready(Some(Err(err.into())))
                    }
                    Poll::Pending => {
                        conn_ready = false;
                        break
                    }
                }
            }
            // advance primary out
            loop {
                match this.primary.from_primary.poll_next_unpin(cx) {
                    Poll::Ready(Some(msg)) => {
                        this.inner.out_buffer.push_back(msg);
                    }
                    Poll::Ready(None) => {
                        // primary closed
                        return Poll::Ready(None)
                    }
                    Poll::Pending => break,
                }
            }
            // advance all satellites
            for idx in (0..this.inner.protocols.len()).rev() {
                // temporarily remove the protocol so it can be polled mutably while
                // the other fields of `inner` remain accessible
                let mut proto = this.inner.protocols.swap_remove(idx);
                loop {
                    match proto.poll_next_unpin(cx) {
                        Poll::Ready(Some(Err(err))) => {
                            return Poll::Ready(Some(Err(P2PStreamError::Io(err).into())))
                        }
                        Poll::Ready(Some(Ok(msg))) => {
                            this.inner.out_buffer.push_back(msg);
                        }
                        Poll::Ready(None) => return Poll::Ready(None),
                        Poll::Pending => {
                            this.inner.protocols.push(proto);
                            break
                        }
                    }
                }
            }
            let mut delegated = false;
            loop {
                // pull messages from connection
                match this.inner.conn.poll_next_unpin(cx) {
                    Poll::Ready(Some(Ok(msg))) => {
                        delegated = true;
                        let Some(offset) = msg.first().copied() else {
                            return Poll::Ready(Some(Err(
                                P2PStreamError::EmptyProtocolMessage.into()
                            )))
                        };
                        // delegate the multiplexed message to the correct protocol
                        if let Some(cap) =
                            this.inner.conn.shared_capabilities().find_by_relative_offset(offset)
                        {
                            if cap == &this.primary.shared_cap {
                                // delegate to primary
                                let _ = this.primary.to_primary.send(msg);
                            } else {
                                // delegate to installed satellite if any
                                for proto in &this.inner.protocols {
                                    if proto.shared_cap == *cap {
                                        proto.send_raw(msg);
                                        break
                                    }
                                }
                            }
                        } else {
                            return Poll::Ready(Some(Err(P2PStreamError::UnknownReservedMessageId(
                                offset,
                            )
                            .into())))
                        }
                    }
                    Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err.into()))),
                    Poll::Ready(None) => {
                        // connection closed
                        return Poll::Ready(None)
                    }
                    Poll::Pending => break,
                }
            }
            // park only if nothing was delegated and nothing is left to flush; otherwise
            // loop again so freshly delegated messages are processed immediately
            if !conn_ready || (!delegated && this.inner.out_buffer.is_empty()) {
                return Poll::Pending
            }
        }
    }
}
impl<St, Primary, T> Sink<T> for RlpxSatelliteStream<St, Primary>
where
    St: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
    Primary: Sink<T> + Unpin,
    P2PStreamError: Into<<Primary as Sink<T>>::Error>,
{
    type Error = <Primary as Sink<T>>::Error;
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        let this = self.get_mut();
        // both the wire and the primary protocol sink must be ready before accepting an item
        if let Err(err) = ready!(this.inner.conn.poll_ready_unpin(cx)) {
            return Poll::Ready(Err(err.into()))
        }
        if let Err(err) = ready!(this.primary.st.poll_ready_unpin(cx)) {
            return Poll::Ready(Err(err))
        }
        Poll::Ready(Ok(()))
    }
    fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> {
        // items are handed to the primary protocol, which forwards them via its channel
        self.get_mut().primary.st.start_send_unpin(item)
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // flushing happens on the wire connection, not on the in-memory primary channel
        self.get_mut().inner.conn.poll_flush_unpin(cx).map_err(Into::into)
    }
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.get_mut().inner.conn.poll_close_unpin(cx).map_err(Into::into)
    }
}
/// Wraps a `RLPx` subprotocol and handles message ID multiplexing.
struct ProtocolStream {
    /// Negotiated capability; provides the relative message ID offset for (un)masking.
    shared_cap: SharedCapability,
    /// the channel shared with the satellite stream
    to_satellite: UnboundedSender<BytesMut>,
    /// The satellite protocol's outgoing message stream (type-erased).
    satellite_st: Pin<Box<dyn Stream<Item = BytesMut> + Send>>,
}
impl ProtocolStream {
    /// Masks the message ID of a message to be sent on the wire.
    #[inline]
    fn mask_msg_id(&self, mut msg: BytesMut) -> Result<Bytes, io::Error> {
        if msg.is_empty() {
            // empty messages carry no message ID byte
            return Err(io::ErrorKind::InvalidInput.into())
        }
        let offset = self.shared_cap.relative_message_id_offset();
        msg[0] = msg[0].checked_add(offset).ok_or(io::ErrorKind::InvalidInput)?;
        Ok(msg.freeze())
    }
    /// Unmasks the message ID of a message received from the wire.
    #[inline]
    fn unmask_id(&self, mut msg: BytesMut) -> Result<BytesMut, io::Error> {
        if msg.is_empty() {
            // empty messages carry no message ID byte
            return Err(io::ErrorKind::InvalidInput.into())
        }
        let offset = self.shared_cap.relative_message_id_offset();
        msg[0] = msg[0].checked_sub(offset).ok_or(io::ErrorKind::InvalidInput)?;
        Ok(msg)
    }
    /// Forwards a raw wire message to the satellite stream, silently dropping it if
    /// unmasking fails or the satellite receiver has gone away.
    fn send_raw(&self, msg: BytesMut) {
        if let Ok(unmasked) = self.unmask_id(msg) {
            let _ = self.to_satellite.send(unmasked);
        }
    }
}
impl Stream for ProtocolStream {
    type Item = Result<Bytes, io::Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // mask outgoing satellite messages with the capability's ID offset before they
        // are handed to the wire
        match ready!(this.satellite_st.as_mut().poll_next(cx)) {
            Some(raw) => Poll::Ready(Some(this.mask_msg_id(raw))),
            None => Poll::Ready(None),
        }
    }
}
impl fmt::Debug for ProtocolStream {
    // manual impl: `satellite_st` is a boxed `dyn Stream` and does not implement `Debug`
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProtocolStream").field("cap", &self.shared_cap).finish_non_exhaustive()
    }
}
/// Helper to poll multiple protocol streams in a `tokio::select`! branch
struct ProtocolsPoller<'a> {
    /// The satellite protocol streams to poll; borrowed from the multiplexer.
    protocols: &'a mut Vec<ProtocolStream>,
}
impl<'a> ProtocolsPoller<'a> {
    /// Creates a poller over the given protocol streams.
    const fn new(protocols: &'a mut Vec<ProtocolStream>) -> Self {
        Self { protocols }
    }
}
impl<'a> Future for ProtocolsPoller<'a> {
type Output = Result<Bytes, P2PStreamError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Process protocols in reverse order, like the existing pattern
for idx in (0..self.protocols.len()).rev() {
let mut proto = self.protocols.swap_remove(idx);
match proto.poll_next_unpin(cx) {
Poll::Ready(Some(Err(err))) => {
self.protocols.push(proto);
return Poll::Ready(Err(P2PStreamError::from(err)))
}
Poll::Ready(Some(Ok(msg))) => {
// Got a message, put protocol back and return the message
self.protocols.push(proto);
return Poll::Ready(Ok(msg));
}
_ => {
// push it back because we still want to complete the handshake first
self.protocols.push(proto);
}
}
}
// All protocols processed, nothing ready
Poll::Pending
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
handshake::EthHandshake,
test_utils::{
connect_passthrough, eth_handshake, eth_hello,
proto::{test_hello, TestProtoMessage},
},
UnauthedEthStream, UnauthedP2PStream,
};
use reth_eth_wire_types::EthNetworkPrimitives;
use tokio::{net::TcpListener, sync::oneshot};
use tokio_util::codec::Decoder;
#[tokio::test]
async fn eth_satellite() {
reth_tracing::init_test_tracing();
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let (status, fork_filter) = eth_handshake();
let other_status = status;
let other_fork_filter = fork_filter.clone();
let _handle = tokio::spawn(async move {
let (incoming, _) = listener.accept().await.unwrap();
let stream = crate::PassthroughCodec::default().framed(incoming);
let (server_hello, _) = eth_hello();
let (p2p_stream, _) =
UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap();
let (_eth_stream, _) = UnauthedEthStream::new(p2p_stream)
.handshake::<EthNetworkPrimitives>(other_status, other_fork_filter)
.await
.unwrap();
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
});
let conn = connect_passthrough(local_addr, eth_hello().0).await;
let eth = conn.shared_capabilities().eth().unwrap().clone();
let multiplexer = RlpxProtocolMultiplexer::new(conn);
let _satellite = multiplexer
.into_satellite_stream_with_handshake(
eth.capability().as_ref(),
move |proxy| async move {
UnauthedEthStream::new(proxy)
.handshake::<EthNetworkPrimitives>(status, fork_filter)
.await
},
)
.await
.unwrap();
}
/// A test that install a satellite stream eth+test protocol and sends messages between them.
#[tokio::test(flavor = "multi_thread")]
async fn eth_test_protocol_satellite() {
reth_tracing::init_test_tracing();
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let (status, fork_filter) = eth_handshake();
let other_status = status;
let other_fork_filter = fork_filter.clone();
let _handle = tokio::spawn(async move {
let (incoming, _) = listener.accept().await.unwrap();
let stream = crate::PassthroughCodec::default().framed(incoming);
let (server_hello, _) = test_hello();
let (conn, _) = UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap();
let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn)
.into_eth_satellite_stream::<EthNetworkPrimitives>(
other_status,
other_fork_filter,
Arc::new(EthHandshake::default()),
)
.await
.unwrap();
st.install_protocol(&TestProtoMessage::capability(), |mut conn| {
async_stream::stream! {
yield TestProtoMessage::ping().encoded();
let msg = conn.next().await.unwrap();
let msg = TestProtoMessage::decode_message(&mut &msg[..]).unwrap();
assert_eq!(msg, TestProtoMessage::pong());
yield TestProtoMessage::message("hello").encoded();
let msg = conn.next().await.unwrap();
let msg = TestProtoMessage::decode_message(&mut &msg[..]).unwrap();
assert_eq!(msg, TestProtoMessage::message("good bye!"));
yield TestProtoMessage::message("good bye!").encoded();
futures::future::pending::<()>().await;
unreachable!()
}
})
.unwrap();
loop {
let _ = st.next().await;
}
});
let conn = connect_passthrough(local_addr, test_hello().0).await;
let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn)
.into_eth_satellite_stream::<EthNetworkPrimitives>(
status,
fork_filter,
Arc::new(EthHandshake::default()),
)
.await
.unwrap();
let (tx, mut rx) = oneshot::channel();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/disconnect.rs | crates/net/eth-wire/src/disconnect.rs | //! Disconnect
use std::{future::Future, pin::Pin};
use futures::{Sink, SinkExt};
use reth_ecies::stream::ECIESStream;
use reth_eth_wire_types::DisconnectReason;
use tokio::io::AsyncWrite;
use tokio_util::codec::{Encoder, Framed};
/// Convenience alias for the unit-valued result returned by [`CanDisconnect::disconnect`].
type DisconnectResult<E> = Result<(), E>;
/// This trait is meant to allow higher level protocols like `eth` to disconnect from a peer, using
/// lower-level disconnect functions (such as those that exist in the `p2p` protocol) if the
/// underlying stream supports it.
pub trait CanDisconnect<T>: Sink<T> + Unpin {
    /// Disconnects from the underlying stream, using a [`DisconnectReason`] as disconnect
    /// information if the stream implements a protocol that can carry the additional disconnect
    /// metadata.
    ///
    /// Implementations that cannot carry the reason may simply close the sink.
    fn disconnect(
        &mut self,
        reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = DisconnectResult<Self::Error>> + Send + '_>>;
}
// basic impls for things like Framed<TcpStream, etc>
impl<T, I, U> CanDisconnect<I> for Framed<T, U>
where
    T: AsyncWrite + Unpin + Send,
    U: Encoder<I> + Send,
{
    fn disconnect(
        &mut self,
        _reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = Result<(), <Self as Sink<I>>::Error>> + Send + '_>> {
        // a plain framed codec has no protocol-level disconnect message, so the reason is
        // ignored and the sink is simply closed
        Box::pin(async move { self.close().await })
    }
}
impl<S> CanDisconnect<bytes::Bytes> for ECIESStream<S>
where
    S: AsyncWrite + Unpin + Send,
{
    fn disconnect(
        &mut self,
        _reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = Result<(), std::io::Error>> + Send + '_>> {
        // the reason is ignored at this layer; the encrypted stream is just closed
        Box::pin(async move { self.close().await })
    }
}
#[cfg(test)]
mod tests {
    use crate::{p2pstream::P2PMessage, DisconnectReason};
    use alloy_primitives::hex;
    use alloy_rlp::{Decodable, Encodable};
    /// Returns every known [`DisconnectReason`] variant for exhaustive round-trip checks.
    fn all_reasons() -> Vec<DisconnectReason> {
        vec![
            DisconnectReason::DisconnectRequested,
            DisconnectReason::TcpSubsystemError,
            DisconnectReason::ProtocolBreach,
            DisconnectReason::UselessPeer,
            DisconnectReason::TooManyPeers,
            DisconnectReason::AlreadyConnected,
            DisconnectReason::IncompatibleP2PProtocolVersion,
            DisconnectReason::NullNodeIdentity,
            DisconnectReason::ClientQuitting,
            DisconnectReason::UnexpectedHandshakeIdentity,
            DisconnectReason::ConnectedToSelf,
            DisconnectReason::PingTimeout,
            DisconnectReason::SubprotocolSpecific,
        ]
    }
    // encode -> decode must be the identity for every reason
    #[test]
    fn disconnect_round_trip() {
        let all_reasons = all_reasons();
        for reason in all_reasons {
            let disconnect = P2PMessage::Disconnect(reason);
            let mut disconnect_encoded = Vec::new();
            disconnect.encode(&mut disconnect_encoded);
            let disconnect_decoded = P2PMessage::decode(&mut &disconnect_encoded[..]).unwrap();
            assert_eq!(disconnect, disconnect_decoded);
        }
    }
    #[test]
    fn test_reason_too_short() {
        assert!(DisconnectReason::decode(&mut &[0u8; 0][..]).is_err())
    }
    #[test]
    fn test_reason_too_long() {
        assert!(DisconnectReason::decode(&mut &[0u8; 3][..]).is_err())
    }
    #[test]
    fn test_reason_zero_length_list() {
        let list_with_zero_length = hex::decode("c000").unwrap();
        let res = DisconnectReason::decode(&mut &list_with_zero_length[..]);
        assert!(res.is_err());
        assert_eq!(res.unwrap_err().to_string(), "unexpected list length (got 0, expected 1)")
    }
    // the reported `length()` must match the actual encoded size
    #[test]
    fn disconnect_encoding_length() {
        let all_reasons = all_reasons();
        for reason in all_reasons {
            let disconnect = P2PMessage::Disconnect(reason);
            let mut disconnect_encoded = Vec::new();
            disconnect.encode(&mut disconnect_encoded);
            assert_eq!(disconnect_encoded.len(), disconnect.length());
        }
    }
    #[test]
    fn test_decode_known_reasons() {
        let all_reasons = vec![
            // encoding the disconnect reason as a single byte
            "0100", // 0x00 case
            "0180", // second 0x00 case
            "0101", "0102", "0103", "0104", "0105", "0106", "0107", "0108", "0109", "010a", "010b",
            "0110",
            // encoding the disconnect reason in a list
            "01c100", // 0x00 case
            "01c180", // second 0x00 case
            "01c101", "01c102", "01c103", "01c104", "01c105", "01c106", "01c107", "01c108",
            "01c109", "01c10a", "01c10b", "01c110",
        ];
        for reason in all_reasons {
            let reason = hex::decode(reason).unwrap();
            let message = P2PMessage::decode(&mut &reason[..]).unwrap();
            let P2PMessage::Disconnect(_) = message else {
                panic!("expected a disconnect message");
            };
        }
    }
    #[test]
    fn test_decode_disconnect_requested() {
        let reason = "0100";
        let reason = hex::decode(reason).unwrap();
        match P2PMessage::decode(&mut &reason[..]).unwrap() {
            P2PMessage::Disconnect(DisconnectReason::DisconnectRequested) => {}
            _ => {
                unreachable!()
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/test_utils.rs | crates/net/eth-wire/src/test_utils.rs | //! Utilities for testing p2p protocol.
#![allow(missing_docs)]
use crate::{
hello::DEFAULT_TCP_PORT, EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion,
Status, StatusMessage, UnauthedP2PStream, UnifiedStatus,
};
use alloy_chains::Chain;
use alloy_primitives::{B256, U256};
use reth_ethereum_forks::{ForkFilter, Head};
use reth_network_peers::pk2id;
use secp256k1::{SecretKey, SECP256K1};
use std::net::SocketAddr;
use tokio::net::TcpStream;
use tokio_util::codec::{Decoder, Framed, LengthDelimitedCodec};
/// A [`P2PStream`] over a TCP stream framed with a plain length-delimited codec (no ECIES).
pub type P2pPassthroughTcpStream = P2PStream<Framed<TcpStream, LengthDelimitedCodec>>;
/// Returns a fresh testing `HelloMessage` together with the newly generated secret key.
pub fn eth_hello() -> (HelloMessageWithProtocols, SecretKey) {
    let secret_key = SecretKey::new(&mut rand_08::thread_rng());
    let message = HelloMessageWithProtocols {
        protocol_version: ProtocolVersion::V5,
        client_version: "eth/1.0.0".to_string(),
        // advertise only eth/67 by default
        protocols: vec![EthVersion::Eth67.into()],
        port: DEFAULT_TCP_PORT,
        id: pk2id(&secret_key.public_key(SECP256K1)),
    };
    (message, secret_key)
}
/// Returns a testing eth handshake status and the matching fork filter.
pub fn eth_handshake() -> (UnifiedStatus, ForkFilter) {
    let genesis = B256::random();
    let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
    let legacy = Status {
        version: EthVersion::Eth67,
        chain: Chain::mainnet(),
        total_difficulty: U256::ZERO,
        blockhash: B256::random(),
        genesis,
        // Advertise the fork id of the current head so the filter accepts it.
        forkid: fork_filter.current(),
    };
    (UnifiedStatus::from_message(StatusMessage::Legacy(legacy)), fork_filter)
}
/// Connects to a remote node and returns an authenticated `P2PStream` with the remote node.
///
/// # Panics
///
/// Panics if the TCP connection or the p2p handshake fails; intended for tests only.
pub async fn connect_passthrough(
    addr: SocketAddr,
    client_hello: HelloMessageWithProtocols,
) -> P2pPassthroughTcpStream {
    let outgoing = TcpStream::connect(addr).await.unwrap();
    let sink = crate::PassthroughCodec::default().framed(outgoing);
    let (p2p_stream, _) = UnauthedP2PStream::new(sink).handshake(client_hello).await.unwrap();
    p2p_stream
}
/// An `RLPx` subprotocol for testing.
pub mod proto {
    use super::*;
    use crate::{protocol::Protocol, Capability};
    use bytes::{Buf, BufMut, BytesMut};
    /// Returns a new testing `HelloMessage` with eth and the test protocol
    pub fn test_hello() -> (HelloMessageWithProtocols, SecretKey) {
        let mut handshake = eth_hello();
        handshake.0.protocols.push(TestProtoMessage::protocol());
        handshake
    }
    /// Message IDs used by the `test` protocol.
    #[repr(u8)]
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum TestProtoMessageId {
        Ping = 0x00,
        Pong = 0x01,
        Message = 0x02,
    }
    /// Payload variants of the `test` protocol.
    #[derive(Clone, Debug, PartialEq, Eq)]
    pub enum TestProtoMessageKind {
        Message(String),
        Ping,
        Pong,
    }
    /// A `test` protocol message, containing a message ID and payload.
    #[derive(Clone, Debug, PartialEq, Eq)]
    pub struct TestProtoMessage {
        // ID byte written on the wire
        pub message_type: TestProtoMessageId,
        // the decoded payload matching the ID
        pub message: TestProtoMessageKind,
    }
    impl TestProtoMessage {
        /// Returns the capability for the `test` protocol.
        pub const fn capability() -> Capability {
            Capability::new_static("test", 1)
        }
        /// Returns the protocol for the `test` protocol.
        pub const fn protocol() -> Protocol {
            // 3 message IDs: Ping, Pong, Message
            Protocol::new(Self::capability(), 3)
        }
        /// Creates a ping message
        pub const fn ping() -> Self {
            Self { message_type: TestProtoMessageId::Ping, message: TestProtoMessageKind::Ping }
        }
        /// Creates a pong message
        pub const fn pong() -> Self {
            Self { message_type: TestProtoMessageId::Pong, message: TestProtoMessageKind::Pong }
        }
        /// Creates a message
        pub fn message(msg: impl Into<String>) -> Self {
            Self {
                message_type: TestProtoMessageId::Message,
                message: TestProtoMessageKind::Message(msg.into()),
            }
        }
        /// Encodes this message into its wire representation: the message ID byte
        /// followed by the (possibly empty) payload.
        pub fn encoded(&self) -> BytesMut {
            let mut buf = BytesMut::new();
            buf.put_u8(self.message_type as u8);
            match &self.message {
                TestProtoMessageKind::Ping | TestProtoMessageKind::Pong => {}
                TestProtoMessageKind::Message(msg) => {
                    buf.put(msg.as_bytes());
                }
            }
            buf
        }
        /// Decodes a `TestProtoMessage` from the given message buffer.
        ///
        /// Returns `None` if the buffer is empty or starts with an unknown message ID.
        pub fn decode_message(buf: &mut &[u8]) -> Option<Self> {
            if buf.is_empty() {
                return None
            }
            let id = buf[0];
            buf.advance(1);
            let message_type = match id {
                0x00 => TestProtoMessageId::Ping,
                0x01 => TestProtoMessageId::Pong,
                0x02 => TestProtoMessageId::Message,
                _ => return None,
            };
            let message = match message_type {
                TestProtoMessageId::Ping => TestProtoMessageKind::Ping,
                TestProtoMessageId::Pong => TestProtoMessageKind::Pong,
                TestProtoMessageId::Message => {
                    // remainder of the buffer is the UTF-8 payload (lossy decode)
                    TestProtoMessageKind::Message(String::from_utf8_lossy(&buf[..]).into_owned())
                }
            };
            Some(Self { message_type, message })
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/hello.rs | crates/net/eth-wire/src/hello.rs | use crate::{Capability, EthVersion, ProtocolVersion};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use reth_codecs::add_arbitrary_tests;
use reth_network_peers::PeerId;
use reth_primitives_traits::constants::RETH_CLIENT_VERSION;
/// The default TCP port used for p2p connections: `30303`.
///
/// Note: this is the same as the default discovery port: `DEFAULT_DISCOVERY_PORT`.
pub(crate) const DEFAULT_TCP_PORT: u16 = 30303;
use crate::protocol::Protocol;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// This is a superset of [`HelloMessage`] that provides additional protocol [`Protocol`]
/// information about the number of messages used by each capability in order to do proper message
/// ID multiplexing.
///
/// This type is required for the `p2p` handshake because the [`HelloMessage`] does not share the
/// number of messages used by each capability.
///
/// To get the encodable [`HelloMessage`] without the additional protocol information, use the
/// [`HelloMessageWithProtocols::message`].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct HelloMessageWithProtocols {
    /// The version of the `p2p` protocol.
    pub protocol_version: ProtocolVersion,
    /// Specifies the client software identity, as a human-readable string (e.g.
    /// "Ethereum(++)/1.0.0").
    pub client_version: String,
    /// The list of supported capabilities and their versions.
    pub protocols: Vec<Protocol>,
    /// The port that the client is listening on, zero indicates the client is not listening.
    ///
    /// By default this is `30303` which is the same as the default discovery port.
    pub port: u16,
    /// The secp256k1 public key corresponding to the node's private key.
    pub id: PeerId,
}
impl HelloMessageWithProtocols {
    /// Starts a new [`HelloMessageBuilder`] prefilled with the given node id.
    ///
    /// ```
    /// use reth_eth_wire::HelloMessageWithProtocols;
    /// use reth_network_peers::pk2id;
    /// use secp256k1::{SecretKey, SECP256K1};
    /// let secret_key = SecretKey::new(&mut rand_08::thread_rng());
    /// let id = pk2id(&secret_key.public_key(SECP256K1));
    /// let status = HelloMessageWithProtocols::builder(id).build();
    /// ```
    pub const fn builder(id: PeerId) -> HelloMessageBuilder {
        HelloMessageBuilder::new(id)
    }

    /// Returns a [`HelloMessage`] view of this message, cloning the capabilities
    /// and dropping the per-protocol message-count information.
    #[inline]
    pub fn message(&self) -> HelloMessage {
        let capabilities = self.protocols.iter().map(|proto| proto.cap.clone()).collect();
        HelloMessage {
            protocol_version: self.protocol_version,
            client_version: self.client_version.clone(),
            capabilities,
            port: self.port,
            id: self.id,
        }
    }

    /// Consumes `self` and converts it into a [`HelloMessage`], dropping the
    /// per-protocol message-count information.
    pub fn into_message(self) -> HelloMessage {
        let Self { protocol_version, client_version, protocols, port, id } = self;
        HelloMessage {
            protocol_version,
            client_version,
            capabilities: protocols.into_iter().map(|proto| proto.cap).collect(),
            port,
            id,
        }
    }

    /// Returns true if a protocol with the same capability is already present.
    #[inline]
    pub fn contains_protocol(&self, protocol: &Protocol) -> bool {
        self.protocols.iter().any(|existing| existing.cap == protocol.cap)
    }

    /// Attempts to add `protocol` to the set.
    ///
    /// Returns the protocol back as `Err` if its capability is already registered.
    #[inline]
    pub fn try_add_protocol(&mut self, protocol: Protocol) -> Result<(), Protocol> {
        if self.contains_protocol(&protocol) {
            return Err(protocol)
        }
        self.protocols.push(protocol);
        Ok(())
    }
}
// TODO: determine if we should allow for the extra fields at the end like EIP-706 suggests
/// Raw rlpx protocol message used in the `p2p` handshake, containing information about the
/// supported `RLPx` protocol version and capabilities.
///
/// See also <https://github.com/ethereum/devp2p/blob/master/rlpx.md#hello-0x00>
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub struct HelloMessage {
    /// The version of the `p2p` protocol.
    pub protocol_version: ProtocolVersion,
    /// Specifies the client software identity, as a human-readable string (e.g.
    /// "Ethereum(++)/1.0.0").
    pub client_version: String,
    /// The list of supported capabilities and their versions.
    pub capabilities: Vec<Capability>,
    /// The port that the client is listening on, zero indicates the client is not listening.
    ///
    /// Defaults to `30303` when built via [`HelloMessageBuilder`].
    pub port: u16,
    /// The secp256k1 public key corresponding to the node's private key.
    pub id: PeerId,
}
// === impl HelloMessage ===

impl HelloMessage {
    /// Starts a new `HelloMessageBuilder` prefilled with the given node id.
    ///
    /// ```
    /// use reth_eth_wire::HelloMessage;
    /// use reth_network_peers::pk2id;
    /// use secp256k1::{SecretKey, SECP256K1};
    /// let secret_key = SecretKey::new(&mut rand_08::thread_rng());
    /// let id = pk2id(&secret_key.public_key(SECP256K1));
    /// let status = HelloMessage::builder(id).build();
    /// ```
    pub const fn builder(id: PeerId) -> HelloMessageBuilder {
        HelloMessageBuilder::new(id)
    }
}
/// Builder for [`HelloMessageWithProtocols`]
///
/// Fields left unset fall back to defaults when [`HelloMessageBuilder::build`] is called.
#[derive(Debug)]
pub struct HelloMessageBuilder {
    /// The version of the `p2p` protocol.
    pub protocol_version: Option<ProtocolVersion>,
    /// Specifies the client software identity, as a human-readable string (e.g.
    /// "Ethereum(++)/1.0.0").
    pub client_version: Option<String>,
    /// The list of supported protocols.
    pub protocols: Option<Vec<Protocol>>,
    /// The port that the client is listening on, zero indicates the client is not listening.
    pub port: Option<u16>,
    /// The secp256k1 public key corresponding to the node's private key.
    pub id: PeerId,
}
// === impl HelloMessageBuilder ===

impl HelloMessageBuilder {
    /// Create a new builder to configure a [`HelloMessage`] for the given node id.
    pub const fn new(id: PeerId) -> Self {
        Self { id, protocol_version: None, client_version: None, protocols: None, port: None }
    }

    /// Sets the port the client is listening on.
    pub const fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// Adds a single protocol to the advertised set.
    pub fn protocol(mut self, protocols: impl Into<Protocol>) -> Self {
        let list = self.protocols.get_or_insert_with(Vec::new);
        list.push(protocols.into());
        self
    }

    /// Adds all given protocols to the advertised set.
    pub fn protocols(mut self, protocols: impl IntoIterator<Item = Protocol>) -> Self {
        match self.protocols.as_mut() {
            Some(list) => list.extend(protocols),
            None => self.protocols = Some(protocols.into_iter().collect()),
        }
        self
    }

    /// Sets the client version string.
    pub fn client_version(mut self, client_version: impl Into<String>) -> Self {
        self.client_version = Some(client_version.into());
        self
    }

    /// Sets the `p2p` protocol version.
    pub const fn protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {
        self.protocol_version = Some(protocol_version);
        self
    }

    /// Consumes the builder and returns the configured [`HelloMessageWithProtocols`].
    ///
    /// Unset fields fall back to:
    /// - `protocol_version`: [`ProtocolVersion::V5`]
    /// - `client_version`: [`RETH_CLIENT_VERSION`]
    /// - `protocols`: `eth/68`, `eth/67`, `eth/66`
    /// - `port`: `30303`
    pub fn build(self) -> HelloMessageWithProtocols {
        let Self { protocol_version, client_version, protocols, port, id } = self;
        let protocols = protocols.unwrap_or_else(|| {
            // TODO: enable: EthVersion::ALL_VERSIONS.iter().copied().map(Into::into).collect()
            vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()]
        });
        HelloMessageWithProtocols {
            protocol_version: protocol_version.unwrap_or_default(),
            client_version: client_version.unwrap_or_else(|| RETH_CLIENT_VERSION.to_string()),
            protocols,
            port: port.unwrap_or(DEFAULT_TCP_PORT),
            id,
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{p2pstream::P2PMessage, Capability, EthVersion, HelloMessage, ProtocolVersion};
    use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE};
    use reth_network_peers::pk2id;
    use secp256k1::{SecretKey, SECP256K1};

    /// Builds a `Hello` message with a fresh random node id and a single
    /// `eth/67` capability.
    fn random_hello() -> P2PMessage {
        let secret_key = SecretKey::new(&mut rand_08::thread_rng());
        let id = pk2id(&secret_key.public_key(SECP256K1));
        P2PMessage::Hello(HelloMessage {
            protocol_version: ProtocolVersion::V5,
            client_version: "reth/0.1.0".to_string(),
            capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)],
            port: 30303,
            id,
        })
    }

    #[test]
    fn test_hello_encoding_round_trip() {
        let hello = random_hello();

        let mut encoded = Vec::new();
        hello.encode(&mut encoded);

        // decoding the encoded bytes must yield the original message
        let decoded = P2PMessage::decode(&mut &encoded[..]).unwrap();
        assert_eq!(hello, decoded);
    }

    #[test]
    fn hello_encoding_length() {
        let hello = random_hello();

        let mut encoded = Vec::new();
        hello.encode(&mut encoded);

        // the reported RLP length must match the actual encoded length
        assert_eq!(encoded.len(), hello.length());
    }

    #[test]
    fn hello_message_id_prefix() {
        // ensure that the hello message id is prefixed
        let hello = random_hello();

        let mut encoded = Vec::new();
        hello.encode(&mut encoded);

        // zero is encoded as 0x80, the empty string code in RLP
        assert_eq!(encoded[0], EMPTY_STRING_CODE);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/ethstream.rs | crates/net/eth-wire/src/ethstream.rs | //! Ethereum protocol stream implementations.
//!
//! Provides stream types for the Ethereum wire protocol.
//! It separates protocol logic [`EthStreamInner`] from transport concerns [`EthStream`].
//! Handles handshaking, message processing, and RLP serialization.
use crate::{
errors::{EthHandshakeError, EthStreamError},
handshake::EthereumEthHandshake,
message::{EthBroadcastMessage, ProtocolBroadcastMessage},
p2pstream::HANDSHAKE_TIMEOUT,
CanDisconnect, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, ProtocolMessage,
UnifiedStatus,
};
use alloy_primitives::bytes::{Bytes, BytesMut};
use alloy_rlp::Encodable;
use futures::{ready, Sink, SinkExt};
use pin_project::pin_project;
use reth_eth_wire_types::{NetworkPrimitives, RawCapabilityMessage};
use reth_ethereum_forks::ForkFilter;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::time::timeout;
use tokio_stream::Stream;
use tracing::{debug, trace};
/// [`MAX_MESSAGE_SIZE`] is the maximum cap on the size of a protocol message (10 MiB).
// https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50
pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024;

/// [`MAX_STATUS_SIZE`] is the maximum cap on the size of the initial status message (500 KiB).
pub(crate) const MAX_STATUS_SIZE: usize = 500 * 1024;
/// An un-authenticated [`EthStream`]. This is consumed and returns a [`EthStream`] after the
/// `Status` handshake is completed.
#[pin_project]
#[derive(Debug)]
pub struct UnauthedEthStream<S> {
    /// The underlying transport stream, pinned for use during the async handshake.
    #[pin]
    inner: S,
}
impl<S> UnauthedEthStream<S> {
    /// Create a new `UnauthedEthStream` from a type `S` which implements `Stream` and `Sink`.
    pub const fn new(inner: S) -> Self {
        Self { inner }
    }

    /// Consumes the type and returns the wrapped stream.
    pub fn into_inner(self) -> S {
        self.inner
    }
}
impl<S, E> UnauthedEthStream<S>
where
    S: Stream<Item = Result<BytesMut, E>> + CanDisconnect<Bytes> + Send + Unpin,
    EthStreamError: From<E> + From<<S as Sink<Bytes>>::Error>,
{
    /// Consumes the [`UnauthedEthStream`] and returns an [`EthStream`] after the `Status`
    /// handshake is completed successfully. This also returns the `Status` message sent by the
    /// remote peer.
    ///
    /// Uses the default `HANDSHAKE_TIMEOUT` as the timeout limit.
    ///
    /// Caution: This expects that the [`UnifiedStatus`] has the proper eth version configured, with
    /// ETH69 the initial status message changed.
    pub async fn handshake<N: NetworkPrimitives>(
        self,
        status: UnifiedStatus,
        fork_filter: ForkFilter,
    ) -> Result<(EthStream<S, N>, UnifiedStatus), EthStreamError> {
        self.handshake_with_timeout(status, fork_filter, HANDSHAKE_TIMEOUT).await
    }

    /// Wrapper around handshake which enforces a timeout.
    ///
    /// Returns [`EthStreamError::StreamTimeout`] if the handshake does not complete within
    /// `timeout_limit`.
    pub async fn handshake_with_timeout<N: NetworkPrimitives>(
        self,
        status: UnifiedStatus,
        fork_filter: ForkFilter,
        timeout_limit: Duration,
    ) -> Result<(EthStream<S, N>, UnifiedStatus), EthStreamError> {
        timeout(timeout_limit, Self::handshake_without_timeout(self, status, fork_filter))
            .await
            .map_err(|_| EthStreamError::StreamTimeout)?
    }

    /// Handshake with no timeout
    pub async fn handshake_without_timeout<N: NetworkPrimitives>(
        mut self,
        status: UnifiedStatus,
        fork_filter: ForkFilter,
    ) -> Result<(EthStream<S, N>, UnifiedStatus), EthStreamError> {
        trace!(
            status = %status.into_message(),
            "sending eth status to peer"
        );

        // Delegate the status/fork-id exchange and validation to the shared handshake helper.
        let their_status =
            EthereumEthHandshake(&mut self.inner).eth_handshake(status, fork_filter).await?;

        // now we can create the `EthStream` because the peer has successfully completed
        // the handshake
        let stream = EthStream::new(status.version, self.inner);

        Ok((stream, their_status))
    }
}
/// Contains eth protocol specific logic for processing messages
#[derive(Debug)]
pub struct EthStreamInner<N> {
    /// Negotiated eth version
    version: EthVersion,
    /// Ties message encoding/decoding to a specific [`NetworkPrimitives`] implementation.
    _pd: std::marker::PhantomData<N>,
}
impl<N> EthStreamInner<N>
where
    N: NetworkPrimitives,
{
    /// Creates a new [`EthStreamInner`] for the negotiated eth version.
    pub const fn new(version: EthVersion) -> Self {
        Self { version, _pd: std::marker::PhantomData }
    }

    /// Returns the negotiated eth version.
    #[inline]
    pub const fn version(&self) -> EthVersion {
        self.version
    }

    /// Decodes incoming bytes into an [`EthMessage`].
    ///
    /// Rejects messages larger than [`MAX_MESSAGE_SIZE`] and any `Status`
    /// message received after the handshake.
    pub fn decode_message(&self, bytes: BytesMut) -> Result<EthMessage<N>, EthStreamError> {
        if bytes.len() > MAX_MESSAGE_SIZE {
            return Err(EthStreamError::MessageTooBig(bytes.len()));
        }

        let msg =
            ProtocolMessage::decode_message(self.version, &mut bytes.as_ref()).map_err(|err| {
                // Log a truncated hex dump of the offending payload for debugging.
                let msg = if bytes.len() > 50 {
                    format!("{:02x?}...{:x?}", &bytes[..10], &bytes[bytes.len() - 10..])
                } else {
                    format!("{bytes:02x?}")
                };
                debug!(
                    version=?self.version,
                    %msg,
                    "failed to decode protocol message"
                );
                EthStreamError::InvalidMessage(err)
            })?;

        if matches!(msg.message, EthMessage::Status(_)) {
            return Err(EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake));
        }

        Ok(msg.message)
    }

    /// Encodes an [`EthMessage`] to bytes.
    ///
    /// Validates that Status messages are not sent after handshake, enforcing protocol rules.
    pub fn encode_message(&self, item: EthMessage<N>) -> Result<Bytes, EthStreamError> {
        match item {
            EthMessage::Status(_) => {
                Err(EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake))
            }
            other => Ok(Bytes::from(alloy_rlp::encode(ProtocolMessage::from(other)))),
        }
    }
}
/// An `EthStream` wraps over any `Stream` that yields bytes and makes it
/// compatible with eth-networking protocol messages, which get RLP encoded/decoded.
#[pin_project]
#[derive(Debug)]
pub struct EthStream<S, N = EthNetworkPrimitives> {
    /// Eth-specific logic
    eth: EthStreamInner<N>,
    /// The underlying byte stream/sink.
    #[pin]
    inner: S,
}
impl<S, N: NetworkPrimitives> EthStream<S, N> {
    /// Creates a new unauthed [`EthStream`] from a provided stream. You will need
    /// to manually handshake a peer.
    #[inline]
    pub const fn new(version: EthVersion, inner: S) -> Self {
        Self { eth: EthStreamInner::new(version), inner }
    }

    /// Returns the eth version.
    #[inline]
    pub const fn version(&self) -> EthVersion {
        self.eth.version()
    }

    /// Returns a reference to the underlying stream.
    #[inline]
    pub const fn inner(&self) -> &S {
        &self.inner
    }

    /// Returns mutable access to the underlying stream.
    #[inline]
    pub const fn inner_mut(&mut self) -> &mut S {
        &mut self.inner
    }

    /// Consumes this type and returns the wrapped stream.
    #[inline]
    pub fn into_inner(self) -> S {
        self.inner
    }
}
impl<S, E, N> EthStream<S, N>
where
    S: Sink<Bytes, Error = E> + Unpin,
    EthStreamError: From<E>,
    N: NetworkPrimitives,
{
    /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead.
    pub fn start_send_broadcast(
        &mut self,
        item: EthBroadcastMessage<N>,
    ) -> Result<(), EthStreamError> {
        self.inner.start_send_unpin(Bytes::from(alloy_rlp::encode(
            ProtocolBroadcastMessage::from(item),
        )))?;

        Ok(())
    }

    /// Sends a raw capability message directly over the stream.
    ///
    /// The wire format is the RLP-encoded message id followed by the raw payload bytes.
    pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthStreamError> {
        // Reserve exactly enough for the RLP-encoded id plus the payload: the id's RLP
        // encoding can be longer than one byte (ids >= 0x80), so query its exact length
        // instead of assuming a single byte.
        let mut bytes = Vec::with_capacity(msg.id.length() + msg.payload.len());
        msg.id.encode(&mut bytes);
        bytes.extend_from_slice(&msg.payload);

        self.inner.start_send_unpin(bytes.into())?;
        Ok(())
    }
}
impl<S, E, N> Stream for EthStream<S, N>
where
    S: Stream<Item = Result<BytesMut, E>> + Unpin,
    EthStreamError: From<E>,
    N: NetworkPrimitives,
{
    type Item = Result<EthMessage<N>, EthStreamError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.project();
        // Forward the inner stream's item, decoding raw bytes into an `EthMessage`.
        let next = ready!(this.inner.poll_next(cx)).map(|result| match result {
            Ok(bytes) => this.eth.decode_message(bytes),
            Err(err) => Err(err.into()),
        });
        Poll::Ready(next)
    }
}
impl<S, N> Sink<EthMessage<N>> for EthStream<S, N>
where
    S: CanDisconnect<Bytes> + Unpin,
    EthStreamError: From<<S as Sink<Bytes>>::Error>,
    N: NetworkPrimitives,
{
    type Error = EthStreamError;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_ready(cx).map_err(Into::into)
    }

    fn start_send(self: Pin<&mut Self>, item: EthMessage<N>) -> Result<(), Self::Error> {
        // A `Status` message is only valid during the handshake; refuse to forward it here.
        if matches!(item, EthMessage::Status(_)) {
            // TODO: to disconnect here we would need to do something similar to P2PStream's
            // start_disconnect, which would ideally be a part of the CanDisconnect trait, or at
            // least similar.
            //
            // Other parts of reth do not yet need traits like CanDisconnect because atm they work
            // exclusively with EthStream<P2PStream<S>>, where the inner P2PStream is accessible,
            // allowing for its start_disconnect method to be called.
            //
            // self.project().inner.start_disconnect(DisconnectReason::ProtocolBreach);
            return Err(EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake))
        }

        // RLP-encode the protocol message and hand the raw bytes to the inner sink.
        self.project()
            .inner
            .start_send(Bytes::from(alloy_rlp::encode(ProtocolMessage::from(item))))?;

        Ok(())
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_flush(cx).map_err(Into::into)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_close(cx).map_err(Into::into)
    }
}
impl<S, N> CanDisconnect<EthMessage<N>> for EthStream<S, N>
where
    S: CanDisconnect<Bytes> + Send,
    EthStreamError: From<<S as Sink<Bytes>>::Error>,
    N: NetworkPrimitives,
{
    /// Delegates the disconnect (with the given reason) to the underlying stream.
    fn disconnect(
        &mut self,
        reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = Result<(), EthStreamError>> + Send + '_>> {
        Box::pin(async move { self.inner.disconnect(reason).await.map_err(Into::into) })
    }
}
#[cfg(test)]
mod tests {
use super::UnauthedEthStream;
use crate::{
broadcast::BlockHashNumber,
errors::{EthHandshakeError, EthStreamError},
ethstream::RawCapabilityMessage,
hello::DEFAULT_TCP_PORT,
p2pstream::UnauthedP2PStream,
EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec,
ProtocolVersion, Status, StatusMessage,
};
use alloy_chains::NamedChain;
use alloy_primitives::{bytes::Bytes, B256, U256};
use alloy_rlp::Decodable;
use futures::{SinkExt, StreamExt};
use reth_ecies::stream::ECIESStream;
use reth_eth_wire_types::{EthNetworkPrimitives, UnifiedStatus};
use reth_ethereum_forks::{ForkFilter, Head};
use reth_network_peers::pk2id;
use secp256k1::{SecretKey, SECP256K1};
use std::time::Duration;
use tokio::net::{TcpListener, TcpStream};
use tokio_util::codec::Decoder;
#[tokio::test]
async fn can_handshake() {
let genesis = B256::random();
let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
let status = Status {
version: EthVersion::Eth67,
chain: NamedChain::Mainnet.into(),
total_difficulty: U256::ZERO,
blockhash: B256::random(),
genesis,
// Pass the current fork id.
forkid: fork_filter.current(),
};
let unified_status = UnifiedStatus::from_message(StatusMessage::Legacy(status));
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let status_clone = unified_status;
let fork_filter_clone = fork_filter.clone();
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = PassthroughCodec::default().framed(incoming);
let (_, their_status) = UnauthedEthStream::new(stream)
.handshake::<EthNetworkPrimitives>(status_clone, fork_filter_clone)
.await
.unwrap();
// just make sure it equals our status (our status is a clone of their status)
assert_eq!(their_status, status_clone);
});
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let sink = PassthroughCodec::default().framed(outgoing);
// try to connect
let (_, their_status) = UnauthedEthStream::new(sink)
.handshake::<EthNetworkPrimitives>(unified_status, fork_filter)
.await
.unwrap();
// their status is a clone of our status, these should be equal
assert_eq!(their_status, unified_status);
// wait for it to finish
handle.await.unwrap();
}
#[tokio::test]
async fn pass_handshake_on_low_td_bitlen() {
let genesis = B256::random();
let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
let status = Status {
version: EthVersion::Eth67,
chain: NamedChain::Mainnet.into(),
total_difficulty: U256::from(2).pow(U256::from(100)) - U256::from(1),
blockhash: B256::random(),
genesis,
// Pass the current fork id.
forkid: fork_filter.current(),
};
let unified_status = UnifiedStatus::from_message(StatusMessage::Legacy(status));
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let status_clone = unified_status;
let fork_filter_clone = fork_filter.clone();
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = PassthroughCodec::default().framed(incoming);
let (_, their_status) = UnauthedEthStream::new(stream)
.handshake::<EthNetworkPrimitives>(status_clone, fork_filter_clone)
.await
.unwrap();
// just make sure it equals our status, and that the handshake succeeded
assert_eq!(their_status, status_clone);
});
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let sink = PassthroughCodec::default().framed(outgoing);
// try to connect
let (_, their_status) = UnauthedEthStream::new(sink)
.handshake::<EthNetworkPrimitives>(unified_status, fork_filter)
.await
.unwrap();
// their status is a clone of our status, these should be equal
assert_eq!(their_status, unified_status);
// await the other handshake
handle.await.unwrap();
}
#[tokio::test]
async fn fail_handshake_on_high_td_bitlen() {
let genesis = B256::random();
let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
let status = Status {
version: EthVersion::Eth67,
chain: NamedChain::Mainnet.into(),
total_difficulty: U256::from(2).pow(U256::from(164)),
blockhash: B256::random(),
genesis,
// Pass the current fork id.
forkid: fork_filter.current(),
};
let unified_status = UnifiedStatus::from_message(StatusMessage::Legacy(status));
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let status_clone = unified_status;
let fork_filter_clone = fork_filter.clone();
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = PassthroughCodec::default().framed(incoming);
let handshake_res = UnauthedEthStream::new(stream)
.handshake::<EthNetworkPrimitives>(status_clone, fork_filter_clone)
.await;
// make sure the handshake fails due to td too high
assert!(matches!(
handshake_res,
Err(EthStreamError::EthHandshakeError(
EthHandshakeError::TotalDifficultyBitLenTooLarge { got: 165, maximum: 160 }
))
));
});
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let sink = PassthroughCodec::default().framed(outgoing);
// try to connect
let handshake_res = UnauthedEthStream::new(sink)
.handshake::<EthNetworkPrimitives>(unified_status, fork_filter)
.await;
// this handshake should also fail due to td too high
assert!(matches!(
handshake_res,
Err(EthStreamError::EthHandshakeError(
EthHandshakeError::TotalDifficultyBitLenTooLarge { got: 165, maximum: 160 }
))
));
// await the other handshake
handle.await.unwrap();
}
#[tokio::test]
async fn can_write_and_read_cleartext() {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let test_msg = EthMessage::<EthNetworkPrimitives>::NewBlockHashes(
vec![
BlockHashNumber { hash: B256::random(), number: 5 },
BlockHashNumber { hash: B256::random(), number: 6 },
]
.into(),
);
let test_msg_clone = test_msg.clone();
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = PassthroughCodec::default().framed(incoming);
let mut stream = EthStream::new(EthVersion::Eth67, stream);
// use the stream to get the next message
let message = stream.next().await.unwrap().unwrap();
assert_eq!(message, test_msg_clone);
});
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let sink = PassthroughCodec::default().framed(outgoing);
let mut client_stream = EthStream::new(EthVersion::Eth67, sink);
client_stream.send(test_msg).await.unwrap();
// make sure the server receives the message and asserts before ending the test
handle.await.unwrap();
}
#[tokio::test]
async fn can_write_and_read_ecies() {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let server_key = SecretKey::new(&mut rand_08::thread_rng());
let test_msg = EthMessage::<EthNetworkPrimitives>::NewBlockHashes(
vec![
BlockHashNumber { hash: B256::random(), number: 5 },
BlockHashNumber { hash: B256::random(), number: 6 },
]
.into(),
);
let test_msg_clone = test_msg.clone();
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = ECIESStream::incoming(incoming, server_key).await.unwrap();
let mut stream = EthStream::new(EthVersion::Eth67, stream);
// use the stream to get the next message
let message = stream.next().await.unwrap().unwrap();
assert_eq!(message, test_msg_clone);
});
// create the server pubkey
let server_id = pk2id(&server_key.public_key(SECP256K1));
let client_key = SecretKey::new(&mut rand_08::thread_rng());
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let outgoing = ECIESStream::connect(outgoing, client_key, server_id).await.unwrap();
let mut client_stream = EthStream::new(EthVersion::Eth67, outgoing);
client_stream.send(test_msg).await.unwrap();
// make sure the server receives the message and asserts before ending the test
handle.await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn ethstream_over_p2p() {
// create a p2p stream and server, then confirm that the two are authed
// create tcpstream
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let server_key = SecretKey::new(&mut rand_08::thread_rng());
let test_msg = EthMessage::<EthNetworkPrimitives>::NewBlockHashes(
vec![
BlockHashNumber { hash: B256::random(), number: 5 },
BlockHashNumber { hash: B256::random(), number: 6 },
]
.into(),
);
let genesis = B256::random();
let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
let status = Status {
version: EthVersion::Eth67,
chain: NamedChain::Mainnet.into(),
total_difficulty: U256::ZERO,
blockhash: B256::random(),
genesis,
// Pass the current fork id.
forkid: fork_filter.current(),
};
let unified_status = UnifiedStatus::from_message(StatusMessage::Legacy(status));
let status_copy = unified_status;
let fork_filter_clone = fork_filter.clone();
let test_msg_clone = test_msg.clone();
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = ECIESStream::incoming(incoming, server_key).await.unwrap();
let server_hello = HelloMessageWithProtocols {
protocol_version: ProtocolVersion::V5,
client_version: "bitcoind/1.0.0".to_string(),
protocols: vec![EthVersion::Eth67.into()],
port: DEFAULT_TCP_PORT,
id: pk2id(&server_key.public_key(SECP256K1)),
};
let unauthed_stream = UnauthedP2PStream::new(stream);
let (p2p_stream, _) = unauthed_stream.handshake(server_hello).await.unwrap();
let (mut eth_stream, _) = UnauthedEthStream::new(p2p_stream)
.handshake(status_copy, fork_filter_clone)
.await
.unwrap();
// use the stream to get the next message
let message = eth_stream.next().await.unwrap().unwrap();
assert_eq!(message, test_msg_clone);
});
// create the server pubkey
let server_id = pk2id(&server_key.public_key(SECP256K1));
let client_key = SecretKey::new(&mut rand_08::thread_rng());
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let sink = ECIESStream::connect(outgoing, client_key, server_id).await.unwrap();
let client_hello = HelloMessageWithProtocols {
protocol_version: ProtocolVersion::V5,
client_version: "bitcoind/1.0.0".to_string(),
protocols: vec![EthVersion::Eth67.into()],
port: DEFAULT_TCP_PORT,
id: pk2id(&client_key.public_key(SECP256K1)),
};
let unauthed_stream = UnauthedP2PStream::new(sink);
let (p2p_stream, _) = unauthed_stream.handshake(client_hello).await.unwrap();
let (mut client_stream, _) = UnauthedEthStream::new(p2p_stream)
.handshake(unified_status, fork_filter)
.await
.unwrap();
client_stream.send(test_msg).await.unwrap();
// make sure the server receives the message and asserts before ending the test
handle.await.unwrap();
}
#[tokio::test]
async fn handshake_should_timeout() {
    // Build a minimal legacy Status sharing the same genesis/fork id on both sides.
    let genesis = B256::random();
    let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
    let status = Status {
        version: EthVersion::Eth67,
        chain: NamedChain::Mainnet.into(),
        total_difficulty: U256::ZERO,
        blockhash: B256::random(),
        genesis,
        // Pass the current fork id.
        forkid: fork_filter.current(),
    };
    let unified_status = UnifiedStatus::from_message(StatusMessage::Legacy(status));
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let local_addr = listener.local_addr().unwrap();
    let status_clone = unified_status;
    let fork_filter_clone = fork_filter.clone();
    // Server task: intentionally stalls (11s) longer than the client-side timeout (1s)
    // so the client's handshake_with_timeout fires first. The handle is deliberately
    // not awaited; the test ends once the client observes the timeout.
    let _handle = tokio::spawn(async move {
        // Delay accepting the connection for longer than the client's timeout period
        tokio::time::sleep(Duration::from_secs(11)).await;
        // roughly based off of the design of tokio::net::TcpListener
        let (incoming, _) = listener.accept().await.unwrap();
        let stream = PassthroughCodec::default().framed(incoming);
        let (_, their_status) = UnauthedEthStream::new(stream)
            .handshake::<EthNetworkPrimitives>(status_clone, fork_filter_clone)
            .await
            .unwrap();
        // just make sure it equals our status (our status is a clone of their status)
        assert_eq!(their_status, status_clone);
    });
    let outgoing = TcpStream::connect(local_addr).await.unwrap();
    let sink = PassthroughCodec::default().framed(outgoing);
    // try to connect
    let handshake_result = UnauthedEthStream::new(sink)
        .handshake_with_timeout::<EthNetworkPrimitives>(
            unified_status,
            fork_filter,
            Duration::from_secs(1),
        )
        .await;
    // Assert that a timeout error occurred
    assert!(
        matches!(handshake_result, Err(e) if e.to_string() == EthStreamError::StreamTimeout.to_string())
    );
}
#[tokio::test]
async fn can_write_and_read_raw_capability() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let local_addr = listener.local_addr().unwrap();
    let test_msg = RawCapabilityMessage { id: 0x1234, payload: Bytes::from(vec![1, 2, 3, 4]) };
    let test_msg_clone = test_msg.clone();
    // Server side: read the raw frame off the wire and verify it round-trips back
    // to the original (id, payload) pair.
    let handle = tokio::spawn(async move {
        let (incoming, _) = listener.accept().await.unwrap();
        let stream = PassthroughCodec::default().framed(incoming);
        let mut stream = EthStream::<_, EthNetworkPrimitives>::new(EthVersion::Eth67, stream);
        let bytes = stream.inner_mut().next().await.unwrap().unwrap();
        // Create a cursor to track position while decoding
        let mut id_bytes = &bytes[..];
        let decoded_id = <usize as Decodable>::decode(&mut id_bytes).unwrap();
        assert_eq!(decoded_id, test_msg_clone.id);
        // Get remaining bytes after ID decoding
        let remaining = id_bytes;
        assert_eq!(remaining, &test_msg_clone.payload[..]);
    });
    let outgoing = TcpStream::connect(local_addr).await.unwrap();
    let sink = PassthroughCodec::default().framed(outgoing);
    let mut client_stream = EthStream::<_, EthNetworkPrimitives>::new(EthVersion::Eth67, sink);
    // Send the raw message and flush so the server task can observe it before we join.
    client_stream.start_send_raw(test_msg).unwrap();
    client_stream.inner_mut().flush().await.unwrap();
    handle.await.unwrap();
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/eth_snap_stream.rs | crates/net/eth-wire/src/eth_snap_stream.rs | //! Ethereum and snap combined protocol stream implementation.
//!
//! A stream type for handling both eth and snap protocol messages over a single `RLPx` connection.
//! Provides message encoding/decoding, ID multiplexing, and protocol message processing.
use super::message::MAX_MESSAGE_SIZE;
use crate::{
message::{EthBroadcastMessage, ProtocolBroadcastMessage},
EthMessage, EthMessageID, EthNetworkPrimitives, EthVersion, NetworkPrimitives, ProtocolMessage,
RawCapabilityMessage, SnapMessageId, SnapProtocolMessage,
};
use alloy_rlp::{Bytes, BytesMut, Encodable};
use core::fmt::Debug;
use futures::{Sink, SinkExt};
use pin_project::pin_project;
use std::{
marker::PhantomData,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio_stream::Stream;
/// Error type for the eth and snap stream
#[derive(thiserror::Error, Debug)]
pub enum EthSnapStreamError {
    /// Invalid message for protocol version
    #[error("invalid message for version {0:?}: {1}")]
    InvalidMessage(EthVersion, String),
    /// Unknown message ID (outside both the eth range and the multiplexed snap range)
    #[error("unknown message id: {0}")]
    UnknownMessageId(u8),
    /// Message too large: carries (actual size, maximum allowed size)
    #[error("message too large: {0} > {1}")]
    MessageTooLarge(usize, usize),
    /// RLP decoding error
    #[error("rlp error: {0}")]
    Rlp(#[from] alloy_rlp::Error),
    /// Status message received outside handshake; `Status` is only valid while the
    /// connection is being established
    #[error("status message received outside handshake")]
    StatusNotInHandshake,
}
/// Combined message type that includes either an eth or a snap protocol message
#[derive(Debug)]
pub enum EthSnapMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// An Ethereum protocol message
    Eth(EthMessage<N>),
    /// A snap protocol message
    Snap(SnapProtocolMessage),
}
/// A stream implementation that can handle both eth and snap protocol messages
/// over a single connection.
#[pin_project]
#[derive(Debug, Clone)]
pub struct EthSnapStream<S, N = EthNetworkPrimitives> {
    /// Protocol logic: message-id multiplexing plus encoding/decoding of frames
    eth_snap: EthSnapStreamInner<N>,
    /// Inner byte stream
    #[pin]
    inner: S,
}
impl<S, N> EthSnapStream<S, N>
where
    N: NetworkPrimitives,
{
    /// Create a new eth and snap protocol stream
    pub const fn new(stream: S, eth_version: EthVersion) -> Self {
        Self { eth_snap: EthSnapStreamInner::new(eth_version), inner: stream }
    }

    /// Returns the negotiated eth version
    #[inline]
    pub const fn eth_version(&self) -> EthVersion {
        self.eth_snap.eth_version()
    }

    /// Returns the underlying stream
    #[inline]
    pub const fn inner(&self) -> &S {
        &self.inner
    }

    /// Returns mutable access to the underlying stream
    #[inline]
    pub const fn inner_mut(&mut self) -> &mut S {
        &mut self.inner
    }

    /// Consumes this type and returns the wrapped stream
    #[inline]
    pub fn into_inner(self) -> S {
        self.inner
    }
}
impl<S, E, N> EthSnapStream<S, N>
where
    S: Sink<Bytes, Error = E> + Unpin,
    EthSnapStreamError: From<E>,
    N: NetworkPrimitives,
{
    /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead.
    pub fn start_send_broadcast(
        &mut self,
        item: EthBroadcastMessage<N>,
    ) -> Result<(), EthSnapStreamError> {
        self.inner.start_send_unpin(Bytes::from(alloy_rlp::encode(
            ProtocolBroadcastMessage::from(item),
        )))?;
        Ok(())
    }

    /// Sends a raw capability message directly over the stream.
    ///
    /// The frame is the RLP-encoded message id followed by the raw payload bytes.
    pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthSnapStreamError> {
        // Reserve space for the RLP-encoded id plus the payload. Ids >= 0x80 encode to
        // more than one byte, so use the exact encoded length rather than assuming 1.
        let mut bytes = Vec::with_capacity(msg.id.length() + msg.payload.len());
        msg.id.encode(&mut bytes);
        bytes.extend_from_slice(&msg.payload);
        self.inner.start_send_unpin(bytes.into())?;
        Ok(())
    }
}
impl<S, E, N> Stream for EthSnapStream<S, N>
where
    S: Stream<Item = Result<BytesMut, E>> + Unpin,
    EthSnapStreamError: From<E>,
    N: NetworkPrimitives,
{
    type Item = Result<EthSnapMessage<N>, EthSnapStreamError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.project();
        // Forward readiness from the inner byte stream, then demultiplex each frame
        // into an eth or snap message via the inner protocol logic.
        let res = ready!(this.inner.poll_next(cx));
        match res {
            Some(Ok(bytes)) => Poll::Ready(Some(this.eth_snap.decode_message(bytes))),
            Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
            None => Poll::Ready(None),
        }
    }
}
impl<S, E, N> Sink<EthSnapMessage<N>> for EthSnapStream<S, N>
where
    S: Sink<Bytes, Error = E> + Unpin,
    EthSnapStreamError: From<E>,
    N: NetworkPrimitives,
{
    type Error = EthSnapStreamError;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_ready(cx).map_err(Into::into)
    }

    fn start_send(mut self: Pin<&mut Self>, item: EthSnapMessage<N>) -> Result<(), Self::Error> {
        let mut this = self.as_mut().project();
        // Encode according to the sub-protocol; snap ids are shifted past the eth
        // message-id range by `encode_snap_message`. Encoding an eth `Status` here
        // fails with `StatusNotInHandshake`.
        let bytes = match item {
            EthSnapMessage::Eth(eth_msg) => this.eth_snap.encode_eth_message(eth_msg)?,
            EthSnapMessage::Snap(snap_msg) => this.eth_snap.encode_snap_message(snap_msg),
        };
        this.inner.start_send_unpin(bytes)?;
        Ok(())
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_flush(cx).map_err(Into::into)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_close(cx).map_err(Into::into)
    }
}
/// Stream handling combined eth and snap protocol logic
/// Snap version is not critical to specify yet,
/// Only one version, snap/1, does exist.
#[derive(Debug, Clone)]
struct EthSnapStreamInner<N> {
    /// Eth protocol version
    eth_version: EthVersion,
    /// Type marker for the network primitive types carried by eth messages
    _pd: PhantomData<N>,
}
impl<N> EthSnapStreamInner<N>
where
N: NetworkPrimitives,
{
/// Create a new eth and snap protocol stream
const fn new(eth_version: EthVersion) -> Self {
Self { eth_version, _pd: PhantomData }
}
#[inline]
const fn eth_version(&self) -> EthVersion {
self.eth_version
}
/// Decode a message from the stream
fn decode_message(&self, bytes: BytesMut) -> Result<EthSnapMessage<N>, EthSnapStreamError> {
if bytes.len() > MAX_MESSAGE_SIZE {
return Err(EthSnapStreamError::MessageTooLarge(bytes.len(), MAX_MESSAGE_SIZE));
}
if bytes.is_empty() {
return Err(EthSnapStreamError::Rlp(alloy_rlp::Error::InputTooShort));
}
let message_id = bytes[0];
// This check works because capabilities are sorted lexicographically
// if "eth" before "snap", giving eth messages lower IDs than snap messages,
// and eth message IDs are <= [`EthMessageID::max()`],
// snap message IDs are > [`EthMessageID::max()`].
// See also <https://github.com/paradigmxyz/reth/blob/main/crates/net/eth-wire/src/capability.rs#L272-L283>.
if message_id <= EthMessageID::max(self.eth_version) {
let mut buf = bytes.as_ref();
match ProtocolMessage::decode_message(self.eth_version, &mut buf) {
Ok(protocol_msg) => {
if matches!(protocol_msg.message, EthMessage::Status(_)) {
return Err(EthSnapStreamError::StatusNotInHandshake);
}
Ok(EthSnapMessage::Eth(protocol_msg.message))
}
Err(err) => {
Err(EthSnapStreamError::InvalidMessage(self.eth_version, err.to_string()))
}
}
} else if message_id > EthMessageID::max(self.eth_version) &&
message_id <=
EthMessageID::message_count(self.eth_version) + SnapMessageId::TrieNodes as u8
{
// Checks for multiplexed snap message IDs :
// - message_id > EthMessageID::max() : ensures it's not an eth message
// - message_id <= EthMessageID::message_count() + snap_max : ensures it's within valid
// snap range
// Message IDs are assigned lexicographically during capability negotiation
// So real_snap_id = multiplexed_id - num_eth_messages
let adjusted_message_id = message_id - EthMessageID::message_count(self.eth_version);
let mut buf = &bytes[1..];
match SnapProtocolMessage::decode(adjusted_message_id, &mut buf) {
Ok(snap_msg) => Ok(EthSnapMessage::Snap(snap_msg)),
Err(err) => Err(EthSnapStreamError::Rlp(err)),
}
} else {
Err(EthSnapStreamError::UnknownMessageId(message_id))
}
}
/// Encode an eth message
fn encode_eth_message(&self, item: EthMessage<N>) -> Result<Bytes, EthSnapStreamError> {
if matches!(item, EthMessage::Status(_)) {
return Err(EthSnapStreamError::StatusNotInHandshake);
}
let protocol_msg = ProtocolMessage::from(item);
let mut buf = Vec::new();
protocol_msg.encode(&mut buf);
Ok(Bytes::from(buf))
}
/// Encode a snap protocol message, adjusting the message ID to follow eth message IDs
/// for proper multiplexing.
fn encode_snap_message(&self, message: SnapProtocolMessage) -> Bytes {
let encoded = message.encode();
let message_id = encoded[0];
let adjusted_id = message_id + EthMessageID::message_count(self.eth_version);
let mut adjusted = Vec::with_capacity(encoded.len());
adjusted.push(adjusted_id);
adjusted.extend_from_slice(&encoded[1..]);
Bytes::from(adjusted)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{EthMessage, SnapProtocolMessage};
    use alloy_eips::BlockHashOrNumber;
    use alloy_primitives::B256;
    use alloy_rlp::Encodable;
    use reth_eth_wire_types::{
        message::RequestPair, GetAccountRangeMessage, GetBlockHeaders, HeadersDirection,
    };

    // Helper to create eth message and its bytes
    fn create_eth_message() -> (EthMessage<EthNetworkPrimitives>, BytesMut) {
        let eth_msg = EthMessage::<EthNetworkPrimitives>::GetBlockHeaders(RequestPair {
            request_id: 1,
            message: GetBlockHeaders {
                start_block: BlockHashOrNumber::Number(1),
                limit: 10,
                skip: 0,
                direction: HeadersDirection::Rising,
            },
        });
        let protocol_msg = ProtocolMessage::from(eth_msg.clone());
        let mut buf = Vec::new();
        protocol_msg.encode(&mut buf);
        (eth_msg, BytesMut::from(&buf[..]))
    }

    // Helper to create snap message and its bytes
    // (bytes are already id-adjusted via encode_snap_message, i.e. wire format)
    fn create_snap_message() -> (SnapProtocolMessage, BytesMut) {
        let snap_msg = SnapProtocolMessage::GetAccountRange(GetAccountRangeMessage {
            request_id: 1,
            root_hash: B256::default(),
            starting_hash: B256::default(),
            limit_hash: B256::default(),
            response_bytes: 1000,
        });
        let inner = EthSnapStreamInner::<EthNetworkPrimitives>::new(EthVersion::Eth67);
        let encoded = inner.encode_snap_message(snap_msg.clone());
        (snap_msg, BytesMut::from(&encoded[..]))
    }

    #[test]
    fn test_eth_message_roundtrip() {
        let inner = EthSnapStreamInner::<EthNetworkPrimitives>::new(EthVersion::Eth67);
        let (eth_msg, eth_bytes) = create_eth_message();
        // Verify encoding
        let encoded_result = inner.encode_eth_message(eth_msg.clone());
        assert!(encoded_result.is_ok());
        // Verify decoding
        let decoded_result = inner.decode_message(eth_bytes.clone());
        assert!(matches!(decoded_result, Ok(EthSnapMessage::Eth(_))));
        // round trip
        if let Ok(EthSnapMessage::Eth(decoded_msg)) = inner.decode_message(eth_bytes) {
            assert_eq!(decoded_msg, eth_msg);
            let re_encoded = inner.encode_eth_message(decoded_msg.clone()).unwrap();
            let re_encoded_bytes = BytesMut::from(&re_encoded[..]);
            let re_decoded = inner.decode_message(re_encoded_bytes);
            assert!(matches!(re_decoded, Ok(EthSnapMessage::Eth(_))));
            if let Ok(EthSnapMessage::Eth(final_msg)) = re_decoded {
                assert_eq!(final_msg, decoded_msg);
            }
        }
    }

    #[test]
    fn test_snap_protocol() {
        let inner = EthSnapStreamInner::<EthNetworkPrimitives>::new(EthVersion::Eth67);
        let (snap_msg, snap_bytes) = create_snap_message();
        // Verify encoding
        let encoded_bytes = inner.encode_snap_message(snap_msg.clone());
        assert!(!encoded_bytes.is_empty());
        // Verify decoding
        let decoded_result = inner.decode_message(snap_bytes.clone());
        assert!(matches!(decoded_result, Ok(EthSnapMessage::Snap(_))));
        // round trip
        if let Ok(EthSnapMessage::Snap(decoded_msg)) = inner.decode_message(snap_bytes) {
            assert_eq!(decoded_msg, snap_msg);
            // re-encode message
            let encoded = inner.encode_snap_message(decoded_msg.clone());
            let re_encoded_bytes = BytesMut::from(&encoded[..]);
            // decode with properly adjusted ID
            let re_decoded = inner.decode_message(re_encoded_bytes);
            assert!(matches!(re_decoded, Ok(EthSnapMessage::Snap(_))));
            if let Ok(EthSnapMessage::Snap(final_msg)) = re_decoded {
                assert_eq!(final_msg, decoded_msg);
            }
        }
    }

    #[test]
    fn test_message_id_boundaries() {
        let inner = EthSnapStreamInner::<EthNetworkPrimitives>::new(EthVersion::Eth67);
        // Create a bytes buffer with eth message ID at the max boundary with minimal content
        let eth_max_id = EthMessageID::max(EthVersion::Eth67);
        let mut eth_boundary_bytes = BytesMut::new();
        eth_boundary_bytes.extend_from_slice(&[eth_max_id]);
        eth_boundary_bytes.extend_from_slice(&[0, 0]);
        // This should be decoded as eth message
        // (payload is garbage, so an RLP error is also acceptable — but never a snap decode)
        let eth_boundary_result = inner.decode_message(eth_boundary_bytes);
        assert!(
            eth_boundary_result.is_err() ||
                matches!(eth_boundary_result, Ok(EthSnapMessage::Eth(_)))
        );
        // Create a bytes buffer with message ID just above eth max, it should be snap min
        let snap_min_id = eth_max_id + 1;
        let mut snap_boundary_bytes = BytesMut::new();
        snap_boundary_bytes.extend_from_slice(&[snap_min_id]);
        snap_boundary_bytes.extend_from_slice(&[0, 0]);
        // Not a valid snap message yet, only snap id --> error
        let snap_boundary_result = inner.decode_message(snap_boundary_bytes);
        assert!(snap_boundary_result.is_err());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/p2pstream.rs | crates/net/eth-wire/src/p2pstream.rs | use crate::{
capability::SharedCapabilities,
disconnect::CanDisconnect,
errors::{P2PHandshakeError, P2PStreamError},
pinger::{Pinger, PingerEvent},
DisconnectReason, HelloMessage, HelloMessageWithProtocols,
};
use alloy_primitives::{
bytes::{Buf, BufMut, Bytes, BytesMut},
hex,
};
use alloy_rlp::{Decodable, Encodable, Error as RlpError, EMPTY_LIST_CODE};
use futures::{Sink, SinkExt, StreamExt};
use pin_project::pin_project;
use reth_codecs::add_arbitrary_tests;
use reth_metrics::metrics::counter;
use reth_primitives_traits::GotExpected;
use std::{
collections::VecDeque,
future::Future,
io,
pin::Pin,
task::{ready, Context, Poll},
time::Duration,
};
use tokio_stream::Stream;
use tracing::{debug, trace};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// [`MAX_PAYLOAD_SIZE`] is the maximum size of an uncompressed message payload.
/// This is defined in [EIP-706](https://eips.ethereum.org/EIPS/eip-706).
const MAX_PAYLOAD_SIZE: usize = 16 * 1024 * 1024;

/// [`MAX_RESERVED_MESSAGE_ID`] is the maximum message ID reserved for the `p2p` subprotocol. If
/// there are any incoming messages with an ID greater than this, they are subprotocol messages.
pub const MAX_RESERVED_MESSAGE_ID: u8 = 0x0f;

/// [`MAX_P2P_MESSAGE_ID`] is the maximum message ID in use for the `p2p` subprotocol.
/// (Pong is the highest-numbered `p2p` message actually defined.)
const MAX_P2P_MESSAGE_ID: u8 = P2PMessageID::Pong as u8;

/// [`HANDSHAKE_TIMEOUT`] determines the amount of time to wait before determining that a `p2p`
/// handshake has timed out.
pub const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10);

/// [`PING_TIMEOUT`] determines the amount of time to wait before determining that a `p2p` ping has
/// timed out.
const PING_TIMEOUT: Duration = Duration::from_secs(15);

/// [`PING_INTERVAL`] determines the amount of time to wait between sending `p2p` ping messages
/// when the peer is responsive.
const PING_INTERVAL: Duration = Duration::from_secs(60);

/// [`MAX_P2P_CAPACITY`] is the maximum number of messages that can be buffered to be sent in the
/// `p2p` stream.
///
/// Note: this default is rather low because it is expected that the [`P2PStream`] wraps an
/// [`ECIESStream`](reth_ecies::stream::ECIESStream) which internally already buffers a few MB of
/// encoded data.
const MAX_P2P_CAPACITY: usize = 2;
/// An un-authenticated [`P2PStream`]. This is consumed and returns a [`P2PStream`] after the
/// `Hello` handshake is completed.
#[pin_project]
#[derive(Debug)]
pub struct UnauthedP2PStream<S> {
    /// The raw underlying byte stream/sink (typically an ECIES transport)
    #[pin]
    inner: S,
}
impl<S> UnauthedP2PStream<S> {
    /// Create a new `UnauthedP2PStream` from a type `S` which implements `Stream` and `Sink`.
    pub const fn new(inner: S) -> Self {
        Self { inner }
    }

    /// Returns a reference to the inner stream.
    pub const fn inner(&self) -> &S {
        &self.inner
    }
}
impl<S> UnauthedP2PStream<S>
where
    S: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
{
    /// Consumes the `UnauthedP2PStream` and returns a `P2PStream` after the `Hello` handshake is
    /// completed successfully. This also returns the `Hello` message sent by the remote peer.
    ///
    /// Steps: send our `Hello`, await the peer's first frame (bounded by
    /// [`HANDSHAKE_TIMEOUT`]), require it to be a `Hello` (or surface a peer `Disconnect`),
    /// verify matching `p2p` protocol versions, then negotiate shared capabilities.
    pub async fn handshake(
        mut self,
        hello: HelloMessageWithProtocols,
    ) -> Result<(P2PStream<S>, HelloMessage), P2PStreamError> {
        trace!(?hello, "sending p2p hello to peer");
        // send our hello message with the Sink
        self.inner.send(alloy_rlp::encode(P2PMessage::Hello(hello.message())).into()).await?;
        let first_message_bytes = tokio::time::timeout(HANDSHAKE_TIMEOUT, self.inner.next())
            .await
            .or(Err(P2PStreamError::HandshakeError(P2PHandshakeError::Timeout)))?
            .ok_or(P2PStreamError::HandshakeError(P2PHandshakeError::NoResponse))??;
        // let's check the compressed length first, we will need to check again once confirming
        // that it contains snappy-compressed data (this will be the case for all non-p2p messages).
        if first_message_bytes.len() > MAX_PAYLOAD_SIZE {
            return Err(P2PStreamError::MessageTooBig {
                message_size: first_message_bytes.len(),
                max_size: MAX_PAYLOAD_SIZE,
            })
        }
        // The first message sent MUST be a hello OR disconnect message
        //
        // If the first message is a disconnect message, we should not decode using
        // Decodable::decode, because the first message (either Disconnect or Hello) is not snappy
        // compressed, and the Decodable implementation assumes that non-hello messages are snappy
        // compressed.
        let their_hello = match P2PMessage::decode(&mut &first_message_bytes[..]) {
            Ok(P2PMessage::Hello(hello)) => Ok(hello),
            Ok(P2PMessage::Disconnect(reason)) => {
                if matches!(reason, DisconnectReason::TooManyPeers) {
                    // Too many peers is a very common disconnect reason that spams the DEBUG logs
                    trace!(%reason, "Disconnected by peer during handshake");
                } else {
                    debug!(%reason, "Disconnected by peer during handshake");
                };
                counter!("p2pstream.disconnected_errors").increment(1);
                Err(P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(reason)))
            }
            Err(err) => {
                debug!(%err, msg=%hex::encode(&first_message_bytes), "Failed to decode first message from peer");
                Err(P2PStreamError::HandshakeError(err.into()))
            }
            Ok(msg) => {
                debug!(?msg, "expected hello message but received another message");
                Err(P2PStreamError::HandshakeError(P2PHandshakeError::NonHelloMessageInHandshake))
            }
        }?;
        trace!(
            hello=?their_hello,
            "validating incoming p2p hello from peer"
        );
        if (hello.protocol_version as u8) != their_hello.protocol_version as u8 {
            // send a disconnect message notifying the peer of the protocol version mismatch
            self.send_disconnect(DisconnectReason::IncompatibleP2PProtocolVersion).await?;
            return Err(P2PStreamError::MismatchedProtocolVersion(GotExpected {
                got: their_hello.protocol_version,
                expected: hello.protocol_version,
            }))
        }
        // determine shared capabilities (currently returns only one capability)
        let capability_res =
            SharedCapabilities::try_new(hello.protocols, their_hello.capabilities.clone());
        let shared_capability = match capability_res {
            Err(err) => {
                // we don't share any capabilities, send a disconnect message
                self.send_disconnect(DisconnectReason::UselessPeer).await?;
                Err(err)
            }
            Ok(cap) => Ok(cap),
        }?;
        let stream = P2PStream::new(self.inner, shared_capability);
        Ok((stream, their_hello))
    }
}
impl<S> UnauthedP2PStream<S>
where
    S: Sink<Bytes, Error = io::Error> + Unpin,
{
    /// Sends a `p2p` disconnect message with the given reason while still in the handshake
    /// phase. Handshake-time messages are never snappy compressed.
    pub async fn send_disconnect(
        &mut self,
        reason: DisconnectReason,
    ) -> Result<(), P2PStreamError> {
        trace!(
            %reason,
            "Sending disconnect message during the handshake",
        );
        // RLP-encode the reserved `p2p` Disconnect frame and push it through the sink.
        let frame = alloy_rlp::encode(P2PMessage::Disconnect(reason));
        match self.inner.send(frame.into()).await {
            Ok(()) => Ok(()),
            Err(err) => Err(P2PStreamError::Io(err)),
        }
    }
}
impl<S> CanDisconnect<Bytes> for P2PStream<S>
where
    S: Sink<Bytes, Error = io::Error> + Unpin + Send + Sync,
{
    // Bridges the inherent async `disconnect` into the object-safe `CanDisconnect`
    // trait by boxing the future.
    fn disconnect(
        &mut self,
        reason: DisconnectReason,
    ) -> Pin<Box<dyn Future<Output = Result<(), P2PStreamError>> + Send + '_>> {
        Box::pin(async move { self.disconnect(reason).await })
    }
}
/// A `P2PStream` wraps over any `Stream` that yields bytes and makes it compatible with `p2p`
/// protocol messages.
///
/// This stream supports multiple shared capabilities, that were negotiated during the handshake.
///
/// ### Message-ID based multiplexing
///
/// > Each capability is given as much of the message-ID space as it needs. All such capabilities
/// > must statically specify how many message IDs they require. On connection and reception of the
/// > Hello message, both peers have equivalent information about what capabilities they share
/// > (including versions) and are able to form consensus over the composition of message ID space.
///
/// > Message IDs are assumed to be compact from ID 0x10 onwards (0x00-0x0f is reserved for the
/// > "p2p" capability) and given to each shared (equal-version, equal-name) capability in
/// > alphabetic order. Capability names are case-sensitive. Capabilities which are not shared are
/// > ignored. If multiple versions are shared of the same (equal name) capability, the numerically
/// > highest wins, others are ignored.
///
/// See also <https://github.com/ethereum/devp2p/blob/master/rlpx.md#message-id-based-multiplexing>
///
/// This stream emits _non-empty_ Bytes that start with the normalized message id, so that the first
/// byte of each message starts from 0. If this stream only supports a single capability, for
/// example `eth` then the first byte of each message will match
/// [EthMessageID](reth_eth_wire_types::message::EthMessageID).
#[pin_project]
#[derive(Debug)]
pub struct P2PStream<S> {
    #[pin]
    inner: S,
    /// The snappy encoder used for compressing outgoing messages
    encoder: snap::raw::Encoder,
    /// The snappy decoder used for decompressing incoming messages
    decoder: snap::raw::Decoder,
    /// The state machine used for keeping track of the peer's ping status.
    pinger: Pinger,
    /// The supported capability for this stream.
    shared_capabilities: SharedCapabilities,
    /// Outgoing messages buffered for sending to the underlying stream.
    outgoing_messages: VecDeque<Bytes>,
    /// Maximum number of messages that we can buffer here before the [Sink] impl returns
    /// [`Poll::Pending`].
    outgoing_message_buffer_capacity: usize,
    /// Whether this stream is currently in the process of disconnecting by sending a disconnect
    /// message. Once set, the [`Stream`] impl stops yielding new messages.
    disconnecting: bool,
}
impl<S> P2PStream<S> {
    /// Create a new [`P2PStream`] from the provided stream.
    /// New [`P2PStream`]s are assumed to have completed the `p2p` handshake successfully and are
    /// ready to send and receive subprotocol messages.
    pub fn new(inner: S, shared_capabilities: SharedCapabilities) -> Self {
        Self {
            inner,
            encoder: snap::raw::Encoder::new(),
            decoder: snap::raw::Decoder::new(),
            pinger: Pinger::new(PING_INTERVAL, PING_TIMEOUT),
            shared_capabilities,
            outgoing_messages: VecDeque::new(),
            outgoing_message_buffer_capacity: MAX_P2P_CAPACITY,
            disconnecting: false,
        }
    }

    /// Returns a reference to the inner stream.
    pub const fn inner(&self) -> &S {
        &self.inner
    }

    /// Sets a custom outgoing message buffer capacity.
    ///
    /// # Panics
    ///
    /// If the provided capacity is `0`.
    pub const fn set_outgoing_message_buffer_capacity(&mut self, capacity: usize) {
        // Enforce the documented contract: with a capacity of 0 the stream would never
        // report outgoing capacity, making the `Sink` permanently pending.
        assert!(capacity > 0, "outgoing message buffer capacity must be non-zero");
        self.outgoing_message_buffer_capacity = capacity;
    }

    /// Returns the shared capabilities for this stream.
    ///
    /// This includes all the shared capabilities that were negotiated during the handshake and
    /// their offsets based on the number of messages of each capability.
    pub const fn shared_capabilities(&self) -> &SharedCapabilities {
        &self.shared_capabilities
    }

    /// Returns `true` if the stream has outgoing capacity.
    fn has_outgoing_capacity(&self) -> bool {
        self.outgoing_messages.len() < self.outgoing_message_buffer_capacity
    }

    /// Queues in a _snappy_ encoded [`P2PMessage::Pong`] message.
    fn send_pong(&mut self) {
        self.outgoing_messages.push_back(Bytes::from(alloy_rlp::encode(P2PMessage::Pong)));
    }

    /// Queues in a _snappy_ encoded [`P2PMessage::Ping`] message.
    pub fn send_ping(&mut self) {
        self.outgoing_messages.push_back(Bytes::from(alloy_rlp::encode(P2PMessage::Ping)));
    }
}
/// Gracefully disconnects the connection by sending a disconnect message and stop reading new
/// messages.
pub trait DisconnectP2P {
    /// Starts to gracefully disconnect.
    fn start_disconnect(&mut self, reason: DisconnectReason) -> Result<(), P2PStreamError>;

    /// Returns `true` if the connection is about to disconnect.
    fn is_disconnecting(&self) -> bool;
}
impl<S> DisconnectP2P for P2PStream<S> {
    /// Starts to gracefully disconnect the connection by sending a Disconnect message and stop
    /// reading new messages.
    ///
    /// Once disconnect process has started, the [`Stream`] will terminate immediately.
    ///
    /// # Errors
    ///
    /// Returns an error only if the message fails to compress.
    fn start_disconnect(&mut self, reason: DisconnectReason) -> Result<(), P2PStreamError> {
        // clear any buffered messages and queue in
        self.outgoing_messages.clear();
        let disconnect = P2PMessage::Disconnect(reason);
        let mut buf = Vec::with_capacity(disconnect.length());
        disconnect.encode(&mut buf);
        // Wire format: 1 uncompressed message-id byte, then the snappy-compressed RLP
        // payload (buf[0] is the id, buf[1..] the payload).
        let mut compressed = vec![0u8; 1 + snap::raw::max_compress_len(buf.len() - 1)];
        let compressed_size =
            self.encoder.compress(&buf[1..], &mut compressed[1..]).map_err(|err| {
                debug!(
                    %err,
                    msg=%hex::encode(&buf[1..]),
                    "error compressing disconnect"
                );
                err
            })?;
        // truncate the compressed buffer to the actual compressed size (plus one for the message
        // id)
        compressed.truncate(compressed_size + 1);
        // we do not add the capability offset because the disconnect message is a `p2p` reserved
        // message
        compressed[0] = buf[0];
        self.outgoing_messages.push_back(compressed.into());
        self.disconnecting = true;
        Ok(())
    }

    fn is_disconnecting(&self) -> bool {
        self.disconnecting
    }
}
impl<S> P2PStream<S>
where
    S: Sink<Bytes, Error = io::Error> + Unpin + Send,
{
    /// Disconnects the connection by sending a disconnect message.
    ///
    /// This future resolves once the disconnect message has been sent and the stream has been
    /// closed.
    pub async fn disconnect(&mut self, reason: DisconnectReason) -> Result<(), P2PStreamError> {
        // Queue the disconnect frame, then flush and close the underlying sink.
        self.start_disconnect(reason)?;
        self.close().await
    }
}
// S must also be `Sink` because we need to be able to respond with ping messages to follow the
// protocol
impl<S> Stream for P2PStream<S>
where
    S: Stream<Item = io::Result<BytesMut>> + Sink<Bytes, Error = io::Error> + Unpin,
{
    type Item = Result<BytesMut, P2PStreamError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        if this.disconnecting {
            // if disconnecting, stop reading messages
            return Poll::Ready(None)
        }
        // we should loop here to ensure we don't return Poll::Pending if we have a message to
        // return behind any pings we need to respond to
        while let Poll::Ready(res) = this.inner.poll_next_unpin(cx) {
            let bytes = match res {
                Some(Ok(bytes)) => bytes,
                Some(Err(err)) => return Poll::Ready(Some(Err(err.into()))),
                None => return Poll::Ready(None),
            };
            if bytes.is_empty() {
                // empty messages are not allowed
                return Poll::Ready(Some(Err(P2PStreamError::EmptyProtocolMessage)))
            }
            // first decode disconnect reasons, because they can be encoded in a variety of forms
            // over the wire, in both snappy compressed and uncompressed forms.
            //
            // see: [crate::disconnect::tests::test_decode_known_reasons]
            let id = bytes[0];
            if id == P2PMessageID::Disconnect as u8 {
                // We can't handle the error here because disconnect reasons are encoded as both:
                // * snappy compressed, AND
                // * uncompressed
                // over the network.
                //
                // If the decoding succeeds, we already checked the id and know this is a
                // disconnect message, so we can return with the reason.
                //
                // If the decoding fails, we continue, and will attempt to decode it again if the
                // message is snappy compressed. Failure handling in that step is the primary point
                // where an error is returned if the disconnect reason is malformed.
                if let Ok(reason) = DisconnectReason::decode(&mut &bytes[1..]) {
                    return Poll::Ready(Some(Err(P2PStreamError::Disconnected(reason))))
                }
            }
            // first check that the compressed message length does not exceed the max
            // payload size
            let decompressed_len = snap::raw::decompress_len(&bytes[1..])?;
            if decompressed_len > MAX_PAYLOAD_SIZE {
                return Poll::Ready(Some(Err(P2PStreamError::MessageTooBig {
                    message_size: decompressed_len,
                    max_size: MAX_PAYLOAD_SIZE,
                })))
            }
            // create a buffer to hold the decompressed message, adding a byte to the length for
            // the message ID byte, which is the first byte in this buffer
            let mut decompress_buf = BytesMut::zeroed(decompressed_len + 1);
            // each message following a successful handshake is compressed with snappy, so we need
            // to decompress the message before we can decode it.
            this.decoder.decompress(&bytes[1..], &mut decompress_buf[1..]).map_err(|err| {
                debug!(
                    %err,
                    msg=%hex::encode(&bytes[1..]),
                    "error decompressing p2p message"
                );
                err
            })?;
            match id {
                _ if id == P2PMessageID::Ping as u8 => {
                    trace!("Received Ping, Sending Pong");
                    this.send_pong();
                    // This is required because the `Sink` may not be polled externally, and if
                    // that happens, the pong will never be sent.
                    cx.waker().wake_by_ref();
                }
                _ if id == P2PMessageID::Hello as u8 => {
                    // we have received a hello message outside of the handshake, so we will return
                    // an error
                    return Poll::Ready(Some(Err(P2PStreamError::HandshakeError(
                        P2PHandshakeError::HelloNotInHandshake,
                    ))))
                }
                _ if id == P2PMessageID::Pong as u8 => {
                    // if we were waiting for a pong, this will reset the pinger state
                    this.pinger.on_pong()?
                }
                _ if id == P2PMessageID::Disconnect as u8 => {
                    // At this point, the `decompress_buf` contains the snappy decompressed
                    // disconnect message.
                    //
                    // It's possible we already tried to RLP decode this, but it was snappy
                    // compressed, so we need to RLP decode it again.
                    let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).inspect_err(|err| {
                        debug!(
                            %err, msg=%hex::encode(&decompress_buf[1..]), "Failed to decode disconnect message from peer"
                        );
                    })?;
                    return Poll::Ready(Some(Err(P2PStreamError::Disconnected(reason))))
                }
                _ if id > MAX_P2P_MESSAGE_ID && id <= MAX_RESERVED_MESSAGE_ID => {
                    // we have received an unknown reserved message
                    return Poll::Ready(Some(Err(P2PStreamError::UnknownReservedMessageId(id))))
                }
                _ => {
                    // we have received a message that is outside the `p2p` reserved message space,
                    // so it is a subprotocol message.
                    // Peers must be able to identify messages meant for different subprotocols
                    // using a single message ID byte, and those messages must be distinct from the
                    // lower-level `p2p` messages.
                    //
                    // To ensure that messages for subprotocols are distinct from messages meant
                    // for the `p2p` capability, message IDs 0x00 - 0x0f are reserved for `p2p`
                    // messages, so subprotocol messages must have an ID of 0x10 or higher.
                    //
                    // To ensure that messages for two different capabilities are distinct from
                    // each other, all shared capabilities are first ordered lexicographically.
                    // Message IDs are then reserved in this order, starting at 0x10, reserving a
                    // message ID for each message the capability supports.
                    //
                    // For example, if the shared capabilities are `eth/67` (containing 10
                    // messages), and "qrs/65" (containing 8 messages):
                    //
                    // * The special case of `p2p`: `p2p` is reserved message IDs 0x00 - 0x0f.
                    // * `eth/67` is reserved message IDs 0x10 - 0x19.
                    // * `qrs/65` is reserved message IDs 0x1a - 0x21.
                    //
                    // Normalize the id so subprotocol message ids start at 0 for consumers.
                    decompress_buf[0] = bytes[0] - MAX_RESERVED_MESSAGE_ID - 1;
                    return Poll::Ready(Some(Ok(decompress_buf)))
                }
            }
        }
        Poll::Pending
    }
}
impl<S> Sink<Bytes> for P2PStream<S>
where
    S: Sink<Bytes, Error = io::Error> + Unpin,
{
    type Error = P2PStreamError;
    /// Readiness is gated on three things, in order: the keep-alive pinger (a timeout here
    /// starts a graceful disconnect instead of reporting readiness failure), the underlying
    /// sink's readiness (used as an opportunity to drain buffered frames), and finally the
    /// bounded outgoing-message buffer having free capacity.
    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        let mut this = self.as_mut();
        // poll the pinger to determine if we should send a ping
        match this.pinger.poll_ping(cx) {
            Poll::Pending => {}
            Poll::Ready(Ok(PingerEvent::Ping)) => {
                this.send_ping();
            }
            _ => {
                // ping timed out (or the pinger errored): begin a graceful disconnect
                // encode the disconnect message
                this.start_disconnect(DisconnectReason::PingTimeout)?;
                // End the stream after ping related error
                return Poll::Ready(Ok(()))
            }
        }
        match this.inner.poll_ready_unpin(cx) {
            Poll::Pending => {}
            Poll::Ready(Err(err)) => return Poll::Ready(Err(P2PStreamError::Io(err))),
            Poll::Ready(Ok(())) => {
                // the inner sink can accept frames: opportunistically drain our buffer so
                // capacity frees up before we report readiness
                let flushed = this.poll_flush(cx);
                if flushed.is_ready() {
                    return flushed
                }
            }
        }
        if self.has_outgoing_capacity() {
            // still has capacity
            Poll::Ready(Ok(()))
        } else {
            Poll::Pending
        }
    }
    /// Queues a subprotocol message for sending.
    ///
    /// `item` is expected to be `[relative message id byte][uncompressed payload]`. The payload
    /// is snappy-compressed and the id byte is shifted out of the reserved `p2p` id space
    /// before the frame is buffered.
    fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> {
        if item.len() > MAX_PAYLOAD_SIZE {
            return Err(P2PStreamError::MessageTooBig {
                message_size: item.len(),
                max_size: MAX_PAYLOAD_SIZE,
            })
        }
        if item.is_empty() {
            // empty messages are not allowed
            return Err(P2PStreamError::EmptyProtocolMessage)
        }
        // ensure we have free capacity
        if !self.has_outgoing_capacity() {
            return Err(P2PStreamError::SendBufferFull)
        }
        let this = self.project();
        // reserve one byte up front for the (uncompressed) message id, then the worst-case
        // snappy output for the payload
        let mut compressed = BytesMut::zeroed(1 + snap::raw::max_compress_len(item.len() - 1));
        let compressed_size =
            this.encoder.compress(&item[1..], &mut compressed[1..]).map_err(|err| {
                debug!(
                    %err,
                    msg=%hex::encode(&item[1..]),
                    "error compressing p2p message"
                );
                err
            })?;
        // truncate the compressed buffer to the actual compressed size (plus one for the message
        // id)
        compressed.truncate(compressed_size + 1);
        // all messages sent in this stream are subprotocol messages, so we need to switch the
        // message id based on the offset
        // NOTE(review): assumes item[0] is a capability-relative id small enough that the
        // shift does not overflow u8 — TODO confirm upstream multiplexer guarantees this
        compressed[0] = item[0] + MAX_RESERVED_MESSAGE_ID + 1;
        this.outgoing_messages.push_back(compressed.freeze());
        Ok(())
    }
    /// Returns `Poll::Ready(Ok(()))` when no buffered items remain.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        let mut this = self.project();
        // push buffered frames into the inner sink until it back-pressures, errors, or the
        // buffer is empty; the loop's outcome is deferred so the inner sink is flushed either way
        let poll_res = loop {
            match this.inner.as_mut().poll_ready(cx) {
                Poll::Pending => break Poll::Pending,
                Poll::Ready(Err(err)) => break Poll::Ready(Err(err.into())),
                Poll::Ready(Ok(())) => {
                    let Some(message) = this.outgoing_messages.pop_front() else {
                        break Poll::Ready(Ok(()))
                    };
                    if let Err(err) = this.inner.as_mut().start_send(message) {
                        break Poll::Ready(Err(err.into()))
                    }
                }
            }
        };
        ready!(this.inner.as_mut().poll_flush(cx))?;
        poll_res
    }
    /// Flushes all buffered frames, then closes the inner sink.
    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        ready!(self.as_mut().poll_flush(cx))?;
        ready!(self.project().inner.poll_close(cx))?;
        Poll::Ready(Ok(()))
    }
}
/// This represents only the reserved `p2p` subprotocol messages.
///
/// These occupy message ids `0x00..=0x03`; all higher ids belong to negotiated subprotocols.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(rlp)]
pub enum P2PMessage {
    /// The first packet sent over the connection, and sent once by both sides.
    Hello(HelloMessage),
    /// Inform the peer that a disconnection is imminent; if received, a peer should disconnect
    /// immediately.
    Disconnect(DisconnectReason),
    /// Requests an immediate reply of [`P2PMessage::Pong`] from the peer.
    Ping,
    /// Reply to the peer's [`P2PMessage::Ping`] packet.
    Pong,
}
impl P2PMessage {
    /// Gets the [`P2PMessageID`] for the given message.
    ///
    /// This is the reserved id byte (`0x00..=0x03`) written on the wire ahead of the payload.
    pub const fn message_id(&self) -> P2PMessageID {
        match self {
            Self::Hello(_) => P2PMessageID::Hello,
            Self::Disconnect(_) => P2PMessageID::Disconnect,
            Self::Ping => P2PMessageID::Ping,
            Self::Pong => P2PMessageID::Pong,
        }
    }
}
impl Encodable for P2PMessage {
    /// Encodes the message id byte followed by the payload.
    ///
    /// [`P2PMessage::Ping`] and [`P2PMessage::Pong`] are written with the fixed snappy preamble
    /// (`0x01 0x00` length header followed by the RLP empty-list byte) because their payloads
    /// are always snappy encoded on the wire. [`P2PMessage::Hello`] and
    /// [`P2PMessage::Disconnect`] delegate to their own [`Encodable`] impls and receive no
    /// snappy preamble here.
    fn encode(&self, out: &mut dyn BufMut) {
        (self.message_id() as u8).encode(out);
        match self {
            Self::Hello(msg) => msg.encode(out),
            Self::Disconnect(msg) => msg.encode(out),
            Self::Ping => {
                // Ping payload is _always_ snappy encoded
                out.put_u8(0x01);
                out.put_u8(0x00);
                out.put_u8(EMPTY_LIST_CODE);
            }
            Self::Pong => {
                // Pong payload is _always_ snappy encoded
                out.put_u8(0x01);
                out.put_u8(0x00);
                out.put_u8(EMPTY_LIST_CODE);
            }
        }
    }
    /// Returns the encoded length: the payload length plus one byte for the `p2p` message id.
    fn length(&self) -> usize {
        let payload_len = match self {
            Self::Hello(msg) => msg.length(),
            Self::Disconnect(msg) => msg.length(),
            // id + snappy encoded payload
            Self::Ping | Self::Pong => 3, // len([0x01, 0x00, 0xc0]) = 3
        };
        payload_len + 1 // (1 for length of p2p message id)
    }
}
impl Decodable for P2PMessage {
    /// Decodes a `p2p` message from its id byte plus payload.
    ///
    /// [`P2PMessage::Ping`] and [`P2PMessage::Pong`] expect the fixed snappy preamble produced
    /// by the [`Encodable`] implementation and it is stripped here. [`P2PMessage::Hello`] and
    /// [`P2PMessage::Disconnect`] delegate directly to their payload types' `decode`; no snappy
    /// preamble is stripped for them in this function.
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        /// Removes the snappy prefix from the Ping/Pong buffer
        fn advance_snappy_ping_pong_payload(buf: &mut &[u8]) -> alloy_rlp::Result<()> {
            if buf.len() < 3 {
                return Err(RlpError::InputTooShort)
            }
            if buf[..3] != [0x01, 0x00, EMPTY_LIST_CODE] {
                return Err(RlpError::Custom("expected snappy payload"))
            }
            buf.advance(3);
            Ok(())
        }
        // peek the id via a reborrow so `buf` is only advanced once the id is known to be valid
        let message_id = u8::decode(&mut &buf[..])?;
        let id = P2PMessageID::try_from(message_id)
            .or(Err(RlpError::Custom("unknown p2p message id")))?;
        buf.advance(1);
        match id {
            P2PMessageID::Hello => Ok(Self::Hello(HelloMessage::decode(buf)?)),
            P2PMessageID::Disconnect => Ok(Self::Disconnect(DisconnectReason::decode(buf)?)),
            P2PMessageID::Ping => {
                advance_snappy_ping_pong_payload(buf)?;
                Ok(Self::Ping)
            }
            P2PMessageID::Pong => {
                advance_snappy_ping_pong_payload(buf)?;
                Ok(Self::Pong)
            }
        }
    }
}
/// Message IDs for `p2p` subprotocol messages.
///
/// The discriminant values are the on-wire reserved id bytes.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum P2PMessageID {
    /// Message ID for the [`P2PMessage::Hello`] message.
    Hello = 0x00,
    /// Message ID for the [`P2PMessage::Disconnect`] message.
    Disconnect = 0x01,
    /// Message ID for the [`P2PMessage::Ping`] message.
    Ping = 0x02,
    /// Message ID for the [`P2PMessage::Pong`] message.
    Pong = 0x03,
}
impl From<P2PMessage> for P2PMessageID {
fn from(msg: P2PMessage) -> Self {
match msg {
P2PMessage::Hello(_) => Self::Hello,
P2PMessage::Disconnect(_) => Self::Disconnect,
P2PMessage::Ping => Self::Ping,
P2PMessage::Pong => Self::Pong,
}
}
}
impl TryFrom<u8> for P2PMessageID {
    type Error = P2PStreamError;
    /// Decodes a raw id byte into a reserved `p2p` message id.
    ///
    /// Any byte outside `0x00..=0x03` is rejected with
    /// [`P2PStreamError::UnknownReservedMessageId`].
    fn try_from(id: u8) -> Result<Self, Self::Error> {
        let decoded = match id {
            0x00 => Self::Hello,
            0x01 => Self::Disconnect,
            0x02 => Self::Ping,
            0x03 => Self::Pong,
            unknown => return Err(P2PStreamError::UnknownReservedMessageId(unknown)),
        };
        Ok(decoded)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{capability::SharedCapability, test_utils::eth_hello, EthVersion, ProtocolVersion};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::codec::Decoder;
#[tokio::test]
async fn test_can_disconnect() {
reth_tracing::init_test_tracing();
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let local_addr = listener.local_addr().unwrap();
let expected_disconnect = DisconnectReason::UselessPeer;
let handle = tokio::spawn(async move {
// roughly based off of the design of tokio::net::TcpListener
let (incoming, _) = listener.accept().await.unwrap();
let stream = crate::PassthroughCodec::default().framed(incoming);
let (server_hello, _) = eth_hello();
let (mut p2p_stream, _) =
UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap();
p2p_stream.disconnect(expected_disconnect).await.unwrap();
});
let outgoing = TcpStream::connect(local_addr).await.unwrap();
let sink = crate::PassthroughCodec::default().framed(outgoing);
let (client_hello, _) = eth_hello();
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/handshake.rs | crates/net/eth-wire/src/handshake.rs | use crate::{
errors::{EthHandshakeError, EthStreamError, P2PStreamError},
ethstream::MAX_STATUS_SIZE,
CanDisconnect,
};
use bytes::{Bytes, BytesMut};
use futures::{Sink, SinkExt, Stream};
use reth_eth_wire_types::{
DisconnectReason, EthMessage, EthNetworkPrimitives, ProtocolMessage, StatusMessage,
UnifiedStatus,
};
use reth_ethereum_forks::ForkFilter;
use reth_primitives_traits::GotExpected;
use std::{fmt::Debug, future::Future, pin::Pin, time::Duration};
use tokio::time::timeout;
use tokio_stream::StreamExt;
use tracing::{debug, trace};
/// A trait that knows how to perform the P2P handshake.
pub trait EthRlpxHandshake: Debug + Send + Sync + 'static {
    /// Perform the P2P handshake for the `eth` protocol.
    ///
    /// `unauth` is the not-yet-authenticated stream, `status` our advertised status,
    /// `fork_filter` the validator for the peer's fork id, and `timeout_limit` the overall
    /// deadline for the exchange. Returns the peer's validated status on success.
    fn handshake<'a>(
        &'a self,
        unauth: &'a mut dyn UnauthEth,
        status: UnifiedStatus,
        fork_filter: ForkFilter,
        timeout_limit: Duration,
    ) -> Pin<Box<dyn Future<Output = Result<UnifiedStatus, EthStreamError>> + 'a + Send>>;
}
/// An unauthenticated stream that can send and receive messages.
///
/// Marker trait combining the stream/sink/disconnect bounds the handshake needs; implemented
/// blanketly for any type satisfying them.
pub trait UnauthEth:
    Stream<Item = Result<BytesMut, P2PStreamError>>
    + Sink<Bytes, Error = P2PStreamError>
    + CanDisconnect<Bytes>
    + Unpin
    + Send
{
}
// Blanket impl: anything with the right bounds is automatically an `UnauthEth`.
impl<T> UnauthEth for T where
    T: Stream<Item = Result<BytesMut, P2PStreamError>>
        + Sink<Bytes, Error = P2PStreamError>
        + CanDisconnect<Bytes>
        + Unpin
        + Send
{
}
/// The Ethereum P2P handshake.
///
/// This performs the regular ethereum `eth` rlpx handshake.
#[derive(Debug, Default, Clone)]
#[non_exhaustive]
pub struct EthHandshake;
impl EthRlpxHandshake for EthHandshake {
    /// Runs [`EthereumEthHandshake::eth_handshake`] under the given deadline; an elapsed
    /// timeout maps to [`EthStreamError::StreamTimeout`].
    fn handshake<'a>(
        &'a self,
        unauth: &'a mut dyn UnauthEth,
        status: UnifiedStatus,
        fork_filter: ForkFilter,
        timeout_limit: Duration,
    ) -> Pin<Box<dyn Future<Output = Result<UnifiedStatus, EthStreamError>> + 'a + Send>> {
        Box::pin(async move {
            timeout(timeout_limit, EthereumEthHandshake(unauth).eth_handshake(status, fork_filter))
                .await
                .map_err(|_| EthStreamError::StreamTimeout)?
        })
    }
}
/// A type that performs the ethereum specific `eth` protocol handshake.
///
/// Wraps a mutable reference to the underlying unauthenticated stream for the duration of the
/// handshake.
#[derive(Debug)]
pub struct EthereumEthHandshake<'a, S: ?Sized>(pub &'a mut S);
impl<S: ?Sized, E> EthereumEthHandshake<'_, S>
where
    S: Stream<Item = Result<BytesMut, E>> + CanDisconnect<Bytes> + Send + Unpin,
    EthStreamError: From<E> + From<<S as Sink<Bytes>>::Error>,
{
    /// Performs the `eth` rlpx protocol handshake using the given input stream.
    ///
    /// Sends our status first, then receives and validates the peer's status message: size
    /// limit, genesis hash, protocol version, chain id, (for legacy statuses) total difficulty
    /// bit length, and fork id. On any mismatch the peer is disconnected before the error is
    /// returned. Returns the peer's status as a [`UnifiedStatus`] on success.
    pub async fn eth_handshake(
        self,
        unified_status: UnifiedStatus,
        fork_filter: ForkFilter,
    ) -> Result<UnifiedStatus, EthStreamError> {
        let unauth = self.0;
        let status = unified_status.into_message();
        // Send our status message
        let status_msg = alloy_rlp::encode(ProtocolMessage::<EthNetworkPrimitives>::from(
            EthMessage::Status(status),
        ))
        .into();
        unauth.send(status_msg).await.map_err(EthStreamError::from)?;
        // Receive peer's response
        let their_msg_res = unauth.next().await;
        let their_msg = match their_msg_res {
            Some(Ok(msg)) => msg,
            Some(Err(e)) => return Err(EthStreamError::from(e)),
            None => {
                // stream ended without a response: disconnect and report
                unauth
                    .disconnect(DisconnectReason::DisconnectRequested)
                    .await
                    .map_err(EthStreamError::from)?;
                return Err(EthStreamError::EthHandshakeError(EthHandshakeError::NoResponse));
            }
        };
        // reject oversized status messages before attempting to decode them
        if their_msg.len() > MAX_STATUS_SIZE {
            unauth
                .disconnect(DisconnectReason::ProtocolBreach)
                .await
                .map_err(EthStreamError::from)?;
            return Err(EthStreamError::MessageTooBig(their_msg.len()));
        }
        // decode using our own version; version agreement is verified below
        let version = status.version();
        let msg = match ProtocolMessage::<EthNetworkPrimitives>::decode_message(
            version,
            &mut their_msg.as_ref(),
        ) {
            Ok(m) => m,
            Err(err) => {
                // NOTE(review): decode failures disconnect with `DisconnectRequested` rather
                // than `ProtocolBreach` — confirm this asymmetry is intentional upstream
                debug!("decode error in eth handshake: msg={their_msg:x}");
                unauth
                    .disconnect(DisconnectReason::DisconnectRequested)
                    .await
                    .map_err(EthStreamError::from)?;
                return Err(EthStreamError::InvalidMessage(err));
            }
        };
        // Validate peer response
        match msg.message {
            EthMessage::Status(their_status_message) => {
                trace!("Validating incoming ETH status from peer");
                // both peers must agree on the genesis block hash
                if status.genesis() != their_status_message.genesis() {
                    unauth
                        .disconnect(DisconnectReason::ProtocolBreach)
                        .await
                        .map_err(EthStreamError::from)?;
                    return Err(EthHandshakeError::MismatchedGenesis(
                        GotExpected {
                            expected: status.genesis(),
                            got: their_status_message.genesis(),
                        }
                        .into(),
                    )
                    .into());
                }
                // both peers must speak the same eth protocol version
                if status.version() != their_status_message.version() {
                    unauth
                        .disconnect(DisconnectReason::ProtocolBreach)
                        .await
                        .map_err(EthStreamError::from)?;
                    return Err(EthHandshakeError::MismatchedProtocolVersion(GotExpected {
                        got: their_status_message.version(),
                        expected: status.version(),
                    })
                    .into());
                }
                // both peers must be on the same chain
                if *status.chain() != *their_status_message.chain() {
                    unauth
                        .disconnect(DisconnectReason::ProtocolBreach)
                        .await
                        .map_err(EthStreamError::from)?;
                    return Err(EthHandshakeError::MismatchedChain(GotExpected {
                        got: *their_status_message.chain(),
                        expected: *status.chain(),
                    })
                    .into());
                }
                // Ensure total difficulty is reasonable (legacy statuses only; post-merge
                // status messages no longer carry a total difficulty)
                if let StatusMessage::Legacy(s) = status {
                    if s.total_difficulty.bit_len() > 160 {
                        unauth
                            .disconnect(DisconnectReason::ProtocolBreach)
                            .await
                            .map_err(EthStreamError::from)?;
                        return Err(EthHandshakeError::TotalDifficultyBitLenTooLarge {
                            got: s.total_difficulty.bit_len(),
                            maximum: 160,
                        }
                        .into());
                    }
                }
                // Fork validation
                if let Err(err) = fork_filter
                    .validate(their_status_message.forkid())
                    .map_err(EthHandshakeError::InvalidFork)
                {
                    unauth
                        .disconnect(DisconnectReason::ProtocolBreach)
                        .await
                        .map_err(EthStreamError::from)?;
                    return Err(err.into());
                }
                Ok(UnifiedStatus::from_message(their_status_message))
            }
            _ => {
                // any non-Status message during the handshake is a protocol breach
                unauth
                    .disconnect(DisconnectReason::ProtocolBreach)
                    .await
                    .map_err(EthStreamError::from)?;
                Err(EthStreamError::EthHandshakeError(
                    EthHandshakeError::NonStatusMessageInHandshake,
                ))
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/pinger.rs | crates/net/eth-wire/src/pinger.rs | use crate::errors::PingerError;
use std::{
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::time::{Instant, Interval, Sleep};
use tokio_stream::Stream;
/// The pinger is a state machine that is created with a maximum number of pongs that can be
/// missed.
#[derive(Debug)]
pub(crate) struct Pinger {
    /// The timer used for the next ping.
    ping_interval: Interval,
    /// Sleep tracking the deadline by which the outstanding ping's pong must arrive; re-armed
    /// each time a ping is emitted.
    timeout_timer: Pin<Box<Sleep>>,
    /// The timeout duration for each ping.
    timeout: Duration,
    /// Keeps track of the state
    state: PingState,
}
// === impl Pinger ===
impl Pinger {
    /// Creates a new [`Pinger`] that emits a ping every `ping_interval` and expects the
    /// matching pong within `timeout_duration`.
    pub(crate) fn new(ping_interval: Duration, timeout_duration: Duration) -> Self {
        let first_tick = Instant::now() + ping_interval;
        Self {
            ping_interval: tokio::time::interval_at(first_tick, ping_interval),
            timeout_timer: Box::pin(tokio::time::sleep(timeout_duration)),
            timeout: timeout_duration,
            state: PingState::Ready,
        }
    }
    /// Records a pong from the peer.
    ///
    /// A pong while `Ready` is unexpected and surfaces as an error. A pong while waiting — or
    /// even after a timeout, since it proves the connection is still alive — resets the pinger
    /// back to `Ready` and restarts the ping interval.
    pub(crate) fn on_pong(&mut self) -> Result<(), PingerError> {
        if self.state == PingState::Ready {
            return Err(PingerError::UnexpectedPong)
        }
        self.state = PingState::Ready;
        self.ping_interval.reset();
        Ok(())
    }
    /// Returns the current state of the pinger.
    pub(crate) const fn state(&self) -> PingState {
        self.state
    }
    /// Drives the ping state machine.
    ///
    /// Yields [`PingerEvent::Ping`] when the interval elapses (arming the pong deadline), and
    /// [`PingerEvent::Timeout`] once a pending pong overruns that deadline; otherwise pending.
    pub(crate) fn poll_ping(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Result<PingerEvent, PingerError>> {
        match self.state() {
            PingState::Ready => {
                if self.ping_interval.poll_tick(cx).is_pending() {
                    return Poll::Pending
                }
                // interval fired: arm the pong deadline and ask the caller to send a ping
                self.timeout_timer.as_mut().reset(Instant::now() + self.timeout);
                self.state = PingState::WaitingForPong;
                Poll::Ready(Ok(PingerEvent::Ping))
            }
            PingState::WaitingForPong => {
                if !self.timeout_timer.is_elapsed() {
                    return Poll::Pending
                }
                self.state = PingState::TimedOut;
                Poll::Ready(Ok(PingerEvent::Timeout))
            }
            // after a timeout we stay pending; the connection is terminated elsewhere
            PingState::TimedOut => Poll::Pending,
        }
    }
}
impl Stream for Pinger {
    type Item = Result<PingerEvent, PingerError>;
    /// Adapts [`Pinger::poll_ping`] into a never-ending stream of pinger events.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let pinger = self.get_mut();
        pinger.poll_ping(cx).map(Some)
    }
}
/// This represents the possible states of the pinger.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum PingState {
    /// There are no pings in flight, or all pings have been responded to, and we are ready to
    /// send a ping at a later point.
    Ready,
    /// A ping has been sent and we are awaiting the matching pong from the peer.
    WaitingForPong,
    /// The peer has failed to respond to a ping.
    TimedOut,
}
/// The element type produced by a [`Pinger`], representing either a new
/// [`Ping`](super::P2PMessage::Ping)
/// message to send, or an indication that the peer should be timed out.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum PingerEvent {
    /// A new [`Ping`](super::P2PMessage::Ping) message should be sent.
    Ping,
    /// The peer should be timed out.
    Timeout,
}
#[cfg(test)]
mod tests {
    use super::*;
    use futures::StreamExt;
    /// Exercises the full ping → pong → timeout → recovery cycle of the [`Pinger`].
    #[tokio::test]
    async fn test_ping_timeout() {
        let interval = Duration::from_millis(300);
        // we should wait for the interval to elapse and receive a pong before the timeout elapses
        let mut pinger = Pinger::new(interval, Duration::from_millis(20));
        assert_eq!(pinger.next().await.unwrap().unwrap(), PingerEvent::Ping);
        pinger.on_pong().unwrap();
        // a second ping fires after another interval; this time we let the 20ms timeout lapse
        assert_eq!(pinger.next().await.unwrap().unwrap(), PingerEvent::Ping);
        tokio::time::sleep(interval).await;
        assert_eq!(pinger.next().await.unwrap().unwrap(), PingerEvent::Timeout);
        // a late pong recovers the pinger and the cycle restarts
        pinger.on_pong().unwrap();
        assert_eq!(pinger.next().await.unwrap().unwrap(), PingerEvent::Ping);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/protocol.rs | crates/net/eth-wire/src/protocol.rs | //! A Protocol defines a P2P subprotocol in an `RLPx` connection
use crate::{Capability, EthMessageID, EthVersion};
/// Type that represents a [Capability] and the number of messages it uses.
///
/// Only the [Capability] is shared with the remote peer, assuming both parties know the number of
/// messages used by the protocol which is used for message ID multiplexing.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Protocol {
    /// The name of the subprotocol
    pub cap: Capability,
    /// The number of messages used/reserved by this protocol
    ///
    /// This is used for message ID multiplexing
    messages: u8,
}
impl Protocol {
    /// Creates a new protocol from a capability and the number of message ids it reserves.
    pub const fn new(cap: Capability, messages: u8) -> Self {
        Self { cap, messages }
    }
    /// Returns the `eth` protocol for the given version, with the message count dictated by
    /// that version.
    pub const fn eth(version: EthVersion) -> Self {
        Self::new(Capability::eth(version), EthMessageID::message_count(version))
    }
    /// Shorthand for [`Self::eth`] with [`EthVersion::Eth66`].
    pub const fn eth_66() -> Self {
        Self::eth(EthVersion::Eth66)
    }
    /// Shorthand for [`Self::eth`] with [`EthVersion::Eth67`].
    pub const fn eth_67() -> Self {
        Self::eth(EthVersion::Eth67)
    }
    /// Shorthand for [`Self::eth`] with [`EthVersion::Eth68`].
    pub const fn eth_68() -> Self {
        Self::eth(EthVersion::Eth68)
    }
    /// Decomposes the protocol into its [Capability] and reserved message count.
    #[inline]
    pub(crate) fn split(self) -> (Capability, u8) {
        (self.cap, self.messages)
    }
    /// The number of values needed to represent all message IDs of this capability.
    pub const fn messages(&self) -> u8 {
        self.messages
    }
}
impl From<EthVersion> for Protocol {
    /// Converts the version into the matching `eth` [Protocol] via [`Protocol::eth`].
    fn from(version: EthVersion) -> Self {
        Self::eth(version)
    }
}
/// A helper type to keep track of the protocol version and number of messages used by the protocol.
///
/// Used internally while computing shared-capability message id offsets.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) struct ProtoVersion {
    /// Number of messages for a protocol
    pub(crate) messages: u8,
    /// Version of the protocol
    pub(crate) version: usize,
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_protocol_eth_message_count() {
        // Table of (version, expected reserved message count); ensures
        // EthMessageID::message_count() produces the expected results per version.
        let expectations = [
            (EthVersion::Eth66, 17),
            (EthVersion::Eth67, 17),
            (EthVersion::Eth68, 17),
            (EthVersion::Eth69, 18),
        ];
        for (version, expected) in expectations {
            assert_eq!(Protocol::eth(version).messages(), expected);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/capability.rs | crates/net/eth-wire/src/capability.rs | //! All capability related types
use crate::{
errors::{P2PHandshakeError, P2PStreamError},
p2pstream::MAX_RESERVED_MESSAGE_ID,
protocol::{ProtoVersion, Protocol},
version::ParseVersionError,
Capability, EthMessageID, EthVersion,
};
use derive_more::{Deref, DerefMut};
use std::{
borrow::Cow,
collections::{BTreeSet, HashMap},
};
/// This represents a shared capability, its version, and its message id offset.
///
/// The [offset](SharedCapability::message_id_offset) is the message ID offset for this shared
/// capability, determined during the rlpx handshake.
///
/// See also [Message-id based multiplexing](https://github.com/ethereum/devp2p/blob/master/rlpx.md#message-id-based-multiplexing)
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum SharedCapability {
    /// The `eth` capability.
    Eth {
        /// (Highest) negotiated version of the eth capability.
        version: EthVersion,
        /// The message ID offset for this capability.
        ///
        /// This represents the message ID offset for the first message of the eth capability in
        /// the message id space.
        offset: u8,
    },
    /// Any other unknown capability.
    UnknownCapability {
        /// Shared capability.
        cap: Capability,
        /// The message ID offset for this capability.
        ///
        /// This represents the message ID offset for the first message of this capability in
        /// the message id space.
        offset: u8,
        /// The number of messages of this capability. Needed to calculate range of message IDs in
        /// demuxing.
        messages: u8,
    },
}
impl SharedCapability {
    /// Creates a new [`SharedCapability`] based on the given name, offset, version (and messages
    /// if the capability is custom).
    ///
    /// Returns an error if the offset is equal or less than [`MAX_RESERVED_MESSAGE_ID`].
    pub(crate) fn new(
        name: &str,
        version: u8,
        offset: u8,
        messages: u8,
    ) -> Result<Self, SharedCapabilityError> {
        // offsets inside the reserved `p2p` id space are invalid for subprotocols
        if offset <= MAX_RESERVED_MESSAGE_ID {
            return Err(SharedCapabilityError::ReservedMessageIdOffset(offset))
        }
        match name {
            "eth" => Ok(Self::eth(EthVersion::try_from(version)?, offset)),
            _ => Ok(Self::UnknownCapability {
                cap: Capability::new(name.to_string(), version as usize),
                offset,
                messages,
            }),
        }
    }
    /// Creates a new [`SharedCapability`] based on the given name, offset, and version.
    pub(crate) const fn eth(version: EthVersion, offset: u8) -> Self {
        Self::Eth { version, offset }
    }
    /// Returns the capability.
    ///
    /// Allocates for the `eth` variant (reconstructing the [`Capability`]), borrows otherwise.
    pub const fn capability(&self) -> Cow<'_, Capability> {
        match self {
            Self::Eth { version, .. } => Cow::Owned(Capability::eth(*version)),
            Self::UnknownCapability { cap, .. } => Cow::Borrowed(cap),
        }
    }
    /// Returns the name of the capability.
    #[inline]
    pub fn name(&self) -> &str {
        match self {
            Self::Eth { .. } => "eth",
            Self::UnknownCapability { cap, .. } => cap.name.as_ref(),
        }
    }
    /// Returns true if the capability is eth.
    #[inline]
    pub const fn is_eth(&self) -> bool {
        matches!(self, Self::Eth { .. })
    }
    /// Returns the version of the capability.
    ///
    /// NOTE(review): `cap.version as u8` silently truncates versions > 255 for unknown
    /// capabilities — confirm version ranges are bounded upstream.
    pub const fn version(&self) -> u8 {
        match self {
            Self::Eth { version, .. } => *version as u8,
            Self::UnknownCapability { cap, .. } => cap.version as u8,
        }
    }
    /// Returns the eth version if it's the `eth` capability.
    pub const fn eth_version(&self) -> Option<EthVersion> {
        match self {
            Self::Eth { version, .. } => Some(*version),
            _ => None,
        }
    }
    /// Returns the message ID offset of the current capability.
    ///
    /// This represents the message ID offset for the first message of the eth capability in the
    /// message id space.
    pub const fn message_id_offset(&self) -> u8 {
        match self {
            Self::Eth { offset, .. } | Self::UnknownCapability { offset, .. } => *offset,
        }
    }
    /// Returns the message ID offset of the current capability relative to the start of the
    /// reserved message id space: [`MAX_RESERVED_MESSAGE_ID`].
    ///
    /// Cannot underflow: `new()` rejects offsets <= [`MAX_RESERVED_MESSAGE_ID`].
    pub const fn relative_message_id_offset(&self) -> u8 {
        self.message_id_offset() - MAX_RESERVED_MESSAGE_ID - 1
    }
    /// Returns the number of protocol messages supported by this capability.
    pub const fn num_messages(&self) -> u8 {
        match self {
            Self::Eth { version, .. } => EthMessageID::message_count(*version),
            Self::UnknownCapability { messages, .. } => *messages,
        }
    }
}
/// Non-empty, ordered list of recognized shared capabilities.
///
/// Shared capabilities are ordered alphabetically by case sensitive name.
#[derive(Debug, Clone, Deref, DerefMut, PartialEq, Eq)]
pub struct SharedCapabilities(Vec<SharedCapability>);
impl SharedCapabilities {
    /// Merges the local and peer capabilities and returns a new [`SharedCapabilities`] instance.
    #[inline]
    pub fn try_new(
        local_protocols: Vec<Protocol>,
        peer_capabilities: Vec<Capability>,
    ) -> Result<Self, P2PStreamError> {
        shared_capability_offsets(local_protocols, peer_capabilities).map(Self)
    }
    /// Iterates over the shared capabilities.
    #[inline]
    pub fn iter_caps(&self) -> impl Iterator<Item = &SharedCapability> {
        self.0.iter()
    }
    /// Returns the eth capability if it is shared.
    #[inline]
    pub fn eth(&self) -> Result<&SharedCapability, P2PStreamError> {
        self.iter_caps().find(|c| c.is_eth()).ok_or(P2PStreamError::CapabilityNotShared)
    }
    /// Returns the negotiated eth version if it is shared.
    #[inline]
    pub fn eth_version(&self) -> Result<EthVersion, P2PStreamError> {
        self.iter_caps()
            .find_map(SharedCapability::eth_version)
            .ok_or(P2PStreamError::CapabilityNotShared)
    }
    /// Returns true if the shared capabilities contain the given capability.
    #[inline]
    pub fn contains(&self, cap: &Capability) -> bool {
        self.find(cap).is_some()
    }
    /// Returns the shared capability for the given capability.
    ///
    /// Matches on both name and version.
    #[inline]
    pub fn find(&self, cap: &Capability) -> Option<&SharedCapability> {
        self.0.iter().find(|c| c.version() == cap.version as u8 && c.name() == cap.name)
    }
    /// Returns the matching shared capability for the given capability offset.
    ///
    /// `offset` is the multiplexed message id offset of the capability relative to the reserved
    /// message id space. In other words, counting starts at [`MAX_RESERVED_MESSAGE_ID`] + 1, which
    /// corresponds to the first non-reserved message id.
    ///
    /// For example: `offset == 0` corresponds to the first shared message across the shared
    /// capabilities and will return the first shared capability that supports messages.
    #[inline]
    pub fn find_by_relative_offset(&self, offset: u8) -> Option<&SharedCapability> {
        self.find_by_offset(offset.saturating_add(MAX_RESERVED_MESSAGE_ID + 1))
    }
    /// Returns the matching shared capability for the given capability offset.
    ///
    /// `offset` is the multiplexed message id offset of the capability that includes the reserved
    /// message id space.
    ///
    /// This will always return None if `offset` is less than or equal to
    /// [`MAX_RESERVED_MESSAGE_ID`] because the reserved message id space is not shared.
    #[inline]
    pub fn find_by_offset(&self, offset: u8) -> Option<&SharedCapability> {
        // capabilities are sorted by ascending offset, so the owner of `offset` is the last
        // capability whose own offset does not exceed it
        let mut iter = self.0.iter();
        let mut cap = iter.next()?;
        if offset < cap.message_id_offset() {
            // reserved message id space
            return None
        }
        for next in iter {
            if offset < next.message_id_offset() {
                return Some(cap)
            }
            cap = next
        }
        Some(cap)
    }
    /// Returns the shared capability for the given capability or an error if it's not compatible.
    #[inline]
    pub fn ensure_matching_capability(
        &self,
        cap: &Capability,
    ) -> Result<&SharedCapability, UnsupportedCapabilityError> {
        self.find(cap).ok_or_else(|| UnsupportedCapabilityError { capability: cap.clone() })
    }
    /// Returns the number of shared capabilities.
    #[inline]
    pub const fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns true if there are no shared capabilities.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
/// Determines the offsets for each shared capability between the input list of peer
/// capabilities and the input list of locally supported [Protocol].
///
/// Additionally, the `p2p` capability version 5 is supported, but is
/// expected _not_ to be in neither `local_protocols` or `peer_capabilities`.
///
/// **Note**: For `local_protocols` this takes [Protocol] because we need to know the number of
/// messages per versioned capability. From the remote we only get the plain [Capability].
#[inline]
pub fn shared_capability_offsets(
    local_protocols: Vec<Protocol>,
    peer_capabilities: Vec<Capability>,
) -> Result<Vec<SharedCapability>, P2PStreamError> {
    // find intersection of capabilities
    let our_capabilities =
        local_protocols.into_iter().map(Protocol::split).collect::<HashMap<_, _>>();
    // map of capability name to version
    let mut shared_capabilities: HashMap<_, ProtoVersion> = HashMap::default();
    // The `Ord` implementation for capability names should be equivalent to geth (and every other
    // client), since geth uses golang's default string comparison, which orders strings
    // lexicographically.
    // https://golang.org/pkg/strings/#Compare
    //
    // This is important because the capability name is used to determine the message id offset, so
    // if the sorting is not identical, offsets for connected peers could be inconsistent.
    // This would cause the peers to send messages with the wrong message id, which is usually a
    // protocol violation.
    //
    // The `Ord` implementation for `str` orders strings lexicographically.
    let mut shared_capability_names = BTreeSet::new();
    // find highest shared version of each shared capability
    for peer_capability in peer_capabilities {
        // if we contain this specific capability both peers share it
        if let Some(messages) = our_capabilities.get(&peer_capability).copied() {
            // If multiple versions are shared of the same (equal name) capability, the numerically
            // highest wins, others are ignored
            if shared_capabilities
                .get(&peer_capability.name)
                .is_none_or(|v| peer_capability.version > v.version)
            {
                shared_capabilities.insert(
                    peer_capability.name.clone(),
                    ProtoVersion { version: peer_capability.version, messages },
                );
                shared_capability_names.insert(peer_capability.name);
            }
        }
    }
    // disconnect if we don't share any capabilities
    if shared_capabilities.is_empty() {
        return Err(P2PStreamError::HandshakeError(P2PHandshakeError::NoSharedCapabilities))
    }
    // order versions based on capability name (alphabetical) and select offsets based on
    // BASE_OFFSET + prev_total_message
    let mut shared_with_offsets = Vec::new();
    // Message IDs are assumed to be compact from ID 0x10 onwards (0x00-0x0f is reserved for the
    // "p2p" capability) and given to each shared (equal-version, equal-name) capability in
    // alphabetic order.
    //
    // NOTE(review): the running `offset` additions below can overflow u8 with enough shared
    // messages — confirm the total is bounded by handshake limits.
    let mut offset = MAX_RESERVED_MESSAGE_ID + 1;
    for name in shared_capability_names {
        let proto_version = &shared_capabilities[&name];
        let shared_capability = SharedCapability::new(
            &name,
            proto_version.version as u8,
            offset,
            proto_version.messages,
        )?;
        offset += shared_capability.num_messages();
        shared_with_offsets.push(shared_capability);
    }
    // defensive re-check; effectively unreachable since `shared_capabilities` was verified
    // non-empty above and the loop pushes one entry per name
    if shared_with_offsets.is_empty() {
        return Err(P2PStreamError::HandshakeError(P2PHandshakeError::NoSharedCapabilities))
    }
    Ok(shared_with_offsets)
}
/// An error that may occur while creating a [`SharedCapability`].
#[derive(Debug, thiserror::Error)]
pub enum SharedCapabilityError {
    /// Unsupported `eth` version; wraps the underlying [`ParseVersionError`].
    #[error(transparent)]
    UnsupportedVersion(#[from] ParseVersionError),
    /// Thrown when the message id for a [`SharedCapability`] overlaps with the reserved p2p
    /// message id space [`MAX_RESERVED_MESSAGE_ID`].
    #[error("message id offset `{0}` is reserved")]
    ReservedMessageIdOffset(u8),
}
/// An error thrown when capabilities mismatch.
#[derive(Debug, thiserror::Error)]
#[error("unsupported capability {capability}")]
pub struct UnsupportedCapabilityError {
    // The capability (name + version) the peer does not support.
    capability: Capability,
}
impl UnsupportedCapabilityError {
    /// Creates a new error for the given unsupported capability.
    pub const fn new(capability: Capability) -> Self {
        Self { capability }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{Capabilities, Capability};
    use alloy_primitives::bytes::Bytes;
    use alloy_rlp::{Decodable, Encodable};
    use reth_eth_wire_types::RawCapabilityMessage;

    // Shared capabilities are expected to start directly after the reserved p2p
    // message-id space, i.e. at `MAX_RESERVED_MESSAGE_ID + 1` (0x10).

    #[test]
    fn from_eth_68() {
        let capability = SharedCapability::new("eth", 68, MAX_RESERVED_MESSAGE_ID + 1, 13).unwrap();
        assert_eq!(capability.name(), "eth");
        assert_eq!(capability.version(), 68);
        assert_eq!(
            capability,
            SharedCapability::Eth {
                version: EthVersion::Eth68,
                offset: MAX_RESERVED_MESSAGE_ID + 1
            }
        );
    }

    #[test]
    fn from_eth_67() {
        let capability = SharedCapability::new("eth", 67, MAX_RESERVED_MESSAGE_ID + 1, 13).unwrap();
        assert_eq!(capability.name(), "eth");
        assert_eq!(capability.version(), 67);
        assert_eq!(
            capability,
            SharedCapability::Eth {
                version: EthVersion::Eth67,
                offset: MAX_RESERVED_MESSAGE_ID + 1
            }
        );
    }

    #[test]
    fn from_eth_66() {
        // eth/66 carries 15 messages (includes GetNodeData/NodeData), unlike eth/67+.
        let capability = SharedCapability::new("eth", 66, MAX_RESERVED_MESSAGE_ID + 1, 15).unwrap();
        assert_eq!(capability.name(), "eth");
        assert_eq!(capability.version(), 66);
        assert_eq!(
            capability,
            SharedCapability::Eth {
                version: EthVersion::Eth66,
                offset: MAX_RESERVED_MESSAGE_ID + 1
            }
        );
    }

    #[test]
    fn capabilities_supports_eth() {
        let capabilities: Capabilities = vec![
            Capability::new_static("eth", 66),
            Capability::new_static("eth", 67),
            Capability::new_static("eth", 68),
        ]
        .into();
        assert!(capabilities.supports_eth());
        assert!(capabilities.supports_eth_v66());
        assert!(capabilities.supports_eth_v67());
        assert!(capabilities.supports_eth_v68());
    }

    #[test]
    fn test_peer_capability_version_zero() {
        // A version-0 custom capability must still negotiate as an unknown capability.
        let cap = Capability::new_static("TestName", 0);
        let local_capabilities: Vec<Protocol> =
            vec![Protocol::new(cap.clone(), 0), EthVersion::Eth67.into(), EthVersion::Eth68.into()];
        let peer_capabilities = vec![cap.clone()];
        let shared = shared_capability_offsets(local_capabilities, peer_capabilities).unwrap();
        assert_eq!(shared.len(), 1);
        // offset 16 == MAX_RESERVED_MESSAGE_ID + 1
        assert_eq!(shared[0], SharedCapability::UnknownCapability { cap, offset: 16, messages: 0 })
    }

    #[test]
    fn test_peer_lower_capability_version() {
        // Peer only supports eth/66; the shared version must be the peer's lower one.
        let local_capabilities: Vec<Protocol> =
            vec![EthVersion::Eth66.into(), EthVersion::Eth67.into(), EthVersion::Eth68.into()];
        let peer_capabilities: Vec<Capability> = vec![EthVersion::Eth66.into()];
        let shared_capability =
            shared_capability_offsets(local_capabilities, peer_capabilities).unwrap()[0].clone();
        assert_eq!(
            shared_capability,
            SharedCapability::Eth {
                version: EthVersion::Eth66,
                offset: MAX_RESERVED_MESSAGE_ID + 1
            }
        )
    }

    #[test]
    fn test_peer_capability_version_too_low() {
        let local: Vec<Protocol> = vec![EthVersion::Eth67.into()];
        let peer_capabilities: Vec<Capability> = vec![EthVersion::Eth66.into()];
        let shared_capability = shared_capability_offsets(local, peer_capabilities);
        assert!(matches!(
            shared_capability,
            Err(P2PStreamError::HandshakeError(P2PHandshakeError::NoSharedCapabilities))
        ))
    }

    #[test]
    fn test_peer_capability_version_too_high() {
        let local_capabilities = vec![EthVersion::Eth66.into()];
        let peer_capabilities = vec![EthVersion::Eth67.into()];
        let shared_capability = shared_capability_offsets(local_capabilities, peer_capabilities);
        assert!(matches!(
            shared_capability,
            Err(P2PStreamError::HandshakeError(P2PHandshakeError::NoSharedCapabilities))
        ))
    }

    #[test]
    fn test_find_by_offset() {
        let local_capabilities = vec![EthVersion::Eth66.into()];
        let peer_capabilities = vec![EthVersion::Eth66.into()];
        let shared = SharedCapabilities::try_new(local_capabilities, peer_capabilities).unwrap();
        let shared_eth = shared.find_by_relative_offset(0).unwrap();
        assert_eq!(shared_eth.name(), "eth");
        let shared_eth = shared.find_by_offset(MAX_RESERVED_MESSAGE_ID + 1).unwrap();
        assert_eq!(shared_eth.name(), "eth");
        // reserved message id space
        assert!(shared.find_by_offset(MAX_RESERVED_MESSAGE_ID).is_none());
    }

    #[test]
    fn test_find_by_offset_many() {
        // "aaa" sorts before "eth", so its 5 messages occupy the first shared slots.
        let cap = Capability::new_static("aaa", 1);
        let proto = Protocol::new(cap.clone(), 5);
        let local_capabilities = vec![proto.clone(), EthVersion::Eth66.into()];
        let peer_capabilities = vec![cap, EthVersion::Eth66.into()];
        let shared = SharedCapabilities::try_new(local_capabilities, peer_capabilities).unwrap();
        let shared_eth = shared.find_by_relative_offset(0).unwrap();
        assert_eq!(shared_eth.name(), proto.cap.name);
        let shared_eth = shared.find_by_offset(MAX_RESERVED_MESSAGE_ID + 1).unwrap();
        assert_eq!(shared_eth.name(), proto.cap.name);
        // the 5th shared message (0,1,2,3,4) is the last message of the aaa capability
        let shared_eth = shared.find_by_relative_offset(4).unwrap();
        assert_eq!(shared_eth.name(), proto.cap.name);
        let shared_eth = shared.find_by_offset(MAX_RESERVED_MESSAGE_ID + 5).unwrap();
        assert_eq!(shared_eth.name(), proto.cap.name);
        // the 6th shared message is the first message of the eth capability
        let shared_eth = shared.find_by_relative_offset(1 + proto.messages()).unwrap();
        assert_eq!(shared_eth.name(), "eth");
    }

    #[test]
    fn test_raw_capability_rlp() {
        let msg = RawCapabilityMessage { id: 1, payload: Bytes::from(vec![0x01, 0x02, 0x03]) };
        // Encode the message into bytes
        let mut encoded = Vec::new();
        msg.encode(&mut encoded);
        // Decode the bytes back into RawCapabilityMessage
        let decoded = RawCapabilityMessage::decode(&mut &encoded[..]).unwrap();
        // Verify that the decoded message matches the original
        assert_eq!(msg, decoded);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/errors/eth.rs | crates/net/eth-wire/src/errors/eth.rs | //! Error handling for (`EthStream`)[`crate::EthStream`]
use crate::{
errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason,
};
use alloy_chains::Chain;
use alloy_primitives::B256;
use reth_eth_wire_types::EthVersion;
use reth_ethereum_forks::ValidationError;
use reth_primitives_traits::{GotExpected, GotExpectedBoxed};
use std::io;
/// Errors when sending/receiving messages
#[derive(thiserror::Error, Debug)]
pub enum EthStreamError {
    #[error(transparent)]
    /// Error of the underlying P2P connection.
    P2PStreamError(#[from] P2PStreamError),
    #[error(transparent)]
    /// Failed to parse peer's version.
    ParseVersionError(#[from] ParseVersionError),
    #[error(transparent)]
    /// Failed Ethereum handshake.
    EthHandshakeError(#[from] EthHandshakeError),
    /// Thrown when decoding a message failed.
    #[error(transparent)]
    InvalidMessage(#[from] MessageError),
    // NOTE(review): the display text hard-codes "10MB"; confirm it matches the limit actually
    // enforced by the stream implementation.
    #[error("message size ({0}) exceeds max length (10MB)")]
    /// Received a message whose size exceeds the standard limit.
    MessageTooBig(usize),
    #[error(
        "TransactionHashes invalid len of fields: hashes_len={hashes_len} types_len={types_len} sizes_len={sizes_len}"
    )]
    /// Received malformed transaction hashes message with discrepancies in field lengths.
    TransactionHashesInvalidLenOfFields {
        /// The number of transaction hashes.
        hashes_len: usize,
        /// The number of transaction types.
        types_len: usize,
        /// The number of transaction sizes.
        sizes_len: usize,
    },
    /// Error when data is not received from peer for a prolonged period.
    #[error("never received data from remote peer")]
    StreamTimeout,
    /// Error triggered when an unknown or unsupported Ethereum message ID is received.
    #[error("Received unknown ETH message ID: 0x{message_id:X}")]
    UnsupportedMessage {
        /// The identifier of the unknown Ethereum message.
        message_id: u8,
    },
}
// === impl EthStreamError ===

impl EthStreamError {
    /// Returns the [`DisconnectReason`] if the error is a disconnect message
    pub const fn as_disconnected(&self) -> Option<DisconnectReason> {
        // Only the underlying p2p stream error can carry a disconnect reason.
        match self {
            Self::P2PStreamError(err) => err.as_disconnected(),
            _ => None,
        }
    }

    /// Returns the [`io::Error`] if it was caused by IO
    pub const fn as_io(&self) -> Option<&io::Error> {
        match self {
            Self::P2PStreamError(P2PStreamError::Io(io)) => Some(io),
            _ => None,
        }
    }
}
impl From<io::Error> for EthStreamError {
    fn from(err: io::Error) -> Self {
        // io errors always surface through the underlying p2p stream error.
        Self::P2PStreamError(P2PStreamError::Io(err))
    }
}
/// Error that can occur during the `eth` sub-protocol handshake.
#[derive(thiserror::Error, Debug)]
pub enum EthHandshakeError {
    /// Status message received or sent outside of the handshake process.
    #[error("status message can only be recv/sent in handshake")]
    StatusNotInHandshake,
    /// Receiving a non-status message during the handshake phase.
    #[error("received non-status message when trying to handshake")]
    NonStatusMessageInHandshake,
    #[error("no response received when sending out handshake")]
    /// No response received during the handshake process.
    NoResponse,
    #[error(transparent)]
    /// Invalid fork data.
    InvalidFork(#[from] ValidationError),
    #[error("mismatched genesis in status message: {0}")]
    /// Mismatch in the genesis block during status exchange.
    /// Boxed (`GotExpectedBoxed`) to keep this variant small.
    MismatchedGenesis(GotExpectedBoxed<B256>),
    #[error("mismatched protocol version in status message: {0}")]
    /// Mismatched protocol versions in status messages.
    MismatchedProtocolVersion(GotExpected<EthVersion>),
    #[error("mismatched chain in status message: {0}")]
    /// Mismatch in chain details in status messages.
    MismatchedChain(GotExpected<Chain>),
    #[error("total difficulty bitlen is too large: got {got}, maximum {maximum}")]
    /// Excessively large total difficulty bit lengths.
    TotalDifficultyBitLenTooLarge {
        /// The actual bit length of the total difficulty.
        got: usize,
        /// The maximum allowed bit length for the total difficulty.
        maximum: usize,
    },
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/errors/mod.rs | crates/net/eth-wire/src/errors/mod.rs | //! Error types for stream variants
mod eth;
mod p2p;
pub use eth::*;
pub use p2p::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/src/errors/p2p.rs | crates/net/eth-wire/src/errors/p2p.rs | //! Error handling for [`P2PStream`](crate::P2PStream).
use std::io;
use reth_eth_wire_types::{DisconnectReason, UnknownDisconnectReason};
use reth_primitives_traits::GotExpected;
use crate::{capability::SharedCapabilityError, ProtocolVersion};
/// Errors when sending/receiving p2p messages. These should result in kicking the peer.
#[derive(thiserror::Error, Debug)]
pub enum P2PStreamError {
/// I/O error.
#[error(transparent)]
Io(#[from] io::Error),
/// RLP encoding/decoding error.
#[error(transparent)]
Rlp(#[from] alloy_rlp::Error),
/// Error in compression/decompression using Snappy.
#[error(transparent)]
Snap(#[from] snap::Error),
/// Error during the P2P handshake.
#[error(transparent)]
HandshakeError(#[from] P2PHandshakeError),
/// Message size exceeds maximum length error.
#[error("message size ({message_size}) exceeds max length ({max_size})")]
MessageTooBig {
/// The actual size of the message received.
message_size: usize,
/// The maximum allowed size for the message.
max_size: usize,
},
/// Unknown reserved P2P message ID error.
#[error("unknown reserved p2p message id: {0}")]
UnknownReservedMessageId(u8),
/// Empty protocol message received error.
#[error("empty protocol message received")]
EmptyProtocolMessage,
/// Error related to the Pinger.
#[error(transparent)]
PingerError(#[from] PingerError),
/// Ping timeout error.
#[error("ping timed out with")]
PingTimeout,
/// Error parsing shared capabilities.
#[error(transparent)]
ParseSharedCapability(#[from] SharedCapabilityError),
/// Capability not supported on the stream to this peer.
#[error("capability not supported on stream to this peer")]
CapabilityNotShared,
/// Mismatched protocol version error.
#[error("mismatched protocol version in Hello message: {0}")]
MismatchedProtocolVersion(GotExpected<ProtocolVersion>),
/// Too many messages buffered before sending.
#[error("too many messages buffered before sending")]
SendBufferFull,
/// Disconnected error.
#[error("disconnected")]
Disconnected(DisconnectReason),
/// Unknown disconnect reason error.
#[error("unknown disconnect reason: {0}")]
UnknownDisconnectReason(#[from] UnknownDisconnectReason),
}
// === impl P2PStreamError ===

impl P2PStreamError {
    /// Returns the [`DisconnectReason`] if it is the `Disconnected` variant.
    pub const fn as_disconnected(&self) -> Option<DisconnectReason> {
        // A disconnect reason can arrive either as a handshake-phase disconnect
        // or as a post-handshake disconnect message.
        match self {
            Self::HandshakeError(P2PHandshakeError::Disconnected(reason)) |
            Self::Disconnected(reason) => Some(*reason),
            _ => None,
        }
    }
}
/// Errors when conducting a p2p handshake.
#[derive(thiserror::Error, Debug, Clone, Eq, PartialEq)]
pub enum P2PHandshakeError {
    /// Hello message received/sent outside of handshake error.
    #[error("hello message can only be recv/sent in handshake")]
    HelloNotInHandshake,
    /// Received a non-hello message when trying to handshake.
    #[error("received non-hello message when trying to handshake")]
    NonHelloMessageInHandshake,
    /// No capabilities shared with the peer.
    #[error("no capabilities shared with peer")]
    NoSharedCapabilities,
    /// No response received when sending out handshake.
    #[error("no response received when sending out handshake")]
    NoResponse,
    /// Handshake timed out.
    #[error("handshake timed out")]
    Timeout,
    /// Disconnected by peer with a specific reason.
    #[error("disconnected by peer: {0}")]
    Disconnected(DisconnectReason),
    /// Error decoding a message during handshake.
    #[error("error decoding a message during handshake: {0}")]
    DecodeError(#[from] alloy_rlp::Error),
}
/// An error that can occur when interacting with a pinger.
#[derive(Debug, thiserror::Error)]
pub enum PingerError {
    /// An unexpected pong was received while the pinger was in the `Ready` state.
    #[error("pong received while ready")]
    UnexpectedPong,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/tests/fuzz_roundtrip.rs | crates/net/eth-wire/tests/fuzz_roundtrip.rs | //! Round-trip encoding fuzzing for the `eth-wire` crate.
use alloy_rlp::{Decodable, Encodable};
use serde::Serialize;
use std::fmt::Debug;
/// Creates a fuzz test for a type that should be [`Encodable`](alloy_rlp::Encodable) and
/// [`Decodable`](alloy_rlp::Decodable).
///
/// The test will create a random instance of the type, encode it, and then decode it.
fn roundtrip_encoding<T>(thing: T)
where
    T: Encodable + Decodable + Debug + PartialEq + Eq,
{
    // Encode into a scratch buffer, then decode it back and require equality.
    let mut buf = Vec::new();
    thing.encode(&mut buf);
    let decoded = T::decode(&mut buf.as_slice()).unwrap();
    assert_eq!(thing, decoded, "expected: {thing:?}, got: {decoded:?}");
}
/// This method delegates to `roundtrip_encoding`, but is used to enforce that each type input to
/// the macro has a proper Default, Clone, and Serialize impl. These trait implementations are
/// necessary for test-fuzz to autogenerate a corpus.
///
/// If it makes sense to remove a Default impl from a type that we fuzz, this should prevent the
/// fuzz test from compiling, rather than failing at runtime.
/// In this case, we should implement a wrapper for the type that should no longer implement
/// Default, Clone, or Serialize, and fuzz the wrapper type instead.
fn roundtrip_fuzz<T>(thing: T)
where
    T: Encodable + Decodable + Clone + Serialize + Debug + PartialEq + Eq,
{
    // The extra bounds are the whole point of this wrapper; the work itself is delegated.
    roundtrip_encoding(thing)
}
/// Creates a fuzz test for a rlp encodable and decodable type.
///
/// Expands to a `#[test_fuzz]` function named `$fuzzname` that drives
/// `roundtrip_fuzz` for the given type `$x`.
macro_rules! fuzz_type_and_name {
    ( $x:ty, $fuzzname:ident ) => {
        /// Fuzzes the round-trip encoding of the type.
        #[expect(non_snake_case)]
        #[test_fuzz]
        fn $fuzzname(thing: $x) {
            crate::roundtrip_fuzz::<$x>(thing)
        }
    };
}
/// Round-trip RLP fuzz targets for the `eth-wire` message types.
#[cfg(test)]
#[expect(missing_docs)]
pub mod fuzz_rlp {
    use crate::roundtrip_encoding;
    use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper};
    use reth_codecs::add_arbitrary_tests;
    // Fix: `GetNodeData` and `NodeData` were imported but only referenced from commented-out
    // fuzz macros below, producing unused-import warnings; they are removed from this list.
    use reth_eth_wire::{
        BlockBodies, BlockHeaders, DisconnectReason, GetBlockBodies, GetBlockHeaders,
        GetPooledTransactions, GetReceipts, HelloMessage, NewBlock, NewBlockHashes,
        NewPooledTransactionHashes66, NewPooledTransactionHashes68, P2PMessage,
        PooledTransactions, Receipts, Status, Transactions,
    };
    use serde::{Deserialize, Serialize};
    use test_fuzz::test_fuzz;

    // manually test Ping and Pong which are not covered by the above

    /// Tests the round-trip encoding of Ping
    #[test]
    fn roundtrip_ping() {
        roundtrip_encoding::<P2PMessage>(P2PMessage::Ping)
    }

    /// Tests the round-trip encoding of Pong
    #[test]
    fn roundtrip_pong() {
        roundtrip_encoding::<P2PMessage>(P2PMessage::Pong)
    }

    // p2p subprotocol messages

    // see message below for why wrapper types are necessary for fuzzing types that do not have a
    // Default impl
    #[derive(
        Clone,
        Debug,
        PartialEq,
        Eq,
        Serialize,
        Deserialize,
        RlpEncodableWrapper,
        RlpDecodableWrapper,
    )]
    #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
    #[add_arbitrary_tests(rlp)]
    struct HelloMessageWrapper(HelloMessage);

    impl Default for HelloMessageWrapper {
        fn default() -> Self {
            Self(HelloMessage {
                client_version: Default::default(),
                capabilities: Default::default(),
                protocol_version: Default::default(),
                id: Default::default(),
                port: Default::default(),
            })
        }
    }

    fuzz_type_and_name!(HelloMessageWrapper, fuzz_HelloMessage);
    fuzz_type_and_name!(DisconnectReason, fuzz_DisconnectReason);

    // eth subprotocol messages
    fuzz_type_and_name!(Status, fuzz_Status);
    fuzz_type_and_name!(NewBlockHashes, fuzz_NewBlockHashes);
    fuzz_type_and_name!(Transactions, fuzz_Transactions);

    // GetBlockHeaders implements all the traits required for roundtrip_encoding, so why is this
    // wrapper type needed?
    //
    // While GetBlockHeaders implements all traits needed to work for test-fuzz, it does not have
    // an obvious Default implementation since BlockHashOrNumber can be either a hash or number,
    // and the default value of BlockHashOrNumber is not obvious.
    //
    // We just provide a default value here so test-fuzz can auto-generate a corpus file for the
    // type.
    #[derive(
        Clone,
        Debug,
        PartialEq,
        Eq,
        Serialize,
        Deserialize,
        RlpEncodableWrapper,
        RlpDecodableWrapper,
    )]
    #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
    #[add_arbitrary_tests(rlp)]
    struct GetBlockHeadersWrapper(GetBlockHeaders);

    impl Default for GetBlockHeadersWrapper {
        fn default() -> Self {
            Self(GetBlockHeaders {
                start_block: 0u64.into(),
                limit: Default::default(),
                skip: Default::default(),
                direction: Default::default(),
            })
        }
    }

    fuzz_type_and_name!(GetBlockHeadersWrapper, fuzz_GetBlockHeaders);
    fuzz_type_and_name!(BlockHeaders, fuzz_BlockHeaders);
    fuzz_type_and_name!(GetBlockBodies, fuzz_GetBlockBodies);
    fuzz_type_and_name!(BlockBodies, fuzz_BlockBodies);
    fuzz_type_and_name!(NewBlock, fuzz_NewBlock);
    fuzz_type_and_name!(NewPooledTransactionHashes66, fuzz_NewPooledTransactionHashes66);
    fuzz_type_and_name!(NewPooledTransactionHashes68, fuzz_NewPooledTransactionHashes68);
    fuzz_type_and_name!(GetPooledTransactions, fuzz_GetPooledTransactions);
    fuzz_type_and_name!(PooledTransactions, fuzz_PooledTransactions);
    // GetNodeData and NodeData are disabled for privacy - skip fuzz tests
    // fuzz_type_and_name!(GetNodeData, fuzz_GetNodeData);
    // fuzz_type_and_name!(NodeData, fuzz_NodeData);
    fuzz_type_and_name!(GetReceipts, fuzz_GetReceipts);
    fuzz_type_and_name!(Receipts, fuzz_Receipts);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/tests/new_pooled_transactions.rs | crates/net/eth-wire/tests/new_pooled_transactions.rs | //! Decoding tests for [`NewPooledTransactions`]
use alloy_primitives::hex;
use alloy_rlp::Decodable;
use reth_eth_wire::NewPooledTransactionHashes66;
use std::{fs, path::PathBuf};
/// Ensures a captured network payload still decodes as `NewPooledTransactionHashes66`.
#[test]
fn decode_new_pooled_transaction_hashes_network() {
    // Fixture captured from a live session, stored hex-encoded on disk.
    let fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("testdata/new_pooled_transactions_network_rlp");
    let contents = fs::read_to_string(fixture).expect("Unable to read file");
    let payload = hex::decode(contents.trim()).unwrap();
    NewPooledTransactionHashes66::decode(&mut payload.as_slice()).unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/tests/pooled_transactions.rs | crates/net/eth-wire/tests/pooled_transactions.rs | //! Decoding tests for [`PooledTransactions`]
use alloy_consensus::transaction::PooledTransaction;
use alloy_eips::eip2718::Decodable2718;
use alloy_primitives::hex;
use alloy_rlp::{Decodable, Encodable};
use reth_eth_wire::{EthNetworkPrimitives, EthVersion, PooledTransactions, ProtocolMessage};
use std::{fs, path::PathBuf};
use test_fuzz::test_fuzz;
/// Helper function to ensure encode-decode roundtrip works for [`PooledTransactions`].
///
/// Returns an error if the input cannot be decoded; panics if re-encoding the decoded value
/// does not reproduce exactly the bytes that were consumed.
#[test_fuzz]
fn roundtrip_pooled_transactions(hex_data: Vec<u8>) -> Result<(), alloy_rlp::Error> {
    let input_rlp = &mut &hex_data[..];
    let txs: PooledTransactions = PooledTransactions::decode(input_rlp)?;
    // get the amount of bytes decoded in `decode` by subtracting the length of the original buf,
    // from the length of the remaining bytes
    let decoded_len = hex_data.len() - input_rlp.len();
    let expected_encoding = hex_data[..decoded_len].to_vec();
    // do a roundtrip test
    let mut buf = Vec::new();
    txs.encode(&mut buf);
    assert_eq!(expected_encoding, buf);
    // now do another decoding, on what we encoded - this should succeed
    let txs2: PooledTransactions = PooledTransactions::decode(&mut &buf[..]).unwrap();
    // ensure that the payload length is the same
    assert_eq!(txs.length(), txs2.length());
    // ensure that the length is equal to the length of the encoded data
    assert_eq!(txs.length(), buf.len());
    Ok(())
}
/// Round-trips a fixture containing a blob transaction through the fuzz helper.
#[test]
fn decode_pooled_transactions_data() {
    let fixture =
        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/pooled_transactions_with_blob");
    let contents = fs::read_to_string(fixture).expect("Unable to read file");
    let payload = hex::decode(contents.trim()).expect("Unable to decode hex");
    assert!(roundtrip_pooled_transactions(payload).is_ok());
}
/// Decodes a full eth/68 protocol message carrying pooled blob transactions.
#[test]
fn decode_request_pair_pooled_blob_transactions() {
    let fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("testdata/request_pair_pooled_blob_transactions");
    let contents = fs::read_to_string(fixture).expect("Unable to read file");
    let payload = hex::decode(contents.trim()).unwrap();
    let _msg: ProtocolMessage<EthNetworkPrimitives> =
        ProtocolMessage::decode_message(EthVersion::Eth68, &mut payload.as_slice()).unwrap();
}
/// Decodes a standalone blob transaction fixture via RLP.
#[test]
fn decode_blob_transaction_data() {
    let fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/blob_transaction");
    let contents = fs::read_to_string(fixture).expect("Unable to read file");
    let payload = hex::decode(contents.trim()).unwrap();
    PooledTransaction::decode(&mut payload.as_slice()).unwrap();
}
/// Decodes a blob transaction fixture via the EIP-2718 typed-envelope path.
#[test]
fn decode_blob_rpc_transaction() {
    // test data pulled from hive test that sends blob transactions
    let fixture = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/rpc_blob_transaction");
    let contents = fs::read_to_string(fixture).expect("Unable to read file");
    let payload = hex::decode(contents.trim()).unwrap();
    PooledTransaction::decode_2718(&mut payload.as_ref()).unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/eth-wire/tests/new_block.rs | crates/net/eth-wire/tests/new_block.rs | //! Decoding tests for [`NewBlock`]
use alloy_primitives::hex;
use alloy_rlp::Decodable;
use reth_eth_wire::NewBlock;
use std::{fs, path::PathBuf};
#[test]
fn decode_new_block_network() {
let network_data_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/new_block_network_rlp");
let data = fs::read_to_string(network_data_path).expect("Unable to read file");
let hex_data = hex::decode(data.trim()).unwrap();
let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap();
}
#[test]
fn decode_new_block_network_bsc_one() {
let network_data_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_one");
let data = fs::read_to_string(network_data_path).expect("Unable to read file");
let hex_data = hex::decode(data.trim()).unwrap();
let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap();
}
#[test]
fn decode_new_block_network_bsc_two() {
let network_data_path =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_two");
let data = fs::read_to_string(network_data_path).expect("Unable to read file");
let hex_data = hex::decode(data.trim()).unwrap();
let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/config.rs | crates/net/discv4/src/config.rs | //! A set of configuration parameters to tune the discovery protocol.
//!
//! This basis of this file has been taken from the discv5 codebase:
//! <https://github.com/sigp/discv5>
use alloy_primitives::bytes::Bytes;
use alloy_rlp::Encodable;
use reth_net_banlist::BanList;
use reth_net_nat::{NatResolver, ResolveNatInterval};
use reth_network_peers::NodeRecord;
use std::{
collections::{HashMap, HashSet},
time::Duration,
};
/// Configuration parameters that define the performance of the discovery network.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Discv4Config {
    /// Whether to enable the incoming packet filter. Default: false.
    pub enable_packet_filter: bool,
    /// Size of the channel buffer for outgoing messages.
    pub udp_egress_message_buffer: usize,
    /// Size of the channel buffer for incoming messages.
    pub udp_ingress_message_buffer: usize,
    /// The number of allowed consecutive failures for `FindNode` requests. Default: 5.
    pub max_find_node_failures: u8,
    /// The interval to use when checking for expired nodes that need to be re-pinged. Default:
    /// 10min.
    pub ping_interval: Duration,
    /// The duration after which we consider a ping timed out.
    pub ping_expiration: Duration,
    /// The rate at which new random lookups should be triggered.
    pub lookup_interval: Duration,
    /// The duration after which we consider a `FindNode` request timed out.
    pub request_timeout: Duration,
    /// The duration after which we consider an enr request timed out.
    pub enr_expiration: Duration,
    /// The duration we set for neighbours responses.
    pub neighbours_expiration: Duration,
    /// Provides a way to ban peers and ips.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub ban_list: BanList,
    /// Set the default duration for which nodes are banned for. These timeouts are checked every 5
    /// minutes, so the precision will be to the nearest 5 minutes. If set to `None`, bans from
    /// the filter will last indefinitely. Default is 1 hour.
    pub ban_duration: Option<Duration>,
    /// Nodes to boot from.
    pub bootstrap_nodes: HashSet<NodeRecord>,
    /// Whether to randomly discover new peers.
    ///
    /// If true, the node will automatically randomly walk the DHT in order to find new peers.
    pub enable_dht_random_walk: bool,
    /// Whether to automatically lookup peers.
    pub enable_lookup: bool,
    /// Whether to enforce EIP-868 extension.
    pub enable_eip868: bool,
    /// Whether to respect expiration timestamps in messages.
    pub enforce_expiration_timestamps: bool,
    /// Additional pairs to include in the [`Enr`](enr::Enr) if the EIP-868 extension is enabled <https://eips.ethereum.org/EIPS/eip-868>
    pub additional_eip868_rlp_pairs: HashMap<Vec<u8>, Bytes>,
    /// If configured, try to resolve public ip
    pub external_ip_resolver: Option<NatResolver>,
    /// If configured and a `external_ip_resolver` is configured, try to resolve the external ip
    /// using this interval.
    pub resolve_external_ip_interval: Option<Duration>,
    /// The duration after which we consider a bond expired.
    pub bond_expiration: Duration,
}
impl Discv4Config {
/// Returns a new default builder instance
pub fn builder() -> Discv4ConfigBuilder {
Default::default()
}
/// Add another key value pair to include in the ENR
pub fn add_eip868_pair(&mut self, key: impl Into<Vec<u8>>, value: impl Encodable) -> &mut Self {
self.add_eip868_rlp_pair(key, Bytes::from(alloy_rlp::encode(&value)))
}
/// Add another key value pair to include in the ENR
pub fn add_eip868_rlp_pair(&mut self, key: impl Into<Vec<u8>>, rlp: Bytes) -> &mut Self {
self.additional_eip868_rlp_pairs.insert(key.into(), rlp);
self
}
/// Extend additional key value pairs to include in the ENR
pub fn extend_eip868_rlp_pairs(
&mut self,
pairs: impl IntoIterator<Item = (impl Into<Vec<u8>>, Bytes)>,
) -> &mut Self {
for (k, v) in pairs {
self.add_eip868_rlp_pair(k, v);
}
self
}
/// Returns the corresponding [`ResolveNatInterval`], if a [`NatResolver`] and an interval was
/// configured
pub fn resolve_external_ip_interval(&self) -> Option<ResolveNatInterval> {
let resolver = self.external_ip_resolver?;
let interval = self.resolve_external_ip_interval?;
Some(ResolveNatInterval::interval(resolver, interval))
}
}
impl Default for Discv4Config {
    fn default() -> Self {
        // Unified expiration/timeout duration, mirrors geth's `expiration` duration.
        let expiration = Duration::from_secs(20);
        let one_hour = Duration::from_secs(60 * 60);
        Self {
            enable_packet_filter: false,
            // Large enough to cover an entire recursive FindNode lookup, which sends
            // FindNode to nodes discovered in earlier rounds using the concurrency
            // factor ALPHA.
            udp_egress_message_buffer: 1024,
            // Every outgoing request will eventually lead to an incoming response.
            udp_ingress_message_buffer: 1024,
            max_find_node_failures: 5,
            ping_interval: Duration::from_secs(10),
            ping_expiration: expiration,
            bond_expiration: one_hour,
            enr_expiration: expiration,
            neighbours_expiration: expiration,
            request_timeout: expiration,
            lookup_interval: expiration,
            ban_list: Default::default(),
            // Bans expire after one hour by default.
            ban_duration: Some(one_hour),
            bootstrap_nodes: Default::default(),
            enable_dht_random_walk: true,
            enable_lookup: true,
            enable_eip868: true,
            enforce_expiration_timestamps: true,
            additional_eip868_rlp_pairs: Default::default(),
            external_ip_resolver: Some(Default::default()),
            // By default retry resolving the public IP every 5 minutes.
            resolve_external_ip_interval: Some(Duration::from_secs(60 * 5)),
        }
    }
}
/// Builder type for [`Discv4Config`]
///
/// Create one via [`Discv4Config::builder`], chain the setters, then call
/// [`Discv4ConfigBuilder::build`] to obtain the finished [`Discv4Config`].
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Discv4ConfigBuilder {
    // The configuration being assembled; starts out as `Discv4Config::default()`.
    config: Discv4Config,
}
impl Discv4ConfigBuilder {
    /// Whether to enable the incoming packet filter.
    pub const fn enable_packet_filter(&mut self) -> &mut Self {
        self.config.enable_packet_filter = true;
        self
    }
    /// Sets the channel size for incoming messages
    pub const fn udp_ingress_message_buffer(
        &mut self,
        udp_ingress_message_buffer: usize,
    ) -> &mut Self {
        self.config.udp_ingress_message_buffer = udp_ingress_message_buffer;
        self
    }
    /// Sets the channel size for outgoing messages
    pub const fn udp_egress_message_buffer(
        &mut self,
        udp_egress_message_buffer: usize,
    ) -> &mut Self {
        self.config.udp_egress_message_buffer = udp_egress_message_buffer;
        self
    }
    /// The number of allowed request failures for `findNode` requests.
    pub const fn max_find_node_failures(&mut self, max_find_node_failures: u8) -> &mut Self {
        self.config.max_find_node_failures = max_find_node_failures;
        self
    }
    /// The time between pings to ensure connectivity amongst connected nodes.
    pub const fn ping_interval(&mut self, interval: Duration) -> &mut Self {
        self.config.ping_interval = interval;
        self
    }
    /// Sets the timeout after which requests are considered timed out
    pub const fn request_timeout(&mut self, duration: Duration) -> &mut Self {
        self.config.request_timeout = duration;
        self
    }
    /// Sets the expiration duration for pings
    pub const fn ping_expiration(&mut self, duration: Duration) -> &mut Self {
        self.config.ping_expiration = duration;
        self
    }
    /// Sets the expiration duration for enr requests
    pub const fn enr_request_expiration(&mut self, duration: Duration) -> &mut Self {
        self.config.enr_expiration = duration;
        self
    }
    /// Sets the expiration duration for lookup neighbor requests
    pub const fn lookup_neighbours_expiration(&mut self, duration: Duration) -> &mut Self {
        self.config.neighbours_expiration = duration;
        self
    }
    /// Sets the expiration duration for a bond with a peer
    pub const fn bond_expiration(&mut self, duration: Duration) -> &mut Self {
        self.config.bond_expiration = duration;
        self
    }
    /// Whether to discover random nodes in the DHT.
    pub const fn enable_dht_random_walk(&mut self, enable_dht_random_walk: bool) -> &mut Self {
        self.config.enable_dht_random_walk = enable_dht_random_walk;
        self
    }
    /// Whether to automatically lookup peers periodically.
    pub const fn enable_lookup(&mut self, enable_lookup: bool) -> &mut Self {
        self.config.enable_lookup = enable_lookup;
        self
    }
    /// Whether to enable the EIP-868 extension.
    // NOTE(review): this doc and the one on `enforce_expiration_timestamps` were swapped; fixed.
    pub const fn enable_eip868(&mut self, enable_eip868: bool) -> &mut Self {
        self.config.enable_eip868 = enable_eip868;
        self
    }
    /// Whether to enforce expiration timestamps in messages.
    pub const fn enforce_expiration_timestamps(
        &mut self,
        enforce_expiration_timestamps: bool,
    ) -> &mut Self {
        self.config.enforce_expiration_timestamps = enforce_expiration_timestamps;
        self
    }
    /// Add another key value pair to include in the ENR; the value is RLP-encoded here.
    pub fn add_eip868_pair(&mut self, key: impl Into<Vec<u8>>, value: impl Encodable) -> &mut Self {
        self.add_eip868_rlp_pair(key, Bytes::from(alloy_rlp::encode(&value)))
    }
    /// Add another key value pair to include in the ENR; the value must already be RLP-encoded.
    pub fn add_eip868_rlp_pair(&mut self, key: impl Into<Vec<u8>>, rlp: Bytes) -> &mut Self {
        self.config.additional_eip868_rlp_pairs.insert(key.into(), rlp);
        self
    }
    /// Extend additional key value pairs to include in the ENR
    pub fn extend_eip868_rlp_pairs(
        &mut self,
        pairs: impl IntoIterator<Item = (impl Into<Vec<u8>>, Bytes)>,
    ) -> &mut Self {
        for (k, v) in pairs {
            self.add_eip868_rlp_pair(k, v);
        }
        self
    }
    /// A set of lists that can ban IP's or `PeerIds` from the server. See
    /// [`BanList`].
    pub fn ban_list(&mut self, ban_list: BanList) -> &mut Self {
        self.config.ban_list = ban_list;
        self
    }
    /// Sets the lookup interval duration.
    pub const fn lookup_interval(&mut self, lookup_interval: Duration) -> &mut Self {
        self.config.lookup_interval = lookup_interval;
        self
    }
    /// Set the default duration for which nodes are banned for. These timeouts are checked every 5
    /// minutes, so the precision will be to the nearest 5 minutes. If set to `None`, bans from
    /// the filter will last indefinitely. Default is 1 hour.
    pub const fn ban_duration(&mut self, ban_duration: Option<Duration>) -> &mut Self {
        self.config.ban_duration = ban_duration;
        self
    }
    /// Adds a boot node
    pub fn add_boot_node(&mut self, node: NodeRecord) -> &mut Self {
        self.config.bootstrap_nodes.insert(node);
        self
    }
    /// Adds multiple boot nodes
    pub fn add_boot_nodes(&mut self, nodes: impl IntoIterator<Item = NodeRecord>) -> &mut Self {
        self.config.bootstrap_nodes.extend(nodes);
        self
    }
    /// Configures if and how the external IP of the node should be resolved.
    pub const fn external_ip_resolver(
        &mut self,
        external_ip_resolver: Option<NatResolver>,
    ) -> &mut Self {
        self.config.external_ip_resolver = external_ip_resolver;
        self
    }
    /// Sets the interval at which the external IP is to be resolved.
    pub const fn resolve_external_ip_interval(
        &mut self,
        resolve_external_ip_interval: Option<Duration>,
    ) -> &mut Self {
        self.config.resolve_external_ip_interval = resolve_external_ip_interval;
        self
    }
    /// Returns the configured [`Discv4Config`]; the builder can be reused afterwards.
    pub fn build(&self) -> Discv4Config {
        self.config.clone()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: the builder API chains and produces a config without panicking.
    #[test]
    fn test_config_builder() {
        let mut builder = Discv4Config::builder();
        builder.enable_lookup(true).enable_dht_random_walk(true).add_boot_nodes(HashSet::new());
        builder.ban_duration(None).lookup_interval(Duration::from_secs(3)).enable_lookup(true);
        let _config = builder.build();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/node.rs | crates/net/discv4/src/node.rs | use alloy_primitives::keccak256;
use generic_array::GenericArray;
use reth_network_peers::{NodeRecord, PeerId};
/// The key type for the table.
///
/// Wraps a [`PeerId`]; its position in the Kademlia keyspace is derived by
/// hashing the id (see the `From<NodeKey> for discv5::Key<NodeKey>` impl).
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) struct NodeKey(pub(crate) PeerId);
impl From<PeerId> for NodeKey {
    /// Wraps the peer id in a [`NodeKey`].
    fn from(peer_id: PeerId) -> Self {
        Self(peer_id)
    }
}
impl From<NodeKey> for discv5::Key<NodeKey> {
    /// Derives the key's position in the Kademlia keyspace by keccak256-hashing the peer id.
    fn from(key: NodeKey) -> Self {
        let digest = keccak256(key.0.as_slice());
        Self::new_raw(key, *GenericArray::from_slice(digest.as_slice()))
    }
}
impl From<&NodeRecord> for NodeKey {
    /// Uses the record's peer id as the table key.
    fn from(record: &NodeRecord) -> Self {
        Self(record.id)
    }
}
/// Converts a `PeerId` into the required `Key` type for the table.
#[inline]
pub(crate) fn kad_key(node: PeerId) -> discv5::Key<NodeKey> {
    // Goes through the `From<NodeKey>` impl above, which keccak256-hashes the id.
    NodeKey::from(node).into()
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/lib.rs | crates/net/discv4/src/lib.rs | //! Discovery v4 implementation: <https://github.com/ethereum/devp2p/blob/master/discv4.md>
//!
//! Discv4 employs a kademlia-like routing table to store and manage discovered peers and topics.
//! The protocol allows for external IP discovery in NAT environments through regular PING/PONG's
//! with discovered nodes. Nodes return the external IP address that they have received and a simple
//! majority is chosen as our external IP address. If an external IP address is updated, this is
//! produced as an event to notify the swarm (if one is used for this behaviour).
//!
//! This implementation consists of a [`Discv4`] and [`Discv4Service`] pair. The service manages the
//! state and drives the UDP socket. The (optional) [`Discv4`] serves as the frontend to interact
//! with the service via a channel. Whenever the underlying table changes service produces a
//! [`DiscoveryUpdate`] that listeners will receive.
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use crate::{
error::{DecodePacketError, Discv4Error},
proto::{FindNode, Message, Neighbours, Packet, Ping, Pong},
};
use alloy_primitives::{bytes::Bytes, hex, B256};
use discv5::{
kbucket,
kbucket::{
BucketInsertResult, Distance, Entry as BucketEntry, InsertResult, KBucketsTable,
NodeStatus, MAX_NODES_PER_BUCKET,
},
ConnectionDirection, ConnectionState,
};
use enr::Enr;
use itertools::Itertools;
use parking_lot::Mutex;
use proto::{EnrRequest, EnrResponse};
use reth_ethereum_forks::ForkId;
use reth_network_peers::{pk2id, PeerId};
use secp256k1::SecretKey;
use std::{
cell::RefCell,
collections::{btree_map, hash_map::Entry, BTreeMap, HashMap, VecDeque},
fmt,
future::poll_fn,
io,
net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4},
pin::Pin,
rc::Rc,
sync::Arc,
task::{ready, Context, Poll},
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
};
use tokio::{
net::UdpSocket,
sync::{mpsc, mpsc::error::TrySendError, oneshot, oneshot::Sender as OneshotSender},
task::{JoinHandle, JoinSet},
time::Interval,
};
use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt};
use tracing::{debug, trace};
pub mod error;
pub mod proto;
mod config;
pub use config::{Discv4Config, Discv4ConfigBuilder};
mod node;
use node::{kad_key, NodeKey};
mod table;
// reexport NodeRecord primitive
pub use reth_network_peers::NodeRecord;
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
use crate::table::PongTable;
use reth_net_nat::ResolveNatInterval;
/// reexport to get public ip.
pub use reth_net_nat::{external_ip, NatResolver};
/// The default address for discv4 via UDP
///
/// Note: the default TCP address is the same.
pub const DEFAULT_DISCOVERY_ADDR: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED);
/// The default port for discv4 via UDP
///
/// Note: the default TCP port is the same.
pub const DEFAULT_DISCOVERY_PORT: u16 = 30303;
/// The default address for discv4 via UDP: "0.0.0.0:30303"
///
/// Note: The default TCP address is the same.
pub const DEFAULT_DISCOVERY_ADDRESS: SocketAddr =
    SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_DISCOVERY_PORT));
/// The maximum size of any packet is 1280 bytes.
const MAX_PACKET_SIZE: usize = 1280;
/// Length of the UDP datagram packet-header: Hash(32b) + Signature(65b) + Packet Type(1b)
const MIN_PACKET_SIZE: usize = 32 + 65 + 1;
/// Concurrency factor for `FindNode` requests to pick `ALPHA` closest nodes, <https://github.com/ethereum/devp2p/blob/master/discv4.md#recursive-lookup>
const ALPHA: usize = 3;
/// Maximum number of nodes to ping concurrently.
///
/// This corresponds to 2 full `Neighbours` responses with 16 _new_ nodes. This will apply some
/// backpressure in recursive lookups.
const MAX_NODES_PING: usize = 2 * MAX_NODES_PER_BUCKET;
/// Maximum number of pings to keep queued.
///
/// If we are currently sending too many pings, any new pings will be queued. To prevent unbounded
/// growth of the queue, the queue has a maximum capacity, after which any additional pings will be
/// discarded.
///
/// This corresponds to 2 full `Neighbours` responses with 16 new nodes.
const MAX_QUEUED_PINGS: usize = 2 * MAX_NODES_PER_BUCKET;
/// The size of the datagram is limited to [`MAX_PACKET_SIZE`]; 16 nodes, as discv4 specifies, don't
/// fit in one datagram. The safe number of nodes that always fit in a datagram is 12, with worst
/// case all of them being IPv6 nodes. This is calculated by
/// `((MAX_PACKET_SIZE - (header + expire + rlp overhead)) / size(rlp(Node_IPv6)))`.
/// Even in the best case where all nodes are IPv4, only 14 nodes fit into one packet.
const SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS: usize = (MAX_PACKET_SIZE - 109) / 91;
/// The timeout used to identify expired nodes, 24h
///
/// Mirrors geth's `bondExpiration` of 24h
const ENDPOINT_PROOF_EXPIRATION: Duration = Duration::from_secs(24 * 60 * 60);
/// Duration used to expire nodes from the routing table, 1hr
const EXPIRE_DURATION: Duration = Duration::from_secs(60 * 60);
// Restricts how many udp messages can be processed in a single [Discv4Service::poll] call.
//
// This will act as a manual yield point when draining the socket messages where the most CPU
// expensive part is handling outgoing messages: encoding and hashing the packet
const UDP_MESSAGE_POLL_LOOP_BUDGET: i32 = 4;
// Channel aliases for the UDP send/receive tasks.
type EgressSender = mpsc::Sender<(Bytes, SocketAddr)>;
type EgressReceiver = mpsc::Receiver<(Bytes, SocketAddr)>;
pub(crate) type IngressSender = mpsc::Sender<IngressEvent>;
pub(crate) type IngressReceiver = mpsc::Receiver<IngressEvent>;
// One-shot channel used to deliver the result of a lookup.
type NodeRecordSender = OneshotSender<Vec<NodeRecord>>;
/// The Discv4 frontend.
///
/// This is a cloneable type that communicates with the [`Discv4Service`] by sending commands over a
/// shared channel.
///
/// See also [`Discv4::spawn`]
#[derive(Debug, Clone)]
pub struct Discv4 {
    /// The address of the udp socket
    local_addr: SocketAddr,
    /// channel to send commands over to the service
    to_service: mpsc::UnboundedSender<Discv4Command>,
    /// Tracks the local node record.
    ///
    /// This includes the currently tracked external IP address of the node.
    /// Shared with the service, which updates it when the external IP changes.
    node_record: Arc<Mutex<NodeRecord>>,
}
impl Discv4 {
    /// Same as [`Self::bind`] but also spawns the service onto a new task.
    ///
    /// See also: [`Discv4Service::spawn()`]
    pub async fn spawn(
        local_address: SocketAddr,
        local_enr: NodeRecord,
        secret_key: SecretKey,
        config: Discv4Config,
    ) -> io::Result<Self> {
        let (discv4, service) = Self::bind(local_address, local_enr, secret_key, config).await?;
        service.spawn();
        Ok(discv4)
    }
    /// Returns a new instance with a detached command channel (commands are silently dropped).
    ///
    /// NOTE: this is only intended for test setups.
    #[cfg(feature = "test-utils")]
    pub fn noop() -> Self {
        let (to_service, _rx) = mpsc::unbounded_channel();
        let local_addr =
            (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), DEFAULT_DISCOVERY_PORT).into();
        Self {
            local_addr,
            to_service,
            node_record: Arc::new(Mutex::new(NodeRecord::new(
                "127.0.0.1:3030".parse().unwrap(),
                PeerId::random(),
            ))),
        }
    }
    /// Binds a new `UdpSocket` and creates the service
    ///
    /// ```
    /// # use std::io;
    /// use reth_discv4::{Discv4, Discv4Config};
    /// use reth_network_peers::{pk2id, NodeRecord, PeerId};
    /// use secp256k1::SECP256K1;
    /// use std::{net::SocketAddr, str::FromStr};
    /// # async fn t() -> io::Result<()> {
    /// // generate a (random) keypair
    /// let (secret_key, pk) = SECP256K1.generate_keypair(&mut rand_08::thread_rng());
    /// let id = pk2id(&pk);
    ///
    /// let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
    /// let local_enr =
    ///     NodeRecord { address: socket.ip(), tcp_port: socket.port(), udp_port: socket.port(), id };
    /// let config = Discv4Config::default();
    ///
    /// let (discv4, mut service) = Discv4::bind(socket, local_enr, secret_key, config).await.unwrap();
    ///
    /// // get an update stream
    /// let updates = service.update_stream();
    ///
    /// let _handle = service.spawn();
    ///
    /// // lookup the local node in the DHT
    /// let _discovered = discv4.lookup_self().await.unwrap();
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub async fn bind(
        local_address: SocketAddr,
        mut local_node_record: NodeRecord,
        secret_key: SecretKey,
        config: Discv4Config,
    ) -> io::Result<(Self, Discv4Service)> {
        let socket = UdpSocket::bind(local_address).await?;
        let local_addr = socket.local_addr()?;
        // The OS may assign a port (e.g. when binding to port 0); record the actual one.
        local_node_record.udp_port = local_addr.port();
        trace!(target: "discv4", ?local_addr,"opened UDP socket");
        let mut service =
            Discv4Service::new(socket, local_addr, local_node_record, secret_key, config);
        // resolve the external address immediately
        service.resolve_external_ip();
        let discv4 = service.handle();
        Ok((discv4, service))
    }
    /// Returns the address of the UDP socket.
    pub const fn local_addr(&self) -> SocketAddr {
        self.local_addr
    }
    /// Returns the [`NodeRecord`] of the local node.
    ///
    /// This includes the currently tracked external IP address of the node.
    pub fn node_record(&self) -> NodeRecord {
        *self.node_record.lock()
    }
    /// Returns the currently tracked external IP of the node.
    pub fn external_ip(&self) -> IpAddr {
        self.node_record.lock().address
    }
    /// Sets the [Interval] used for periodically looking up targets over the network
    pub fn set_lookup_interval(&self, duration: Duration) {
        self.send_to_service(Discv4Command::SetLookupInterval(duration))
    }
    /// Starts a `FindNode` recursive lookup that locates the closest nodes to the given node id. See also: <https://github.com/ethereum/devp2p/blob/master/discv4.md#recursive-lookup>
    ///
    /// The lookup initiator starts by picking α closest nodes to the target it knows of. The
    /// initiator then sends concurrent `FindNode` packets to those nodes. α is a system-wide
    /// concurrency parameter, such as 3. In the recursive step, the initiator resends `FindNode` to
    /// nodes it has learned about from previous queries. Of the k nodes the initiator has heard of
    /// closest to the target, it picks α that it has not yet queried and resends `FindNode` to
    /// them. Nodes that fail to respond quickly are removed from consideration until and unless
    /// they do respond.
    //
    // If a round of FindNode queries fails to return a node any closer than the closest already
    // seen, the initiator resends the find node to all of the k closest nodes it has not already
    // queried. The lookup terminates when the initiator has queried and gotten responses from the k
    // closest nodes it has seen.
    pub async fn lookup_self(&self) -> Result<Vec<NodeRecord>, Discv4Error> {
        self.lookup_node(None).await
    }
    /// Looks up the given node id.
    ///
    /// Returning the closest nodes to the given node id.
    pub async fn lookup(&self, node_id: PeerId) -> Result<Vec<NodeRecord>, Discv4Error> {
        self.lookup_node(Some(node_id)).await
    }
    /// Performs a random lookup for node records.
    pub async fn lookup_random(&self) -> Result<Vec<NodeRecord>, Discv4Error> {
        let target = PeerId::random();
        self.lookup_node(Some(target)).await
    }
    /// Sends a message to the service to lookup the closest nodes
    pub fn send_lookup(&self, node_id: PeerId) {
        let cmd = Discv4Command::Lookup { node_id: Some(node_id), tx: None };
        self.send_to_service(cmd);
    }
    // Shared lookup implementation: `None` targets the local node id.
    async fn lookup_node(&self, node_id: Option<PeerId>) -> Result<Vec<NodeRecord>, Discv4Error> {
        let (tx, rx) = oneshot::channel();
        let cmd = Discv4Command::Lookup { node_id, tx: Some(tx) };
        self.to_service.send(cmd)?;
        Ok(rx.await?)
    }
    /// Triggers a new self lookup without expecting a response
    pub fn send_lookup_self(&self) {
        let cmd = Discv4Command::Lookup { node_id: None, tx: None };
        self.send_to_service(cmd);
    }
    /// Removes the peer from the table, if it exists.
    pub fn remove_peer(&self, node_id: PeerId) {
        let cmd = Discv4Command::Remove(node_id);
        self.send_to_service(cmd);
    }
    /// Adds the node to the table, if it is not already present.
    pub fn add_node(&self, node_record: NodeRecord) {
        let cmd = Discv4Command::Add(node_record);
        self.send_to_service(cmd);
    }
    /// Adds the peer and id to the ban list.
    ///
    /// This will prevent any future inclusion in the table
    pub fn ban(&self, node_id: PeerId, ip: IpAddr) {
        let cmd = Discv4Command::Ban(node_id, ip);
        self.send_to_service(cmd);
    }
    /// Adds the ip to the ban list.
    ///
    /// This will prevent any future inclusion in the table
    pub fn ban_ip(&self, ip: IpAddr) {
        let cmd = Discv4Command::BanIp(ip);
        self.send_to_service(cmd);
    }
    /// Adds the peer to the ban list.
    ///
    /// This will prevent any future inclusion in the table
    pub fn ban_node(&self, node_id: PeerId) {
        let cmd = Discv4Command::BanPeer(node_id);
        self.send_to_service(cmd);
    }
    /// Sets the tcp port
    ///
    /// This will update our [`NodeRecord`]'s tcp port.
    pub fn set_tcp_port(&self, port: u16) {
        let cmd = Discv4Command::SetTcpPort(port);
        self.send_to_service(cmd);
    }
    /// Sets the pair in the EIP-868 [`Enr`] of the node.
    ///
    /// If the key already exists, this will update it.
    ///
    /// CAUTION: The value **must** be rlp encoded
    pub fn set_eip868_rlp_pair(&self, key: Vec<u8>, rlp: Bytes) {
        let cmd = Discv4Command::SetEIP868RLPPair { key, rlp };
        self.send_to_service(cmd);
    }
    /// Sets the pair in the EIP-868 [`Enr`] of the node.
    ///
    /// If the key already exists, this will update it.
    pub fn set_eip868_rlp(&self, key: Vec<u8>, value: impl alloy_rlp::Encodable) {
        self.set_eip868_rlp_pair(key, Bytes::from(alloy_rlp::encode(&value)))
    }
    // Fire-and-forget command dispatch; failures (service terminated) are only logged.
    #[inline]
    fn send_to_service(&self, cmd: Discv4Command) {
        let _ = self.to_service.send(cmd).map_err(|err| {
            debug!(
                target: "discv4",
                %err,
                "channel capacity reached, dropping command",
            )
        });
    }
    /// Returns the receiver half of new listener channel that streams [`DiscoveryUpdate`]s.
    pub async fn update_stream(&self) -> Result<ReceiverStream<DiscoveryUpdate>, Discv4Error> {
        let (tx, rx) = oneshot::channel();
        let cmd = Discv4Command::Updates(tx);
        self.to_service.send(cmd)?;
        Ok(rx.await?)
    }
    /// Terminates the spawned [`Discv4Service`].
    pub fn terminate(&self) {
        self.send_to_service(Discv4Command::Terminated);
    }
}
/// Manages discv4 peer discovery over UDP.
///
/// This is a [Stream] that handles incoming and outgoing discv4 messages and emits updates via:
/// [`Discv4Service::update_stream`].
///
/// This type maintains the discv Kademlia routing table and is responsible for performing lookups.
///
/// ## Lookups
///
/// See also [Recursive Lookups](https://github.com/ethereum/devp2p/blob/master/discv4.md#recursive-lookup).
/// Lookups are either triggered periodically or performed on demand: [`Discv4::lookup`]
/// Newly discovered nodes are emitted as [`DiscoveryUpdate::Added`] event to all subscribers:
/// [`Discv4Service::update_stream`].
#[must_use = "Stream does nothing unless polled"]
pub struct Discv4Service {
    /// Local address of the UDP socket.
    local_address: SocketAddr,
    /// The local ENR for EIP-868 <https://eips.ethereum.org/EIPS/eip-868>
    local_eip_868_enr: Enr<SecretKey>,
    /// Local ENR of the server.
    local_node_record: NodeRecord,
    /// Keeps track of the node record of the local node.
    shared_node_record: Arc<Mutex<NodeRecord>>,
    /// The secret key used to sign payloads
    secret_key: SecretKey,
    /// The UDP socket for sending and receiving messages.
    _socket: Arc<UdpSocket>,
    /// The spawned UDP tasks.
    ///
    /// Note: If dropped, the spawned send+receive tasks are aborted.
    _tasks: JoinSet<()>,
    /// The routing table.
    kbuckets: KBucketsTable<NodeKey, NodeEntry>,
    /// Receiver for incoming messages
    ///
    /// Receives incoming messages from the UDP task.
    ingress: IngressReceiver,
    /// Sender for sending outgoing messages
    ///
    /// Sends outgoing messages to the UDP task.
    egress: EgressSender,
    /// Buffered pending pings to apply backpressure.
    ///
    /// Lookups behave like bursts of requests: Endpoint proof followed by `FindNode` request. [Recursive lookups](https://github.com/ethereum/devp2p/blob/master/discv4.md#recursive-lookup) can trigger multiple followup Pings+FindNode requests.
    /// A cap on concurrent `Ping` prevents escalation where: A large number of new nodes
    /// discovered via `FindNode` in a recursive lookup triggers a large number of `Ping`s, and
    /// followup `FindNode` requests.... Buffering them effectively prevents high `Ping` peaks.
    queued_pings: VecDeque<(NodeRecord, PingReason)>,
    /// Currently active pings to specific nodes.
    pending_pings: HashMap<PeerId, PingRequest>,
    /// Currently active endpoint proof verification lookups to specific nodes.
    ///
    /// Entries here means we've proven the peer's endpoint but haven't completed our end of the
    /// endpoint proof
    pending_lookup: HashMap<PeerId, (Instant, LookupContext)>,
    /// Currently active `FindNode` requests
    pending_find_nodes: HashMap<PeerId, FindNodeRequest>,
    /// Currently active ENR requests
    pending_enr_requests: HashMap<PeerId, EnrRequestState>,
    /// Copy of the sender half of the commands channel for [Discv4]
    to_service: mpsc::UnboundedSender<Discv4Command>,
    /// Receiver half of the commands channel for [Discv4]
    commands_rx: mpsc::UnboundedReceiver<Discv4Command>,
    /// All subscribers for table updates
    update_listeners: Vec<mpsc::Sender<DiscoveryUpdate>>,
    /// The interval when to trigger random lookups
    lookup_interval: Interval,
    /// Used to rotate targets to lookup
    lookup_rotator: LookupTargetRotator,
    /// Interval when to recheck active requests
    evict_expired_requests_interval: Interval,
    /// Interval when to resend pings.
    ping_interval: Interval,
    /// The interval at which to attempt resolving external IP again.
    resolve_external_ip_interval: Option<ResolveNatInterval>,
    /// How this service is configured
    config: Discv4Config,
    /// Buffered events populated during poll.
    queued_events: VecDeque<Discv4Event>,
    /// Keeps track of nodes from which we have received a `Pong` message.
    received_pongs: PongTable,
    /// Interval used to expire additionally tracked nodes
    expire_interval: Interval,
}
impl Discv4Service {
    /// Create a new instance for a bound [`UdpSocket`].
    pub(crate) fn new(
        socket: UdpSocket,
        local_address: SocketAddr,
        local_node_record: NodeRecord,
        secret_key: SecretKey,
        config: Discv4Config,
    ) -> Self {
        let socket = Arc::new(socket);
        let (ingress_tx, ingress_rx) = mpsc::channel(config.udp_ingress_message_buffer);
        let (egress_tx, egress_rx) = mpsc::channel(config.udp_egress_message_buffer);
        let mut tasks = JoinSet::<()>::new();
        // Spawn the dedicated receive and send loops that drive the shared UDP socket.
        let udp = Arc::clone(&socket);
        tasks.spawn(receive_loop(udp, ingress_tx, local_node_record.id));
        let udp = Arc::clone(&socket);
        tasks.spawn(send_loop(udp, egress_rx));
        let kbuckets = KBucketsTable::new(
            NodeKey::from(&local_node_record).into(),
            Duration::from_secs(60),
            MAX_NODES_PER_BUCKET,
            None,
            None,
        );
        let self_lookup_interval = tokio::time::interval(config.lookup_interval);
        // Wait `ping_interval` and then start pinging every `ping_interval`, so the first
        // ping is only sent after the initial interval has elapsed.
        let ping_interval = tokio::time::interval_at(
            tokio::time::Instant::now() + config.ping_interval,
            config.ping_interval,
        );
        let evict_expired_requests_interval = tokio::time::interval_at(
            tokio::time::Instant::now() + config.request_timeout,
            config.request_timeout,
        );
        let lookup_rotator = if config.enable_dht_random_walk {
            LookupTargetRotator::default()
        } else {
            LookupTargetRotator::local_only()
        };
        // for EIP-868 construct an ENR
        let local_eip_868_enr = {
            let mut builder = Enr::builder();
            builder.ip(local_node_record.address);
            if local_node_record.address.is_ipv4() {
                builder.udp4(local_node_record.udp_port);
                builder.tcp4(local_node_record.tcp_port);
            } else {
                builder.udp6(local_node_record.udp_port);
                builder.tcp6(local_node_record.tcp_port);
            }
            for (key, val) in &config.additional_eip868_rlp_pairs {
                builder.add_value_rlp(key, val.clone());
            }
            builder.build(&secret_key).expect("v4 is set")
        };
        let (to_service, commands_rx) = mpsc::unbounded_channel();
        let shared_node_record = Arc::new(Mutex::new(local_node_record));
        Self {
            local_address,
            local_eip_868_enr,
            local_node_record,
            shared_node_record,
            _socket: socket,
            kbuckets,
            secret_key,
            _tasks: tasks,
            ingress: ingress_rx,
            egress: egress_tx,
            queued_pings: VecDeque::with_capacity(MAX_QUEUED_PINGS),
            pending_pings: Default::default(),
            pending_lookup: Default::default(),
            pending_find_nodes: Default::default(),
            pending_enr_requests: Default::default(),
            commands_rx,
            to_service,
            update_listeners: Vec::with_capacity(1),
            lookup_interval: self_lookup_interval,
            ping_interval,
            evict_expired_requests_interval,
            lookup_rotator,
            resolve_external_ip_interval: config.resolve_external_ip_interval(),
            config,
            queued_events: Default::default(),
            received_pongs: Default::default(),
            expire_interval: tokio::time::interval(EXPIRE_DURATION),
        }
    }
    /// Returns the frontend handle that can communicate with the service via commands.
    ///
    /// The handle shares the command channel and the tracked node record with this service.
    pub fn handle(&self) -> Discv4 {
        Discv4 {
            local_addr: self.local_address,
            to_service: self.to_service.clone(),
            node_record: self.shared_node_record.clone(),
        }
    }
    /// Returns the current enr sequence of the local record.
    ///
    /// Returns `None` when the EIP-868 extension is disabled in the config.
    fn enr_seq(&self) -> Option<u64> {
        self.config.enable_eip868.then(|| self.local_eip_868_enr.seq())
    }
    /// Sets the [Interval] used for periodically looking up targets over the network
    pub fn set_lookup_interval(&mut self, duration: Duration) {
        // Replaces the interval; the first tick of a fresh interval fires immediately.
        self.lookup_interval = tokio::time::interval(duration);
    }
    /// Sets the external Ip to the configured external IP if [`NatResolver::ExternalIp`].
    ///
    /// No-op when no resolver is configured or the resolver has no static external IP.
    fn resolve_external_ip(&mut self) {
        if let Some(r) = &self.resolve_external_ip_interval {
            if let Some(external_ip) = r.resolver().as_external_ip() {
                self.set_external_ip_addr(external_ip);
            }
        }
    }
    /// Sets the given ip address as the node's external IP in the node record announced in
    /// discovery
    ///
    /// Updates the local node record, the EIP-868 ENR, and the record shared with the
    /// [`Discv4`] frontend. No-op if the address is unchanged.
    pub fn set_external_ip_addr(&mut self, external_ip: IpAddr) {
        if self.local_node_record.address != external_ip {
            debug!(target: "discv4", ?external_ip, "Updating external ip");
            self.local_node_record.address = external_ip;
            let _ = self.local_eip_868_enr.set_ip(external_ip, &self.secret_key);
            let mut lock = self.shared_node_record.lock();
            *lock = self.local_node_record;
            debug!(target: "discv4", enr=?self.local_eip_868_enr, "Updated local ENR");
        }
    }
    /// Returns the [`PeerId`] that identifies this node
    pub const fn local_peer_id(&self) -> &PeerId {
        &self.local_node_record.id
    }
    /// Returns the address of the UDP socket
    pub const fn local_addr(&self) -> SocketAddr {
        self.local_address
    }
    /// Returns the ENR of this service.
    ///
    /// Note: this will include the external address if resolved.
    pub const fn local_enr(&self) -> NodeRecord {
        self.local_node_record
    }
/// Returns mutable reference to ENR for testing.
#[cfg(test)]
pub const fn local_enr_mut(&mut self) -> &mut NodeRecord {
&mut self.local_node_record
}
/// Returns true if the given `PeerId` is currently in the bucket
pub fn contains_node(&self, id: PeerId) -> bool {
let key = kad_key(id);
self.kbuckets.get_index(&key).is_some()
}
    /// Bootstraps the local node to join the DHT.
    ///
    /// Bootstrapping is a multi-step operation that starts with a lookup of the local node's
    /// own ID in the DHT. This introduces the local node to the other nodes
    /// in the DHT and populates its routing table with the closest proven neighbours.
    ///
    /// This is similar to adding all bootnodes via [`Self::add_node`], but does not fire a
    /// [`DiscoveryUpdate::Added`] event for the given bootnodes. So boot nodes don't appear in the
    /// update stream, which is usually desirable, since bootnodes should not be connected to.
    ///
    /// If adding the configured bootnodes should result in a [`DiscoveryUpdate::Added`], see
    /// [`Self::add_all_nodes`].
    ///
    /// **Note:** This is a noop if there are no bootnodes.
    pub fn bootstrap(&mut self) {
        for record in self.config.bootstrap_nodes.clone() {
            debug!(target: "discv4", ?record, "pinging boot node");
            let key = kad_key(record.id);
            let entry = NodeEntry::new(record);
            // insert the boot node in the table
            match self.kbuckets.insert_or_update(
                &key,
                entry,
                NodeStatus {
                    state: ConnectionState::Disconnected,
                    direction: ConnectionDirection::Outgoing,
                },
            ) {
                InsertResult::Failed(_) => {}
                // Inserted, updated, or pending: start the endpoint proof via ping.
                _ => {
                    self.try_ping(record, PingReason::InitialInsert);
                }
            }
        }
    }
/// Spawns this services onto a new task
///
/// Note: requires a running tokio runtime
pub fn spawn(mut self) -> JoinHandle<()> {
    tokio::task::spawn(async move {
        // Seed the routing table from the configured bootnodes first.
        self.bootstrap();
        // Drive the service (a `Stream`) to completion; events are only traced here.
        while let Some(event) = self.next().await {
            trace!(target: "discv4", ?event, "processed");
        }
        trace!(target: "discv4", "service terminated");
    })
}
/// Creates a new bounded channel for [`DiscoveryUpdate`]s.
pub fn update_stream(&mut self) -> ReceiverStream<DiscoveryUpdate> {
    // Register the sending half as a listener; the caller consumes the receiving half.
    let (sender, receiver) = mpsc::channel(512);
    self.update_listeners.push(sender);
    ReceiverStream::new(receiver)
}
/// Looks up the local node in the DHT.
// Used during bootstrap and self-refresh: a lookup for our own id populates the table.
pub fn lookup_self(&mut self) {
    self.lookup(self.local_node_record.id)
}
/// Looks up the given node in the DHT
///
/// A `FindNode` packet requests information about nodes close to target. The target is a
/// 64-byte secp256k1 public key. When `FindNode` is received, the recipient should reply
/// with Neighbors packets containing the closest 16 nodes to target found in its local
/// table.
//
// To guard against traffic amplification attacks, Neighbors replies should only be sent if the
// sender of FindNode has been verified by the endpoint proof procedure.
pub fn lookup(&mut self, target: PeerId) {
    // No sender: discovered nodes are not reported back to a caller.
    self.lookup_with(target, None)
}
/// Starts the recursive lookup process for the given target, <https://github.com/ethereum/devp2p/blob/master/discv4.md#recursive-lookup>.
///
/// At first the `ALPHA` (==3, defined concurrency factor) nodes that are closest to the target
/// in the underlying DHT are selected to seed the lookup via `FindNode` requests. In the
/// recursive step, the initiator resends `FindNode` to nodes it has learned about from previous
/// queries.
///
/// This takes an optional Sender through which all successfully discovered nodes are sent once
/// the request has finished.
fn lookup_with(&mut self, target: PeerId, tx: Option<NodeRecordSender>) {
    trace!(target: "discv4", ?target, "Starting lookup");
    let target_key = kad_key(target);
    // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes to which we have
    // a valid endpoint proof
    let ctx = LookupContext::new(
        target_key.clone(),
        self.kbuckets
            .closest_values(&target_key)
            // Only proven nodes without an in-flight FindNode are eligible as seeds.
            .filter(|node| {
                node.value.has_endpoint_proof &&
                    !self.pending_find_nodes.contains_key(&node.key.preimage().0)
            })
            .take(MAX_NODES_PER_BUCKET)
            .map(|n| (target_key.distance(&n.key), n.value.record)),
        tx,
    );
    // From those 16, pick the 3 closest to start the concurrent lookup.
    let closest = ctx.closest(ALPHA);
    if closest.is_empty() && self.pending_find_nodes.is_empty() {
        // no closest nodes, and no lookup in progress: table is empty.
        // This could happen if all records were deleted from the table due to missed pongs
        // (e.g. connectivity problems over a long period of time, or issues during initial
        // bootstrapping) so we attempt to bootstrap again
        self.bootstrap();
        return
    }
    trace!(target: "discv4", ?target, num = closest.len(), "Start lookup closest nodes");
    for node in closest {
        // here we still want to check against previous request failures and if necessary
        // re-establish a new endpoint proof because it can be the case that the other node lost
        // our entry and no longer has an endpoint proof on their end
        self.find_node_checked(&node, ctx.clone());
    }
}
/// Sends a new `FindNode` packet to the node with `target` as the lookup target.
///
/// CAUTION: This expects there's a valid Endpoint proof to the given `node`.
fn find_node(&mut self, node: &NodeRecord, ctx: LookupContext) {
    trace!(target: "discv4", ?node, lookup=?ctx.target(), "Sending FindNode");
    // Mark as queried first so the lookup never re-sends to the same node.
    ctx.mark_queried(node.id);
    let id = ctx.target();
    let msg = Message::FindNode(FindNode { id, expire: self.find_node_expiration() });
    self.send_packet(msg, node.udp_addr());
    // Track the in-flight request so incoming Neighbours can be matched to this lookup.
    self.pending_find_nodes.insert(node.id, FindNodeRequest::new(ctx));
}
/// Sends a new `FindNode` packet to the node with `target` as the lookup target but checks
/// whether we should send a new ping first to renew the endpoint proof by checking the
/// previously failed findNode requests. It could be that the node is no longer reachable or
/// lost our entry.
fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) {
    let max_failures = self.config.max_find_node_failures;
    // If the node is unknown to the table (`on_entry` returns None) we also ping first,
    // hence `unwrap_or(true)`.
    let needs_ping = self
        .on_entry(node.id, |entry| entry.exceeds_find_node_failures(max_failures))
        .unwrap_or(true);
    if needs_ping {
        // Re-establish the endpoint proof; the lookup resumes once the pong arrives.
        self.try_ping(*node, PingReason::Lookup(*node, ctx))
    } else {
        self.find_node(node, ctx)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/table.rs | crates/net/discv4/src/table.rs | //! Additional support for tracking nodes.
use reth_network_peers::PeerId;
use std::{collections::HashMap, net::IpAddr, time::Instant};
/// Keeps track of nodes from which we have received a `Pong` message.
#[derive(Debug, Clone, Default)]
pub(crate) struct PongTable {
    /// The nodes we have received a `Pong` from, keyed by (peer id, ip), mapped to the
    /// time the most recent `Pong` arrived.
    nodes: HashMap<NodeKey, Instant>,
}
impl PongTable {
/// Updates the timestamp we received a `Pong` from the given node.
pub(crate) fn on_pong(&mut self, remote_id: PeerId, remote_ip: IpAddr) {
let key = NodeKey { remote_id, remote_ip };
self.nodes.insert(key, Instant::now());
}
/// Returns the timestamp we received a `Pong` from the given node.
pub(crate) fn last_pong(&self, remote_id: PeerId, remote_ip: IpAddr) -> Option<Instant> {
self.nodes.get(&NodeKey { remote_id, remote_ip }).copied()
}
/// Removes all nodes from the table that have not sent a `Pong` for at least `timeout`.
pub(crate) fn evict_expired(&mut self, now: Instant, timeout: std::time::Duration) {
self.nodes.retain(|_, last_pong| now - *last_pong < timeout);
}
}
/// Composite key identifying a remote node by its peer id and observed IP address.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub(crate) struct NodeKey {
    /// The remote node's peer id.
    pub(crate) remote_id: PeerId,
    /// The remote node's IP address.
    pub(crate) remote_ip: IpAddr,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/test_utils.rs | crates/net/discv4/src/test_utils.rs | //! Mock discovery support
// TODO(rand): update ::random calls after rand_09 migration
use crate::{
proto::{FindNode, Message, Neighbours, NodeEndpoint, Packet, Ping, Pong},
receive_loop, send_loop, Discv4, Discv4Config, Discv4Service, EgressSender, IngressEvent,
IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
};
use alloy_primitives::{hex, B256, B512};
use rand_08::{thread_rng, Rng, RngCore};
use reth_ethereum_forks::{ForkHash, ForkId};
use reth_network_peers::{pk2id, NodeRecord};
use secp256k1::{SecretKey, SECP256K1};
use std::{
collections::{HashMap, HashSet},
io,
net::{IpAddr, SocketAddr},
pin::Pin,
str::FromStr,
sync::Arc,
task::{Context, Poll},
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tokio::{
net::UdpSocket,
sync::mpsc,
task::{JoinHandle, JoinSet},
};
use tokio_stream::{Stream, StreamExt};
use tracing::debug;
/// Mock discovery node
#[derive(Debug)]
pub struct MockDiscovery {
    // Bound address of the mock's UDP socket.
    local_addr: SocketAddr,
    // The node record the mock announces.
    local_enr: NodeRecord,
    // Key used to sign outgoing packets.
    secret_key: SecretKey,
    // Kept alive so the socket isn't closed while the send/receive tasks run.
    _udp: Arc<UdpSocket>,
    // Background send/receive loop tasks; aborted on drop.
    _tasks: JoinSet<()>,
    /// Receiver for incoming messages
    ingress: IngressReceiver,
    /// Sender for sending outgoing messages
    egress: EgressSender,
    // Peers for which the mock should answer the next Ping with a Pong.
    pending_pongs: HashSet<PeerId>,
    // Queued Neighbours responses keyed by FindNode target.
    pending_neighbours: HashMap<PeerId, Vec<NodeRecord>>,
    // Commands from the test driver (queue pong/neighbours).
    command_rx: mpsc::Receiver<MockCommand>,
}
impl MockDiscovery {
    /// Creates a new instance and opens a socket
    ///
    /// Binds an ephemeral UDP port on all interfaces, spawns the send/receive loops and
    /// returns the mock together with a command channel to drive it.
    pub async fn new() -> io::Result<(Self, mpsc::Sender<MockCommand>)> {
        let mut rng = thread_rng();
        // Port 0: let the OS assign an ephemeral port.
        let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
        let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
        let id = pk2id(&pk);
        let socket = Arc::new(UdpSocket::bind(socket).await?);
        let local_addr = socket.local_addr()?;
        let local_enr = NodeRecord {
            address: local_addr.ip(),
            tcp_port: local_addr.port(),
            udp_port: local_addr.port(),
            id,
        };
        let (ingress_tx, ingress_rx) = mpsc::channel(128);
        let (egress_tx, egress_rx) = mpsc::channel(128);
        let mut tasks = JoinSet::<()>::new();
        // Spawn the UDP receive and send loops sharing the same socket.
        let udp = Arc::clone(&socket);
        tasks.spawn(receive_loop(udp, ingress_tx, local_enr.id));
        let udp = Arc::clone(&socket);
        tasks.spawn(send_loop(udp, egress_rx));
        let (tx, command_rx) = mpsc::channel(128);
        let this = Self {
            _tasks: tasks,
            ingress: ingress_rx,
            egress: egress_tx,
            local_addr,
            local_enr,
            secret_key,
            _udp: socket,
            pending_pongs: Default::default(),
            pending_neighbours: Default::default(),
            command_rx,
        };
        Ok((this, tx))
    }
    /// Spawn and consume the stream.
    pub fn spawn(self) -> JoinHandle<()> {
        tokio::task::spawn(async move {
            // Drain the stream until it terminates; events are discarded.
            let _: Vec<_> = self.collect().await;
        })
    }
    /// Queue a pending pong.
    pub fn queue_pong(&mut self, from: PeerId) {
        self.pending_pongs.insert(from);
    }
    /// Queue a pending Neighbours response.
    pub fn queue_neighbours(&mut self, target: PeerId, nodes: Vec<NodeRecord>) {
        self.pending_neighbours.insert(target, nodes);
    }
    /// Returns the local socket address associated with the service.
    pub const fn local_addr(&self) -> SocketAddr {
        self.local_addr
    }
    /// Returns the local [`NodeRecord`] associated with the service.
    pub const fn local_enr(&self) -> NodeRecord {
        self.local_enr
    }
    /// Encodes the packet, sends it and returns the hash.
    fn send_packet(&self, msg: Message, to: SocketAddr) -> B256 {
        let (payload, hash) = msg.encode(&self.secret_key);
        // Best-effort send; a full egress channel drops the packet silently.
        let _ = self.egress.try_send((payload, to));
        hash
    }
    /// Returns an expiry timestamp 30 seconds in the future for Neighbours packets.
    fn send_neighbours_timeout(&self) -> u64 {
        (SystemTime::now().duration_since(UNIX_EPOCH).unwrap() + Duration::from_secs(30)).as_secs()
    }
}
impl Stream for MockDiscovery {
    type Item = MockEvent;
    /// Drains pending commands, then answers incoming packets according to the queued
    /// pong/neighbours responses, yielding a [`MockEvent`] per reply sent.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // process all incoming commands
        while let Poll::Ready(maybe_cmd) = this.command_rx.poll_recv(cx) {
            // Command channel closed => terminate the stream.
            let Some(cmd) = maybe_cmd else { return Poll::Ready(None) };
            match cmd {
                MockCommand::MockPong { node_id } => {
                    this.queue_pong(node_id);
                }
                MockCommand::MockNeighbours { target, nodes } => {
                    this.queue_neighbours(target, nodes);
                }
            }
        }
        while let Poll::Ready(Some(event)) = this.ingress.poll_recv(cx) {
            match event {
                IngressEvent::RecvError(_) => {}
                IngressEvent::BadPacket(from, err, data) => {
                    debug!(target: "discv4", ?from, %err, packet=?hex::encode(&data), "bad packet");
                }
                IngressEvent::Packet(remote_addr, Packet { msg, node_id, hash }) => match msg {
                    Message::Ping(ping) => {
                        // Only reply if a pong was queued for this sender; consume the entry.
                        if this.pending_pongs.remove(&node_id) {
                            let pong = Pong {
                                to: ping.from,
                                // Pong must echo the hash of the Ping packet.
                                echo: hash,
                                expire: ping.expire,
                                enr_sq: None,
                            };
                            let msg = Message::Pong(pong.clone());
                            this.send_packet(msg, remote_addr);
                            return Poll::Ready(Some(MockEvent::Pong {
                                ping,
                                pong,
                                to: remote_addr,
                            }))
                        }
                    }
                    // Incoming pong/neighbours are irrelevant to the mock and ignored.
                    Message::Pong(_) | Message::Neighbours(_) => {}
                    Message::FindNode(msg) => {
                        // Reply only if neighbours were queued for this lookup target.
                        if let Some(nodes) = this.pending_neighbours.remove(&msg.id) {
                            let msg = Message::Neighbours(Neighbours {
                                nodes: nodes.clone(),
                                expire: this.send_neighbours_timeout(),
                            });
                            this.send_packet(msg, remote_addr);
                            return Poll::Ready(Some(MockEvent::Neighbours {
                                nodes,
                                to: remote_addr,
                            }))
                        }
                    }
                    // ENR request/response handling is not implemented by this mock.
                    Message::EnrRequest(_) | Message::EnrResponse(_) => todo!(),
                },
            }
        }
        Poll::Pending
    }
}
/// Represents the event types produced by the mock service.
#[derive(Debug)]
pub enum MockEvent {
    /// A Pong event, consisting of the original Ping packet, the corresponding Pong packet,
    /// and the recipient's socket address.
    Pong {
        /// The original Ping packet.
        ping: Ping,
        /// The corresponding Pong packet.
        pong: Pong,
        /// The recipient's socket address.
        to: SocketAddr,
    },
    /// A Neighbours event, containing a list of node records and the recipient's socket address.
    Neighbours {
        /// The list of node records.
        nodes: Vec<NodeRecord>,
        /// The recipient's socket address.
        to: SocketAddr,
    },
}
/// Represents commands for interacting with the `MockDiscovery` service.
#[derive(Debug)]
pub enum MockCommand {
    /// A command to simulate a Pong event, including the node ID of the recipient.
    MockPong {
        /// The node ID of the recipient.
        node_id: PeerId,
    },
    /// A command to simulate a Neighbours event, including the target node ID and a list of node
    /// records.
    MockNeighbours {
        /// The target node ID.
        target: PeerId,
        /// The list of node records.
        nodes: Vec<NodeRecord>,
    },
}
/// Creates a new testing instance for [`Discv4`] and its service
// Uses a fixed, arbitrary fork id so tests exercise the EIP-868 "eth" ENR entry.
pub async fn create_discv4() -> (Discv4, Discv4Service) {
    let fork_id = ForkId { hash: ForkHash(hex!("743f3d89")), next: 16191202 };
    create_discv4_with_config(Discv4Config::builder().add_eip868_pair("eth", fork_id).build()).await
}
/// Creates a new testing instance for [`Discv4`] and its service with the given config.
///
/// Generates a fresh keypair and binds an ephemeral UDP port on all interfaces.
pub async fn create_discv4_with_config(config: Discv4Config) -> (Discv4, Discv4Service) {
    let mut rng = thread_rng();
    let socket = SocketAddr::from_str("0.0.0.0:0").unwrap();
    let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
    let id = pk2id(&pk);
    // The record advertises the pre-bind address/port (0.0.0.0:0) — tests only care about
    // the id; the actual bound port is resolved inside `Discv4::bind`.
    let local_enr =
        NodeRecord { address: socket.ip(), tcp_port: socket.port(), udp_port: socket.port(), id };
    Discv4::bind(socket, local_enr, secret_key, config).await.unwrap()
}
/// Generates a random [`NodeEndpoint`] using the provided random number generator.
pub fn rng_endpoint(rng: &mut impl Rng) -> NodeEndpoint {
    // Coin flip picks the address family; RNG call order matches the original exactly.
    let use_ipv4: bool = rng.r#gen();
    let address = if use_ipv4 {
        let mut octets = [0u8; 4];
        rng.fill_bytes(&mut octets);
        IpAddr::V4(octets.into())
    } else {
        let mut octets = [0u8; 16];
        rng.fill_bytes(&mut octets);
        IpAddr::V6(octets.into())
    };
    let tcp_port = rng.r#gen();
    let udp_port = rng.r#gen();
    NodeEndpoint { address, tcp_port, udp_port }
}
/// Generates a random [`NodeRecord`] using the provided random number generator.
pub fn rng_record(rng: &mut impl RngCore) -> NodeRecord {
    let endpoint = rng_endpoint(rng);
    // TODO(rand)
    NodeRecord {
        address: endpoint.address,
        tcp_port: endpoint.tcp_port,
        udp_port: endpoint.udp_port,
        id: B512::random(),
    }
}
/// Generates a random IPv6 [`NodeRecord`] using the provided random number generator.
pub fn rng_ipv6_record(rng: &mut impl RngCore) -> NodeRecord {
    // 16 random octets form the IPv6 address; ports and id follow in the same RNG order.
    let mut octets = [0u8; 16];
    rng.fill_bytes(&mut octets);
    // TODO(rand)
    NodeRecord {
        address: IpAddr::V6(octets.into()),
        tcp_port: rng.r#gen(),
        udp_port: rng.r#gen(),
        id: B512::random(),
    }
}
/// Generates a random IPv4 [`NodeRecord`] using the provided random number generator.
pub fn rng_ipv4_record(rng: &mut impl RngCore) -> NodeRecord {
    // 4 random octets form the IPv4 address; ports and id follow in the same RNG order.
    let mut octets = [0u8; 4];
    rng.fill_bytes(&mut octets);
    // TODO(rand)
    NodeRecord {
        address: IpAddr::V4(octets.into()),
        tcp_port: rng.r#gen(),
        udp_port: rng.r#gen(),
        id: B512::random(),
    }
}
/// Generates a random [`Message`] using the provided random number generator.
// Note: the order of RNG draws is part of the behavior (deterministic with a seeded rng),
// so the variants draw their fields in the declared field order.
pub fn rng_message(rng: &mut impl RngCore) -> Message {
    // Uniformly pick one of the four basic packet types (ENR messages are not generated).
    match rng.gen_range(1..=4) {
        1 => Message::Ping(Ping {
            from: rng_endpoint(rng),
            to: rng_endpoint(rng),
            expire: rng.r#gen(),
            enr_sq: None,
        }),
        2 => Message::Pong(Pong {
            to: rng_endpoint(rng),
            echo: B256::random(),
            expire: rng.r#gen(),
            enr_sq: None,
        }),
        3 => Message::FindNode(FindNode { id: B512::random(), expire: rng.r#gen() }),
        4 => {
            // Bounded so the encoded Neighbours packet stays within a single datagram.
            let num: usize = rng.gen_range(1..=SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS);
            Message::Neighbours(Neighbours {
                nodes: std::iter::repeat_with(|| rng_record(rng)).take(num).collect(),
                expire: rng.r#gen(),
            })
        }
        _ => unreachable!(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Discv4Event;
    use std::net::Ipv4Addr;
    /// This test creates two local UDP sockets. The mocked discovery service responds to specific
    /// messages and we check the actual service receives answers
    #[tokio::test]
    async fn can_mock_discovery() {
        reth_tracing::init_test_tracing();
        let mut rng = thread_rng();
        let (_, mut service) = create_discv4().await;
        let (mut mockv4, _cmd) = MockDiscovery::new().await.unwrap();
        let mock_enr = mockv4.local_enr();
        // we only want to test internally
        service.local_enr_mut().address = IpAddr::V4(Ipv4Addr::UNSPECIFIED);
        let discv_addr = service.local_addr();
        let discv_enr = service.local_enr();
        // make sure it responds with a Pong
        mockv4.queue_pong(discv_enr.id);
        // This sends a ping to the mock service
        service.add_node(mock_enr);
        // process the mock pong
        let event = mockv4.next().await.unwrap();
        match event {
            MockEvent::Pong { ping: _, pong: _, to } => {
                // The mock must have replied to the service's UDP port on loopback.
                assert_eq!(to, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), discv_addr.port()));
            }
            MockEvent::Neighbours { .. } => {
                unreachable!("invalid response")
            }
        }
        // discovery service received mocked pong
        let event = service.next().await.unwrap();
        assert_eq!(event, Discv4Event::Pong);
        // The endpoint proof is established, so the mock node is now in the table.
        assert!(service.contains_node(mock_enr.id));
        let mock_nodes =
            std::iter::repeat_with(|| rng_record(&mut rng)).take(5).collect::<Vec<_>>();
        mockv4.queue_neighbours(discv_enr.id, mock_nodes.clone());
        // start lookup
        service.lookup_self();
        let event = mockv4.next().await.unwrap();
        match event {
            MockEvent::Pong { .. } => {
                unreachable!("invalid response")
            }
            MockEvent::Neighbours { nodes, to } => {
                assert_eq!(to, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), discv_addr.port()));
                assert_eq!(nodes, mock_nodes);
            }
        }
        // discovery service received mocked pong
        let event = service.next().await.unwrap();
        assert_eq!(event, Discv4Event::Neighbours);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/error.rs | crates/net/discv4/src/error.rs | //! Error types that can occur in this crate.
use tokio::sync::{mpsc::error::SendError, oneshot::error::RecvError};
/// Error thrown when decoding a UDP packet.
// Note: each variant previously carried two identical doc comments (one above and one below
// the `#[error]` attribute); they are merged into a single doc per variant here.
#[derive(Debug, thiserror::Error)]
pub enum DecodePacketError {
    /// Failed to RLP decode the packet payload.
    #[error("failed to rlp decode: {0}")]
    Rlp(#[from] alloy_rlp::Error),
    /// The received packet is shorter than the minimum valid packet length.
    #[error("received packet length is too short")]
    PacketTooShort,
    /// The hash in the packet header does not match the hash of the packet data.
    #[error("header/data hash mismatch")]
    HashMismatch,
    /// The packet announced a message ID this implementation does not support.
    #[error("message ID {0} is not supported")]
    UnknownMessage(u8),
    /// Failed to recover the sender's public key from the packet signature.
    #[error("failed to recover public key: {0}")]
    Secp256k1(#[from] secp256k1::Error),
}
/// High level errors that can occur when interacting with the discovery service
#[derive(Debug, thiserror::Error)]
pub enum Discv4Error {
    /// Failed to send a command over the channel (the receiving end was dropped).
    #[error("failed to send on a closed channel")]
    Send,
    /// Failed to receive a command response (the responding oneshot sender was dropped).
    #[error(transparent)]
    Receive(#[from] RecvError),
}
// Any mpsc send failure collapses into `Send`; the unsent value is intentionally discarded.
impl<T> From<SendError<T>> for Discv4Error {
    fn from(_: SendError<T>) -> Self {
        Self::Send
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/discv4/src/proto.rs | crates/net/discv4/src/proto.rs | //! Discovery v4 protocol implementation.
use crate::{error::DecodePacketError, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
use alloy_primitives::{
bytes::{Buf, BufMut, Bytes, BytesMut},
keccak256, B256,
};
use alloy_rlp::{
Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable,
RlpEncodableWrapper,
};
use enr::Enr;
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_peers::{pk2id, NodeRecord, PeerId};
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
SecretKey, SECP256K1,
};
use std::net::{IpAddr, Ipv4Addr};
// Note: this is adapted from https://github.com/vorot93/discv4
/// Represents the identifier for message variants.
///
/// This enumeration assigns unique identifiers (u8 values) to different message types,
/// matching the packet-type bytes of the discv4 wire protocol.
#[derive(Debug)]
#[repr(u8)]
pub enum MessageId {
    /// Ping message identifier.
    Ping = 1,
    /// Pong message identifier.
    Pong = 2,
    /// Find node message identifier.
    FindNode = 3,
    /// Neighbours message identifier.
    Neighbours = 4,
    /// ENR request message identifier.
    EnrRequest = 5,
    /// ENR response message identifier.
    EnrResponse = 6,
}
impl MessageId {
    /// Converts the byte that represents the message id to the enum.
    ///
    /// Returns the unknown byte as the error value for ids outside `1..=6`.
    const fn from_u8(msg: u8) -> Result<Self, u8> {
        match msg {
            1 => Ok(Self::Ping),
            2 => Ok(Self::Pong),
            3 => Ok(Self::FindNode),
            4 => Ok(Self::Neighbours),
            5 => Ok(Self::EnrRequest),
            6 => Ok(Self::EnrResponse),
            _ => Err(msg),
        }
    }
}
/// Enum representing various message types exchanged in the Discovery v4 protocol.
#[derive(Debug, Eq, PartialEq)]
pub enum Message {
    /// Represents a ping message sent during liveness checks.
    Ping(Ping),
    /// Represents a pong message, which is a reply to a PING message.
    Pong(Pong),
    /// Represents a query for nodes in the given bucket.
    FindNode(FindNode),
    /// Represents a neighbour message, providing information about nearby nodes.
    Neighbours(Neighbours),
    /// Represents an ENR request message, a request for Ethereum Node Records (ENR) as per [EIP-778](https://eips.ethereum.org/EIPS/eip-778).
    EnrRequest(EnrRequest),
    /// Represents an ENR response message, a response to an ENR request with Ethereum Node Records (ENR) as per [EIP-778](https://eips.ethereum.org/EIPS/eip-778).
    EnrResponse(EnrResponse),
}
// === impl Message ===
impl Message {
    /// Returns the id for this type
    pub const fn msg_type(&self) -> MessageId {
        match self {
            Self::Ping(_) => MessageId::Ping,
            Self::Pong(_) => MessageId::Pong,
            Self::FindNode(_) => MessageId::FindNode,
            Self::Neighbours(_) => MessageId::Neighbours,
            Self::EnrRequest(_) => MessageId::EnrRequest,
            Self::EnrResponse(_) => MessageId::EnrResponse,
        }
    }
    /// Encodes the UDP datagram, See <https://github.com/ethereum/devp2p/blob/master/discv4.md#wire-protocol>
    ///
    /// The datagram is `header || payload`
    /// where header is `hash || signature || packet-type`
    ///
    /// Returns the encoded datagram and the packet hash (used as `echo` in Pong replies).
    pub fn encode(&self, secret_key: &SecretKey) -> (Bytes, B256) {
        // allocate max packet size
        let mut datagram = BytesMut::with_capacity(MAX_PACKET_SIZE);
        // since signature has fixed len, we can split and fill the datagram buffer at fixed
        // positions, this way we can encode the message directly in the datagram buffer
        let mut sig_bytes = datagram.split_off(B256::len_bytes());
        // +1 accounts for the packet-type byte that sits between signature and payload.
        let mut payload = sig_bytes.split_off(secp256k1::constants::COMPACT_SIGNATURE_SIZE + 1);
        // Put the message type at the beginning of the payload
        payload.put_u8(self.msg_type() as u8);
        // Match the message type and encode the corresponding message into the payload
        match self {
            Self::Ping(message) => message.encode(&mut payload),
            Self::Pong(message) => message.encode(&mut payload),
            Self::FindNode(message) => message.encode(&mut payload),
            Self::Neighbours(message) => message.encode(&mut payload),
            Self::EnrRequest(message) => message.encode(&mut payload),
            Self::EnrResponse(message) => message.encode(&mut payload),
        }
        // Sign the payload with the secret key using recoverable ECDSA
        let signature: RecoverableSignature = SECP256K1.sign_ecdsa_recoverable(
            &secp256k1::Message::from_digest(keccak256(&payload).0),
            secret_key,
        );
        // Serialize the signature and append it to the signature bytes
        let (rec, sig) = signature.serialize_compact();
        sig_bytes.extend_from_slice(&sig);
        // Recovery id is appended as a single byte after the 64-byte compact signature.
        sig_bytes.put_u8(i32::from(rec) as u8);
        sig_bytes.unsplit(payload);
        // Calculate the hash of the signature bytes and append it to the datagram
        let hash = keccak256(&sig_bytes);
        datagram.extend_from_slice(hash.as_slice());
        // Append the signature bytes to the datagram
        datagram.unsplit(sig_bytes);
        // Return the frozen datagram and the hash
        (datagram.freeze(), hash)
    }
    /// Decodes the [`Message`] from the given buffer.
    ///
    /// Returns the decoded message and the public key of the sender.
    ///
    /// # Errors
    /// Returns a [`DecodePacketError`] if the packet is too short, the hash or signature
    /// doesn't verify, or the message id/payload is invalid.
    pub fn decode(packet: &[u8]) -> Result<Packet, DecodePacketError> {
        if packet.len() < MIN_PACKET_SIZE {
            return Err(DecodePacketError::PacketTooShort)
        }
        // parses the wire-protocol, every packet starts with a header:
        // packet-header = hash || signature || packet-type
        // hash = keccak256(signature || packet-type || packet-data)
        // signature = sign(packet-type || packet-data)
        let header_hash = keccak256(&packet[32..]);
        let data_hash = B256::from_slice(&packet[..32]);
        if data_hash != header_hash {
            return Err(DecodePacketError::HashMismatch)
        }
        // Byte layout: [0..32] hash, [32..96] compact signature, [96] recovery id,
        // [97] message type, [98..] RLP payload.
        let signature = &packet[32..96];
        let recovery_id = RecoveryId::try_from(packet[96] as i32)?;
        let recoverable_sig = RecoverableSignature::from_compact(signature, recovery_id)?;
        // recover the public key
        let msg = secp256k1::Message::from_digest(keccak256(&packet[97..]).0);
        let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?;
        let node_id = pk2id(&pk);
        let msg_type = packet[97];
        let payload = &mut &packet[98..];
        let msg = match MessageId::from_u8(msg_type).map_err(DecodePacketError::UnknownMessage)? {
            MessageId::Ping => Self::Ping(Ping::decode(payload)?),
            MessageId::Pong => Self::Pong(Pong::decode(payload)?),
            MessageId::FindNode => Self::FindNode(FindNode::decode(payload)?),
            MessageId::Neighbours => Self::Neighbours(Neighbours::decode(payload)?),
            MessageId::EnrRequest => Self::EnrRequest(EnrRequest::decode(payload)?),
            MessageId::EnrResponse => Self::EnrResponse(EnrResponse::decode(payload)?),
        };
        Ok(Packet { msg, node_id, hash: header_hash })
    }
}
/// Represents a decoded packet.
///
/// This struct holds information about a decoded packet, including the message, node ID, and hash.
#[derive(Debug)]
pub struct Packet {
    /// The decoded message from the packet.
    pub msg: Message,
    /// The ID of the peer that sent the packet (recovered from the packet signature).
    pub node_id: PeerId,
    /// The hash of the packet (keccak256 over signature || packet-type || packet-data).
    pub hash: B256,
}
/// Represents the `from` field in the `Ping` packet
// Newtype wrapper: encoding is identical to `NodeEndpoint`, but decoding is lenient about
// an empty ip address (see the `Decodable` impl below).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodableWrapper)]
struct PingNodeEndpoint(NodeEndpoint);
impl alloy_rlp::Decodable for PingNodeEndpoint {
    /// Decodes the endpoint list, tolerating an empty ip field the way geth does.
    #[inline]
    fn decode(b: &mut &[u8]) -> alloy_rlp::Result<Self> {
        let alloy_rlp::Header { list, payload_length } = alloy_rlp::Header::decode(b)?;
        if !list {
            return Err(alloy_rlp::Error::UnexpectedString);
        }
        let started_len = b.len();
        if started_len < payload_length {
            return Err(alloy_rlp::Error::InputTooShort);
        }
        // Geth allows the ipaddr to be possibly empty:
        // <https://github.com/ethereum/go-ethereum/blob/380688c636a654becc8f114438c2a5d93d2db032/p2p/discover/v4_udp.go#L206-L209>
        // <https://github.com/ethereum/go-ethereum/blob/380688c636a654becc8f114438c2a5d93d2db032/p2p/enode/node.go#L189-L189>
        //
        // Therefore, if we see an empty list instead of a properly formed `IpAddr`, we will
        // instead use `IpV4Addr::UNSPECIFIED`
        let address =
            if *b.first().ok_or(alloy_rlp::Error::InputTooShort)? == alloy_rlp::EMPTY_STRING_CODE {
                let addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED);
                // Consume the single empty-string byte we just inspected.
                b.advance(1);
                addr
            } else {
                alloy_rlp::Decodable::decode(b)?
            };
        let this = NodeEndpoint {
            address,
            udp_port: alloy_rlp::Decodable::decode(b)?,
            tcp_port: alloy_rlp::Decodable::decode(b)?,
        };
        // The list must be consumed exactly; trailing or missing bytes are an error.
        let consumed = started_len - b.len();
        if consumed != payload_length {
            return Err(alloy_rlp::Error::ListLengthMismatch {
                expected: payload_length,
                got: consumed,
            });
        }
        Ok(Self(this))
    }
}
/// Represents the `from`, `to` fields in the packets
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)]
pub struct NodeEndpoint {
    /// The IP address of the network endpoint. It can be either IPv4 or IPv6.
    pub address: IpAddr,
    /// The UDP port used for communication in the discovery protocol.
    pub udp_port: u16,
    /// The TCP port used for communication in the `RLPx` protocol.
    pub tcp_port: u16,
}
impl From<NodeRecord> for NodeEndpoint {
fn from(NodeRecord { address, tcp_port, udp_port, .. }: NodeRecord) -> Self {
Self { address, tcp_port, udp_port }
}
}
impl NodeEndpoint {
    /// Creates a new [`NodeEndpoint`] from a given UDP address and TCP port.
    pub const fn from_udp_address(udp_address: &std::net::SocketAddr, tcp_port: u16) -> Self {
        let address = udp_address.ip();
        let udp_port = udp_address.port();
        Self { address, udp_port, tcp_port }
    }
}
/// A [FindNode packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#findnode-packet-0x03).
// Note: only `RlpEncodable` is derived; decoding is manual to satisfy EIP-8 (see below).
#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)]
pub struct FindNode {
    /// The target node's ID, a 64-byte secp256k1 public key.
    pub id: PeerId,
    /// The expiration timestamp of the packet, an absolute UNIX time stamp.
    pub expire: u64,
}
impl Decodable for FindNode {
    // NOTE(onbjerg): Manual implementation to satisfy EIP-8.
    //
    // See https://eips.ethereum.org/EIPS/eip-8
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Work on a copy so `buf` is only advanced once decoding fully succeeds.
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();
        let this = Self { id: Decodable::decode(b)?, expire: Decodable::decode(b)? };
        // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the
        // payload length, i.e. it is ok if payload length is greater than what we consumed, as we
        // just discard the remaining list items
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        // Skip any extra (forward-compatible) trailing list items.
        let rem = rlp_head.payload_length - consumed;
        b.advance(rem);
        *buf = *b;
        Ok(this)
    }
}
/// A [Neighbours packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#neighbors-packet-0x04).
// Note: only `RlpEncodable` is derived; decoding is manual to satisfy EIP-8 (see below).
#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)]
pub struct Neighbours {
    /// The list of nodes containing IP, UDP port, TCP port, and node ID.
    pub nodes: Vec<NodeRecord>,
    /// The expiration timestamp of the packet, an absolute UNIX time stamp.
    pub expire: u64,
}
impl Decodable for Neighbours {
    // NOTE(onbjerg): Manual implementation to satisfy EIP-8.
    //
    // See https://eips.ethereum.org/EIPS/eip-8
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Work on a copy so `buf` is only advanced once decoding fully succeeds.
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();
        let this = Self { nodes: Decodable::decode(b)?, expire: Decodable::decode(b)? };
        // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the
        // payload length, i.e. it is ok if payload length is greater than what we consumed, as we
        // just discard the remaining list items
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        // Skip any extra (forward-compatible) trailing list items.
        let rem = rlp_head.payload_length - consumed;
        b.advance(rem);
        *buf = *b;
        Ok(this)
    }
}
/// A [ENRRequest packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrrequest-packet-0x05).
///
/// This packet is used to request the current version of a node's Ethereum Node Record (ENR).
// Note: only `RlpEncodable` is derived; decoding is manual to satisfy EIP-8 (see below).
#[derive(Clone, Copy, Debug, Eq, PartialEq, RlpEncodable)]
pub struct EnrRequest {
    /// The expiration timestamp for the request. No reply should be sent if it refers to a time in
    /// the past.
    pub expire: u64,
}
impl Decodable for EnrRequest {
    // NOTE(onbjerg): Manual implementation to satisfy EIP-8.
    //
    // See https://eips.ethereum.org/EIPS/eip-8
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Work on a copy so `buf` is only advanced once decoding fully succeeds.
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();
        let this = Self { expire: Decodable::decode(b)? };
        // NOTE(onbjerg): Because of EIP-8, we only check that we did not consume *more* than the
        // payload length, i.e. it is ok if payload length is greater than what we consumed, as we
        // just discard the remaining list items
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        // Skip any extra (forward-compatible) trailing list items.
        let rem = rlp_head.payload_length - consumed;
        b.advance(rem);
        *buf = *b;
        Ok(this)
    }
}
/// A [ENRResponse packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrresponse-packet-0x06).
///
/// This packet is used to respond to an `ENRRequest` packet and includes the requested ENR along
/// with the hash of the original request, which ties the response to its request.
#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable, RlpDecodable)]
pub struct EnrResponse {
    /// The hash of the `ENRRequest` packet being replied to.
    pub request_hash: B256,
    /// The ENR (Ethereum Node Record) for the responding node.
    pub enr: Enr<SecretKey>,
}
// === impl EnrResponse ===
impl EnrResponse {
    /// Returns the [`ForkId`] if set
    ///
    /// Looks up the raw RLP value stored under the `"eth"` key of the ENR and attempts to decode
    /// it as an [`EnrForkIdEntry`]; returns `None` if the key is absent or the value is not
    /// decodable.
    ///
    /// See also <https://github.com/ethereum/go-ethereum/blob/9244d5cd61f3ea5a7645fdf2a1a96d53421e412f/eth/protocols/eth/discovery.go#L36>
    pub fn eth_fork_id(&self) -> Option<ForkId> {
        let mut maybe_fork_id = self.enr.get_raw_rlp(b"eth")?;
        EnrForkIdEntry::decode(&mut maybe_fork_id).ok().map(Into::into)
    }
}
/// Represents a Ping packet.
///
/// A [Ping packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#ping-packet-0x01).
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Ping {
    /// The sender's endpoint.
    pub from: NodeEndpoint,
    /// The recipient's endpoint.
    pub to: NodeEndpoint,
    /// The expiration timestamp.
    pub expire: u64,
    /// Optional `enr_seq` for <https://eips.ethereum.org/EIPS/eip-868>
    ///
    /// NOTE(review): the field name drops the `e` of `seq`; it is public API, so renaming it
    /// would be a breaking change for callers.
    pub enr_sq: Option<u64>,
}
impl Encodable for Ping {
    /// RLP-encodes the ping, emitting the EIP-868 form (with a trailing `enr_seq` list element)
    /// whenever an ENR sequence number is present, and the legacy four-element form otherwise.
    fn encode(&self, out: &mut dyn BufMut) {
        /// Legacy (pre-EIP-868) wire format: `[version, from, to, expire]`.
        #[derive(RlpEncodable)]
        struct LegacyPing<'a> {
            version: u32,
            from: &'a NodeEndpoint,
            to: &'a NodeEndpoint,
            expire: u64,
        }
        /// EIP-868 wire format: `[version, from, to, expire, enr_seq]`.
        #[derive(RlpEncodable)]
        struct Eip868Ping<'a> {
            version: u32,
            from: &'a NodeEndpoint,
            to: &'a NodeEndpoint,
            expire: u64,
            enr_seq: u64,
        }
        match self.enr_sq {
            Some(enr_seq) => Eip868Ping {
                // discovery protocol version 4
                version: 4,
                from: &self.from,
                to: &self.to,
                expire: self.expire,
                enr_seq,
            }
            .encode(out),
            None => LegacyPing {
                // discovery protocol version 4
                version: 4,
                from: &self.from,
                to: &self.to,
                expire: self.expire,
            }
            .encode(out),
        }
    }
}
impl Decodable for Ping {
    /// Decodes a Ping from an RLP list, tolerating extra trailing list elements per EIP-8.
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Work on a local copy so `buf` is only advanced on success.
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();
        // > Implementations should ignore any mismatches in version:
        // <https://github.com/ethereum/devp2p/blob/master/discv4.md#ping-packet-0x01>
        let _version = u32::decode(b)?;
        // see `Decodable` implementation in `PingNodeEndpoint` for why this is needed
        let from = PingNodeEndpoint::decode(b)?.0;
        let mut this =
            Self { from, to: Decodable::decode(b)?, expire: Decodable::decode(b)?, enr_sq: None };
        // only decode the ENR sequence if there's more data in the datagram to decode else skip
        //
        // NOTE(review): `has_remaining` inspects the whole remaining buffer, not just this list's
        // payload — confirm callers always pass exactly one packet per buffer.
        if b.has_remaining() {
            this.enr_sq = Some(Decodable::decode(b)?);
        }
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        // Skip extra list elements (EIP-8) and commit consumed bytes to the caller's buffer.
        let rem = rlp_head.payload_length - consumed;
        b.advance(rem);
        *buf = *b;
        Ok(this)
    }
}
/// Represents a Pong packet.
///
/// A [Pong packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#pong-packet-0x02).
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Pong {
    /// The recipient's endpoint.
    pub to: NodeEndpoint,
    /// The hash of the corresponding ping packet.
    pub echo: B256,
    /// The expiration timestamp.
    pub expire: u64,
    /// Optional `enr_seq` for <https://eips.ethereum.org/EIPS/eip-868>
    ///
    /// NOTE(review): field name drops the `e` of `seq`; public API, so it is kept as-is.
    pub enr_sq: Option<u64>,
}
impl Encodable for Pong {
    /// RLP-encodes the pong, appending the EIP-868 `enr_seq` element when an ENR sequence
    /// number is present, and using the legacy three-element form otherwise.
    fn encode(&self, out: &mut dyn BufMut) {
        /// Legacy (pre-EIP-868) wire format: `[to, echo, expire]`.
        #[derive(RlpEncodable)]
        struct LegacyPong<'a> {
            to: &'a NodeEndpoint,
            echo: &'a B256,
            expire: u64,
        }
        /// EIP-868 wire format: `[to, echo, expire, enr_seq]`.
        #[derive(RlpEncodable)]
        struct Eip868Pong<'a> {
            to: &'a NodeEndpoint,
            echo: &'a B256,
            expire: u64,
            enr_seq: u64,
        }
        match self.enr_sq {
            Some(enr_seq) => {
                Eip868Pong { to: &self.to, echo: &self.echo, expire: self.expire, enr_seq }
                    .encode(out)
            }
            None => LegacyPong { to: &self.to, echo: &self.echo, expire: self.expire }.encode(out),
        }
    }
}
impl Decodable for Pong {
    /// Decodes a Pong from an RLP list, tolerating extra trailing list elements per EIP-8.
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        // Work on a local copy so `buf` is only advanced on success.
        let b = &mut &**buf;
        let rlp_head = Header::decode(b)?;
        if !rlp_head.list {
            return Err(RlpError::UnexpectedString)
        }
        let started_len = b.len();
        let mut this = Self {
            to: Decodable::decode(b)?,
            echo: Decodable::decode(b)?,
            expire: Decodable::decode(b)?,
            enr_sq: None,
        };
        // only decode the ENR sequence if there's more data in the datagram to decode else skip
        //
        // NOTE(review): `has_remaining` looks at the whole remaining buffer rather than just this
        // list's payload — confirm callers pass exactly one packet per buffer.
        if b.has_remaining() {
            this.enr_sq = Some(Decodable::decode(b)?);
        }
        let consumed = started_len - b.len();
        if consumed > rlp_head.payload_length {
            return Err(RlpError::ListLengthMismatch {
                expected: rlp_head.payload_length,
                got: consumed,
            })
        }
        // Skip extra list elements (EIP-8) and commit consumed bytes to the caller's buffer.
        let rem = rlp_head.payload_length - consumed;
        b.advance(rem);
        *buf = *b;
        Ok(this)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
test_utils::{rng_endpoint, rng_ipv4_record, rng_ipv6_record, rng_message},
DEFAULT_DISCOVERY_PORT, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
};
use alloy_primitives::hex;
use assert_matches::assert_matches;
use enr::EnrPublicKey;
use rand_08::{thread_rng as rng, Rng, RngCore};
use reth_ethereum_forks::ForkHash;
#[test]
fn test_endpoint_ipv_v4() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 4];
rng.fill_bytes(&mut ip);
let msg = NodeEndpoint {
address: IpAddr::V4(ip.into()),
tcp_port: rng.r#gen(),
udp_port: rng.r#gen(),
};
let decoded = NodeEndpoint::decode(&mut alloy_rlp::encode(msg).as_slice()).unwrap();
assert_eq!(msg, decoded);
}
}
#[test]
fn test_endpoint_ipv_64() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let msg = NodeEndpoint {
address: IpAddr::V6(ip.into()),
tcp_port: rng.r#gen(),
udp_port: rng.r#gen(),
};
let decoded = NodeEndpoint::decode(&mut alloy_rlp::encode(msg).as_slice()).unwrap();
assert_eq!(msg, decoded);
}
}
#[test]
fn test_ping_message() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let msg = Ping {
from: rng_endpoint(&mut rng),
to: rng_endpoint(&mut rng),
expire: 0,
enr_sq: None,
};
let decoded = Ping::decode(&mut alloy_rlp::encode(&msg).as_slice()).unwrap();
assert_eq!(msg, decoded);
}
}
#[test]
fn test_ping_message_with_enr() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let msg = Ping {
from: rng_endpoint(&mut rng),
to: rng_endpoint(&mut rng),
expire: 0,
enr_sq: Some(rng.r#gen()),
};
let decoded = Ping::decode(&mut alloy_rlp::encode(&msg).as_slice()).unwrap();
assert_eq!(msg, decoded);
}
}
#[test]
fn test_pong_message() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let msg = Pong {
to: rng_endpoint(&mut rng),
echo: B256::random(),
expire: rng.r#gen(),
enr_sq: None,
};
let decoded = Pong::decode(&mut alloy_rlp::encode(&msg).as_slice()).unwrap();
assert_eq!(msg, decoded);
}
}
#[test]
fn test_pong_message_with_enr() {
let mut rng = rng();
for _ in 0..100 {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let msg = Pong {
to: rng_endpoint(&mut rng),
echo: B256::random(),
expire: rng.r#gen(),
enr_sq: Some(rng.r#gen()),
};
let decoded = Pong::decode(&mut alloy_rlp::encode(&msg).as_slice()).unwrap();
assert_eq!(msg, decoded);
}
}
#[test]
fn test_hash_mismatch() {
let mut rng = rng();
let msg = rng_message(&mut rng);
let (secret_key, _) = SECP256K1.generate_keypair(&mut rng);
let (buf, _) = msg.encode(&secret_key);
let mut buf_vec = buf.to_vec();
buf_vec.push(0);
match Message::decode(buf_vec.as_slice()).unwrap_err() {
DecodePacketError::HashMismatch => {}
err => {
unreachable!("unexpected err {}", err)
}
}
}
#[test]
fn neighbours_max_ipv4() {
let mut rng = rng();
let msg = Message::Neighbours(Neighbours {
nodes: std::iter::repeat_with(|| rng_ipv4_record(&mut rng)).take(16).collect(),
expire: rng.r#gen(),
});
let (secret_key, _) = SECP256K1.generate_keypair(&mut rng);
let (encoded, _) = msg.encode(&secret_key);
// Assert that 16 nodes never fit into one packet
assert!(encoded.len() > MAX_PACKET_SIZE, "{} {msg:?}", encoded.len());
}
#[test]
fn neighbours_max_nodes() {
let mut rng = rng();
for _ in 0..1000 {
let msg = Message::Neighbours(Neighbours {
nodes: std::iter::repeat_with(|| rng_ipv6_record(&mut rng))
.take(SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS)
.collect(),
expire: rng.r#gen(),
});
let (secret_key, _) = SECP256K1.generate_keypair(&mut rng);
let (encoded, _) = msg.encode(&secret_key);
assert!(encoded.len() <= MAX_PACKET_SIZE, "{} {msg:?}", encoded.len());
let mut neighbours = Neighbours {
nodes: std::iter::repeat_with(|| rng_ipv6_record(&mut rng))
.take(SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS - 1)
.collect(),
expire: rng.r#gen(),
};
neighbours.nodes.push(rng_ipv4_record(&mut rng));
let msg = Message::Neighbours(neighbours);
let (encoded, _) = msg.encode(&secret_key);
assert!(encoded.len() <= MAX_PACKET_SIZE, "{} {msg:?}", encoded.len());
}
}
#[test]
fn test_encode_decode_message() {
let mut rng = rng();
for _ in 0..100 {
let msg = rng_message(&mut rng);
let (secret_key, pk) = SECP256K1.generate_keypair(&mut rng);
let sender_id = pk2id(&pk);
let (buf, _) = msg.encode(&secret_key);
let packet = Message::decode(buf.as_ref()).unwrap();
assert_eq!(msg, packet.msg);
assert_eq!(sender_id, packet.node_id);
}
}
#[test]
fn decode_pong_packet() {
let packet = "2ad84c37327a06c2522cf7bc039621da89f68907441b755935bb308dc4cd17d6fe550e90329ad6a516ca7db18e08900067928a0dfa3b5c75d55a42c984497373698d98616662c048983ea85895ea2da765eabeb15525478384e106337bfd8ed50002f3c9843ed8cae682fd1c80a008ad4dead0922211df47593e7d837b2b23d13954285871ca23250ea594993ded84635690e5829670";
let data = hex::decode(packet).unwrap();
Message::decode(&data).unwrap();
}
#[test]
fn decode_ping_packet() {
let packet = "05ae5bf922cf2a93f97632a4ab0943dc252a0dab0c42d86dd62e5d91e1a0966e9b628fbf4763fdfbb928540460b797e6be2e7058a82f6083f6d2e7391bb021741459976d4152aa16bbee0c3609dcfac6668db1ef78b7ee9f8b4ced10dd5ae2900101df04cb8403d12d4f82765f82765fc9843ed8cae6828aa6808463569916829670";
let data = hex::decode(packet).unwrap();
Message::decode(&data).unwrap();
}
#[test]
fn encode_decode_enr_msg() {
use alloy_rlp::Decodable;
use enr::secp256k1::SecretKey;
use std::net::Ipv4Addr;
let mut rng = rand_08::rngs::OsRng;
let key = SecretKey::new(&mut rng);
let ip = Ipv4Addr::new(127, 0, 0, 1);
let tcp = 3000;
let fork_id: ForkId = ForkId { hash: ForkHash([220, 233, 108, 45]), next: 0u64 };
let enr = {
let mut builder = Enr::builder();
builder.ip(ip.into());
builder.tcp4(tcp);
let mut buf = Vec::new();
let forkentry = EnrForkIdEntry { fork_id };
forkentry.encode(&mut buf);
builder.add_value_rlp("eth", buf.into());
builder.build(&key).unwrap()
};
let enr_response = EnrResponse { request_hash: B256::random(), enr };
let mut buf = Vec::new();
enr_response.encode(&mut buf);
let decoded = EnrResponse::decode(&mut &buf[..]).unwrap();
let fork_id_decoded = decoded.eth_fork_id().unwrap();
assert_eq!(fork_id, fork_id_decoded);
}
// test vector from the enr library rlp encoding tests
// <https://github.com/sigp/enr/blob/e59dcb45ea07e423a7091d2a6ede4ad6d8ef2840/src/lib.rs#L1019>
#[test]
fn encode_known_rlp_enr() {
use alloy_rlp::Decodable;
use enr::{secp256k1::SecretKey, EnrPublicKey};
use std::net::Ipv4Addr;
let valid_record = hex!(
"f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f"
);
let signature = hex!(
"7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c"
);
let expected_pubkey =
hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138");
let enr = Enr::<SecretKey>::decode(&mut &valid_record[..]).unwrap();
let pubkey = enr.public_key().encode();
assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1)));
assert_eq!(enr.id(), Some(String::from("v4")));
assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT));
assert_eq!(enr.tcp4(), None);
assert_eq!(enr.signature(), &signature[..]);
assert_eq!(pubkey.to_vec(), expected_pubkey);
assert!(enr.verify());
assert_eq!(&alloy_rlp::encode(&enr)[..], &valid_record[..]);
// ensure the length is equal
assert_eq!(enr.length(), valid_record.len());
}
// test vector from the enr library rlp encoding tests
// <https://github.com/sigp/enr/blob/e59dcb45ea07e423a7091d2a6ede4ad6d8ef2840/src/lib.rs#L1019>
#[test]
fn decode_enr_rlp() {
use enr::secp256k1::SecretKey;
use std::net::Ipv4Addr;
let valid_record = hex!(
"f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f"
);
let signature = hex!(
"7098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c"
);
let expected_pubkey =
hex!("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138");
let mut valid_record_buf = valid_record.as_slice();
let enr = Enr::<SecretKey>::decode(&mut valid_record_buf).unwrap();
let pubkey = enr.public_key().encode();
// Byte array must be consumed after enr has finished decoding
assert!(valid_record_buf.is_empty());
assert_eq!(enr.ip4(), Some(Ipv4Addr::new(127, 0, 0, 1)));
assert_eq!(enr.id(), Some(String::from("v4")));
assert_eq!(enr.udp4(), Some(DEFAULT_DISCOVERY_PORT));
assert_eq!(enr.tcp4(), None);
assert_eq!(enr.signature(), &signature[..]);
assert_eq!(pubkey.to_vec(), expected_pubkey);
assert!(enr.verify());
}
// test for failing message decode
#[test]
fn decode_failing_packet() {
let packet = hex!(
"2467ab56952aedf4cfb8bb7830ddc8922d0f992185229919dad9de3841fe95d9b3a7b52459398235f6d3805644666d908b45edb3670414ed97f357afba51f71f7d35c1f45878ba732c3868b04ca42ff0ed347c99efcf3a5768afed68eb21ef960001db04c3808080c9840a480e8f82765f808466a9a06386019106833efe"
);
let _message = Message::decode(&packet[..]).unwrap();
}
// test for failing message decode
#[test]
fn decode_node() {
let packet = hex!("cb840000000082115c82115d");
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/stream.rs | crates/net/ecies/src/stream.rs | //! The ECIES Stream implementation which wraps over [`AsyncRead`] and [`AsyncWrite`].
use crate::{
codec::ECIESCodec, error::ECIESErrorImpl, ECIESError, EgressECIESValue, IngressECIESValue,
};
use alloy_primitives::{
bytes::{Bytes, BytesMut},
B512 as PeerId,
};
use futures::{ready, Sink, SinkExt};
use secp256k1::SecretKey;
use std::{
fmt::Debug,
io,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::{
io::{AsyncRead, AsyncWrite},
time::timeout,
};
use tokio_stream::{Stream, StreamExt};
use tokio_util::codec::{Decoder, Framed};
use tracing::{instrument, trace};
/// Maximum time allowed for the complete ECIES handshake (auth + ack exchange) before the
/// connection attempt is aborted.
const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10);
/// `ECIES` stream over TCP exchanging raw bytes
#[derive(Debug)]
#[pin_project::pin_project]
pub struct ECIESStream<Io> {
    /// The underlying transport, framed with the ECIES codec.
    #[pin]
    stream: Framed<Io, ECIESCodec>,
    /// Peer id of the remote end, established during the handshake.
    remote_id: PeerId,
}
impl<Io> ECIESStream<Io>
where
    Io: AsyncRead + AsyncWrite + Unpin,
{
    /// Connect to an `ECIES` server
    ///
    /// Convenience wrapper around [`Self::connect_with_timeout`] using [`HANDSHAKE_TIMEOUT`].
    #[instrument(skip(transport, secret_key))]
    pub async fn connect(
        transport: Io,
        secret_key: SecretKey,
        remote_id: PeerId,
    ) -> Result<Self, ECIESError> {
        Self::connect_with_timeout(transport, secret_key, remote_id, HANDSHAKE_TIMEOUT).await
    }
    /// Wrapper around `connect_no_timeout` which enforces a timeout.
    pub async fn connect_with_timeout(
        transport: Io,
        secret_key: SecretKey,
        remote_id: PeerId,
        timeout_limit: Duration,
    ) -> Result<Self, ECIESError> {
        timeout(timeout_limit, Self::connect_without_timeout(transport, secret_key, remote_id))
            .await
            .map_err(|_| ECIESError::from(ECIESErrorImpl::StreamTimeout))?
    }
    /// Connect to an `ECIES` server with no timeout.
    ///
    /// Performs the client side of the handshake: sends auth, waits for ack.
    pub async fn connect_without_timeout(
        transport: Io,
        secret_key: SecretKey,
        remote_id: PeerId,
    ) -> Result<Self, ECIESError> {
        // NOTE(review): the original codec-construction error is discarded here and replaced
        // with a generic message — consider propagating it for better diagnostics.
        let ecies = ECIESCodec::new_client(secret_key, remote_id)
            .map_err(|_| io::Error::other("invalid handshake"))?;
        let mut transport = ecies.framed(transport);
        trace!("sending ecies auth ...");
        transport.send(EgressECIESValue::Auth).await?;
        trace!("waiting for ecies ack ...");
        let msg = transport.try_next().await?;
        // `Framed` returns `None` if the underlying stream is no longer readable, and the codec is
        // unable to decode another message from the (partially filled) buffer. This usually happens
        // if the remote drops the TcpStream.
        let msg = msg.ok_or(ECIESErrorImpl::UnreadableStream)?;
        trace!("parsing ecies ack ...");
        if matches!(msg, IngressECIESValue::Ack) {
            Ok(Self { stream: transport, remote_id })
        } else {
            Err(ECIESErrorImpl::InvalidHandshake {
                expected: IngressECIESValue::Ack,
                msg: Some(msg),
            }
            .into())
        }
    }
    /// Listen on a just connected ECIES client
    ///
    /// Performs the server side of the handshake: waits for the client's auth message (which
    /// reveals the remote peer id), then replies with an ack.
    pub async fn incoming(transport: Io, secret_key: SecretKey) -> Result<Self, ECIESError> {
        let ecies = ECIESCodec::new_server(secret_key)?;
        trace!("incoming ecies stream");
        let mut transport = ecies.framed(transport);
        let msg = transport.try_next().await?;
        trace!("receiving ecies auth");
        let remote_id = match &msg {
            Some(IngressECIESValue::AuthReceive(remote_id)) => *remote_id,
            _ => {
                return Err(ECIESErrorImpl::InvalidHandshake {
                    expected: IngressECIESValue::AuthReceive(Default::default()),
                    msg,
                }
                .into())
            }
        };
        trace!("sending ecies ack");
        transport.send(EgressECIESValue::Ack).await?;
        Ok(Self { stream: transport, remote_id })
    }
    /// Get the remote id
    pub const fn remote_id(&self) -> PeerId {
        self.remote_id
    }
}
impl<Io> Stream for ECIESStream<Io>
where
    Io: AsyncRead + Unpin,
{
    type Item = Result<BytesMut, io::Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match ready!(self.project().stream.poll_next(cx)) {
            // Only `Message` frames are valid once the handshake is complete.
            Some(Ok(IngressECIESValue::Message(body))) => Poll::Ready(Some(Ok(body))),
            // Handshake values or codec errors at this point are protocol violations.
            Some(other) => Poll::Ready(Some(Err(io::Error::other(format!(
                "ECIES stream protocol error: expected message, received {other:?}"
            ))))),
            // Underlying stream is exhausted.
            None => Poll::Ready(None),
        }
    }
}
impl<Io> Sink<Bytes> for ECIESStream<Io>
where
    Io: AsyncWrite + Unpin,
{
    type Error = io::Error;
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().stream.poll_ready(cx)
    }
    fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> {
        // Wrap the raw bytes in the egress message variant for the underlying framed codec.
        self.project().stream.start_send(EgressECIESValue::Message(item))?;
        Ok(())
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().stream.poll_flush(cx)
    }
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().stream.poll_close(cx)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use reth_network_peers::pk2id;
    use secp256k1::SECP256K1;
    use tokio::net::{TcpListener, TcpStream};
    /// Full client/server ECIES handshake over a real TCP socket, then one message exchange.
    #[tokio::test]
    async fn can_write_and_read() {
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let server_key = SecretKey::new(&mut rand_08::thread_rng());
        let handle = tokio::spawn(async move {
            // roughly based off of the design of tokio::net::TcpListener
            let (incoming, _) = listener.accept().await.unwrap();
            let mut stream = ECIESStream::incoming(incoming, server_key).await.unwrap();
            // use the stream to get the next message
            let message = stream.next().await.unwrap().unwrap();
            assert_eq!(message, Bytes::from("hello"));
        });
        // create the server pubkey
        let server_id = pk2id(&server_key.public_key(SECP256K1));
        let client_key = SecretKey::new(&mut rand_08::thread_rng());
        let outgoing = TcpStream::connect(addr).await.unwrap();
        let mut client_stream =
            ECIESStream::connect(outgoing, client_key, server_id).await.unwrap();
        client_stream.send(Bytes::from("hello")).await.unwrap();
        // make sure the server receives the message and asserts before ending the test
        handle.await.unwrap();
    }
    /// A server that delays accepting longer than the client's timeout must cause the client
    /// `connect_with_timeout` to fail with `StreamTimeout`.
    #[tokio::test]
    async fn connection_should_timeout() {
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let server_key = SecretKey::new(&mut rand_08::thread_rng());
        let _handle = tokio::spawn(async move {
            // Delay accepting the connection for longer than the client's timeout period
            tokio::time::sleep(Duration::from_secs(11)).await;
            let (incoming, _) = listener.accept().await.unwrap();
            let mut stream = ECIESStream::incoming(incoming, server_key).await.unwrap();
            // use the stream to get the next message
            let message = stream.next().await.unwrap().unwrap();
            assert_eq!(message, Bytes::from("hello"));
        });
        // create the server pubkey
        let server_id = pk2id(&server_key.public_key(SECP256K1));
        let client_key = SecretKey::new(&mut rand_08::thread_rng());
        let outgoing = TcpStream::connect(addr).await.unwrap();
        // Attempt to connect, expecting a timeout due to the server's delayed response
        let connect_result = ECIESStream::connect_with_timeout(
            outgoing,
            client_key,
            server_id,
            Duration::from_secs(1),
        )
        .await;
        // Assert that a timeout error occurred
        assert!(
            matches!(connect_result, Err(e) if e.to_string() == ECIESErrorImpl::StreamTimeout.to_string())
        );
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/lib.rs | crates/net/ecies/src/lib.rs | //! `RLPx` ECIES framed transport protocol.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod algorithm;
pub mod mac;
pub mod stream;
pub mod util;
mod error;
pub use error::{ECIESError, ECIESErrorImpl};
pub mod codec;
use alloy_primitives::{
bytes::{Bytes, BytesMut},
B512 as PeerId,
};
/// Raw egress values for an ECIES protocol
///
/// These are the frames the local peer can write: the two handshake messages plus arbitrary
/// message payloads.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EgressECIESValue {
    /// The AUTH message being sent out
    Auth,
    /// The ACK message being sent out
    Ack,
    /// The message being sent out (wrapped bytes)
    Message(Bytes),
}
/// Raw ingress values for an ECIES protocol
///
/// These are the frames the local peer can receive: the handshake messages (an incoming auth
/// carries the sender's [`PeerId`]) plus arbitrary message payloads.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum IngressECIESValue {
    /// Receiving a message from a [`PeerId`]
    AuthReceive(PeerId),
    /// Receiving an ACK message
    Ack,
    /// Receiving a message
    Message(BytesMut),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/algorithm.rs | crates/net/ecies/src/algorithm.rs | #![allow(missing_docs)]
use crate::{
error::ECIESErrorImpl,
mac::{HeaderBytes, MAC},
util::{hmac_sha256, sha256},
ECIESError,
};
use aes::{cipher::StreamCipher, Aes128, Aes256};
use alloy_primitives::{
bytes::{BufMut, Bytes, BytesMut},
B128, B256, B512 as PeerId,
};
use alloy_rlp::{Encodable, Rlp, RlpEncodable, RlpMaxEncodedLen};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use ctr::Ctr64BE;
use digest::{crypto_common::KeyIvInit, Digest};
use rand_08::{thread_rng as rng, Rng};
use reth_network_peers::{id2pk, pk2id};
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
PublicKey, SecretKey, SECP256K1,
};
use sha2::Sha256;
use sha3::Keccak256;
/// Handshake protocol version — presumably the `RLPx`/devp2p auth version; confirm against the
/// auth/ack message builders later in this file.
const PROTOCOL_VERSION: usize = 4;
/// Computes the shared secret with ECDH and strips the y coordinate after computing the shared
/// secret.
///
/// Performs ECDH between `public_key` and `secret_key` via
/// [`secp256k1::ecdh::shared_secret_point`], which yields the full `(x, y)` curve point; only the
/// 32-byte x coordinate is returned as a [`B256`].
fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> B256 {
    // The returned point is `x || y` (64 bytes); keep just the x coordinate.
    let shared_point = secp256k1::ecdh::shared_secret_point(public_key, secret_key);
    B256::from_slice(&shared_point[..32])
}
/// This is the NIST SP 800-56A Concatenation Key Derivation Function (KDF) using SHA-256.
///
/// Internally this uses [`concat_kdf::derive_key_into`] to derive a key into the given `dest`
/// slice; the length of `dest` determines how much key material is produced.
///
/// # Panics
/// * If the `dest` is empty
/// * If the `dest` len is greater than or equal to the hash output len * the max counter value. In
/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest
/// cannot have a len greater than 32 * 2^32 - 1.
fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) {
    concat_kdf::derive_key_into::<Sha256>(secret.as_slice(), s1, dest).unwrap();
}
pub struct ECIES {
    /// Our static (identity) secret key.
    secret_key: SecretKey,
    /// Public key corresponding to `secret_key`.
    public_key: PublicKey,
    /// The remote peer's static public key (set upfront for outgoing connections; see
    /// `new_static_client`).
    remote_public_key: Option<PublicKey>,
    /// The remote peer id corresponding to `remote_public_key`.
    pub(crate) remote_id: Option<PeerId>,
    /// Per-connection ephemeral secret key.
    ephemeral_secret_key: SecretKey,
    /// Public key corresponding to `ephemeral_secret_key`.
    ephemeral_public_key: PublicKey,
    /// Shared secret derived from the ephemeral keys — presumably via ECDH; the derivation is not
    /// in this portion of the file, confirm against the handshake code.
    ephemeral_shared_secret: Option<B256>,
    /// The remote peer's ephemeral public key, learned during the handshake.
    remote_ephemeral_public_key: Option<PublicKey>,
    /// Our handshake nonce.
    nonce: B256,
    /// The remote peer's handshake nonce.
    remote_nonce: Option<B256>,
    /// AES-256-CTR cipher state for ingress (incoming) frames.
    ingress_aes: Option<Ctr64BE<Aes256>>,
    /// AES-256-CTR cipher state for egress (outgoing) frames.
    egress_aes: Option<Ctr64BE<Aes256>>,
    /// MAC state for ingress frames.
    ingress_mac: Option<MAC>,
    /// MAC state for egress frames.
    egress_mac: Option<MAC>,
    /// Our raw handshake (auth/ack) message — presumably retained for secret/MAC derivation;
    /// confirm against the handshake code.
    init_msg: Option<Bytes>,
    /// The remote peer's raw handshake message — same caveat as `init_msg`.
    remote_init_msg: Option<Bytes>,
    /// Size of the next frame body — presumably parsed from the most recent frame header;
    /// confirm against the frame read path.
    body_size: Option<usize>,
}
impl core::fmt::Debug for ECIES {
    /// Manual `Debug` impl: omits `secret_key` and `ephemeral_secret_key` (presumably to avoid
    /// leaking key material) as well as the AES cipher and MAC-adjacent states that are not
    /// printed — NOTE(review): confirm the omissions are intentional.
    #[inline]
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ECIES")
            .field("public_key", &self.public_key)
            .field("remote_public_key", &self.remote_public_key)
            .field("remote_id", &self.remote_id)
            .field("ephemeral_public_key", &self.ephemeral_public_key)
            .field("ephemeral_shared_secret", &self.ephemeral_shared_secret)
            .field("remote_ephemeral_public_key", &self.remote_ephemeral_public_key)
            .field("nonce", &self.nonce)
            .field("remote_nonce", &self.remote_nonce)
            .field("ingress_mac", &self.ingress_mac)
            .field("egress_mac", &self.egress_mac)
            .field("init_msg", &self.init_msg)
            .field("remote_init_msg", &self.remote_init_msg)
            .field("body_size", &self.body_size)
            .finish()
    }
}
/// Fallible version of [`slice::split_at_mut`]: splits `arr` at `idx`, returning an
/// [`ECIESError`] (instead of panicking) when `idx` is past the end of the slice.
fn split_at_mut<T>(arr: &mut [T], idx: usize) -> Result<(&mut [T], &mut [T]), ECIESError> {
    let len = arr.len();
    if idx <= len {
        // In-bounds: the std split cannot panic here.
        Ok(arr.split_at_mut(idx))
    } else {
        Err(ECIESErrorImpl::OutOfBounds { idx, len }.into())
    }
}
/// A parsed `RLPx` encrypted message
///
/// From the devp2p spec, this should help perform the following operations:
///
/// For Bob to decrypt the message `R || iv || c || d`, he derives the shared secret `S = Px` where
/// `(Px, Py) = kB * R` as well as the encryption and authentication keys `kE || kM = KDF(S, 32)`.
///
/// Bob verifies the authenticity of the message by checking whether `d == MAC(sha256(kM), iv ||
/// c)` then obtains the plaintext as `m = AES(kE, iv || c)`.
#[derive(Debug)]
pub struct EncryptedMessage<'a> {
    /// The auth data, used when checking the `tag` with HMAC-SHA256.
    ///
    /// This is not mentioned in the `RLPx` spec, but included in implementations.
    ///
    /// See source comments of [`Self::check_integrity`] for more information.
    auth_data: [u8; 2],
    /// The remote secp256k1 public key
    public_key: PublicKey,
    /// The IV, for use in AES during decryption, in the tag check
    iv: B128,
    /// The encrypted data; held as a mutable borrow so it can be decrypted in place.
    encrypted_data: &'a mut [u8],
    /// The message tag
    tag: B256,
}
impl<'a> EncryptedMessage<'a> {
    /// Parse the given `data` into an [`EncryptedMessage`].
    ///
    /// If the data is not long enough to contain the expected fields, this returns an error.
    ///
    /// Layout parsed: `auth_data (2) || public key (65) || iv (16) || ciphertext || tag (32)`.
    /// The returned message borrows the ciphertext portion of `data` mutably so it can later be
    /// decrypted in place.
    pub fn parse(data: &mut [u8]) -> Result<EncryptedMessage<'_>, ECIESError> {
        // Auth data is 2 bytes, public key is 65 bytes
        if data.len() < 65 + 2 {
            return Err(ECIESErrorImpl::EncryptedDataTooSmall.into())
        }
        let (auth_data, encrypted) = data.split_at_mut(2);
        // convert the auth data to a fixed size array
        //
        // NOTE: this will not panic because we've already checked that the data is long enough
        let auth_data = auth_data.try_into().unwrap();
        let (pubkey_bytes, encrypted) = encrypted.split_at_mut(65);
        let public_key = PublicKey::from_slice(pubkey_bytes)?;
        // return an error if the encrypted len is currently less than 32
        let tag_index =
            encrypted.len().checked_sub(32).ok_or(ECIESErrorImpl::EncryptedDataTooSmall)?;
        // NOTE: we've already checked that the encrypted data is long enough to contain the
        // encrypted data and tag
        let (data_iv, tag_bytes) = encrypted.split_at_mut(tag_index);
        // NOTE: this will not panic because we are splitting at length minus 32 bytes, which
        // causes tag_bytes to be 32 bytes long
        let tag = B256::from_slice(tag_bytes);
        // now we can check if the encrypted data is long enough to contain the IV
        if data_iv.len() < 16 {
            return Err(ECIESErrorImpl::EncryptedDataTooSmall.into())
        }
        let (iv, encrypted_data) = data_iv.split_at_mut(16);
        // NOTE: this will not panic because we are splitting at 16 bytes
        let iv = B128::from_slice(iv);
        Ok(EncryptedMessage { auth_data, public_key, iv, encrypted_data, tag })
    }
    /// Use the given secret and this encrypted message to derive the shared secret, and use the
    /// shared secret to derive the mac and encryption keys.
    pub fn derive_keys(&self, secret_key: &SecretKey) -> RLPxSymmetricKeys {
        // perform ECDH to get the shared secret, using the remote public key from the message and
        // the given secret key
        let x = ecdh_x(&self.public_key, secret_key);
        let mut key = [0u8; 32];
        // The RLPx spec describes the key derivation process as:
        //
        // kE || kM = KDF(S, 32)
        //
        // where kE is the encryption key, and kM is used to determine the MAC key (see below)
        //
        // NOTE: The RLPx spec does not define an `OtherInfo` parameter, and this is unused in
        // other implementations, so we use an empty slice.
        kdf(x, &[], &mut key);
        let enc_key = B128::from_slice(&key[..16]);
        // The MAC tag check operation described is:
        //
        // d == MAC(sha256(kM), iv || c)
        //
        // where kM is the result of the above KDF, iv is the IV, and c is the encrypted data.
        // Because the hash of kM is ultimately used as the mac key, we perform that hashing here.
        let mac_key = sha256(&key[16..32]);
        RLPxSymmetricKeys { enc_key, mac_key }
    }
    /// Use the given ECIES keys to check the message integrity using the contained tag.
    ///
    /// Returns `TagCheckDecryptFailed` if the computed HMAC does not match `self.tag`.
    pub fn check_integrity(&self, keys: &RLPxSymmetricKeys) -> Result<(), ECIESError> {
        // The MAC tag check operation described is:
        //
        // d == MAC(sha256(kM), iv || c)
        //
        // NOTE: The RLPx spec does not show here that the `auth_data` is required for checking the
        // tag.
        //
        // Geth refers to SEC 1's definition of ECIES:
        //
        // Encrypt encrypts a message using ECIES as specified in SEC 1, section 5.1.
        //
        //   s1 and s2 contain shared information that is not part of the resulting
        //   ciphertext. s1 is fed into key derivation, s2 is fed into the MAC. If the
        //   shared information parameters aren't being used, they should be nil.
        //
        // ```
        // prefix := make([]byte, 2)
        // binary.BigEndian.PutUint16(prefix, uint16(len(h.wbuf.data)+eciesOverhead))
        //
        // enc, err := ecies.Encrypt(rand.Reader, h.remote, h.wbuf.data, nil, prefix)
        // ```
        let check_tag = hmac_sha256(
            keys.mac_key.as_ref(),
            &[self.iv.as_slice(), self.encrypted_data],
            &self.auth_data,
        );
        if check_tag != self.tag {
            return Err(ECIESErrorImpl::TagCheckDecryptFailed.into())
        }
        Ok(())
    }
    /// Use the given ECIES keys to decrypt the contained encrypted data, consuming the message and
    /// returning the decrypted data.
    ///
    /// Decryption happens in place (AES-128-CTR keystream applied over the borrowed ciphertext).
    pub fn decrypt(self, keys: &RLPxSymmetricKeys) -> &'a mut [u8] {
        let Self { iv, encrypted_data, .. } = self;
        // rename for clarity once it's decrypted
        let decrypted_data = encrypted_data;
        let mut decryptor = Ctr64BE::<Aes128>::new((&keys.enc_key.0).into(), (&*iv).into());
        decryptor.apply_keystream(decrypted_data);
        decrypted_data
    }
    /// Use the given ECIES keys to check the integrity of the message, returning an error if the
    /// tag check fails, and then decrypt the message, returning the decrypted data.
    pub fn check_and_decrypt(self, keys: RLPxSymmetricKeys) -> Result<&'a mut [u8], ECIESError> {
        self.check_integrity(&keys)?;
        Ok(self.decrypt(&keys))
    }
}
/// The symmetric keys derived from an ECIES message.
#[derive(Debug)]
pub struct RLPxSymmetricKeys {
    /// The key used for decryption, specifically with AES-128 in CTR mode, using a 64-bit big
    /// endian counter.
    pub enc_key: B128,
    /// The key used as the HMAC-SHA256 key when verifying message integrity; it is the sha256 of
    /// the second half of the KDF output.
    pub mac_key: B256,
}
impl ECIES {
/// Create a new client with the given static secret key, remote peer id, nonce, and ephemeral
/// secret key.
fn new_static_client(
secret_key: SecretKey,
remote_id: PeerId,
nonce: B256,
ephemeral_secret_key: SecretKey,
) -> Result<Self, ECIESError> {
let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key);
let remote_public_key = id2pk(remote_id)?;
let ephemeral_public_key = PublicKey::from_secret_key(SECP256K1, &ephemeral_secret_key);
Ok(Self {
secret_key,
public_key,
ephemeral_secret_key,
ephemeral_public_key,
nonce,
remote_public_key: Some(remote_public_key),
remote_ephemeral_public_key: None,
remote_nonce: None,
ephemeral_shared_secret: None,
init_msg: None,
remote_init_msg: None,
remote_id: Some(remote_id),
body_size: None,
egress_aes: None,
ingress_aes: None,
egress_mac: None,
ingress_mac: None,
})
}
/// Create a new ECIES client with the given static secret key and remote peer ID.
pub fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result<Self, ECIESError> {
// TODO(rand): use rng for nonce
let mut rng = rng();
let nonce = B256::random();
let ephemeral_secret_key = SecretKey::new(&mut rng);
Self::new_static_client(secret_key, remote_id, nonce, ephemeral_secret_key)
}
/// Create a new server with the given static secret key, remote peer id, and ephemeral secret
/// key.
pub fn new_static_server(
secret_key: SecretKey,
nonce: B256,
ephemeral_secret_key: SecretKey,
) -> Result<Self, ECIESError> {
let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key);
let ephemeral_public_key = PublicKey::from_secret_key(SECP256K1, &ephemeral_secret_key);
Ok(Self {
secret_key,
public_key,
ephemeral_secret_key,
ephemeral_public_key,
nonce,
remote_public_key: None,
remote_ephemeral_public_key: None,
remote_nonce: None,
ephemeral_shared_secret: None,
init_msg: None,
remote_init_msg: None,
remote_id: None,
body_size: None,
egress_aes: None,
ingress_aes: None,
egress_mac: None,
ingress_mac: None,
})
}
/// Create a new ECIES server with the given static secret key.
pub fn new_server(secret_key: SecretKey) -> Result<Self, ECIESError> {
let mut rng = rng();
let nonce = B256::random();
let ephemeral_secret_key = SecretKey::new(&mut rng);
Self::new_static_server(secret_key, nonce, ephemeral_secret_key)
}
    /// Return the contained remote peer ID.
    ///
    /// # Panics
    ///
    /// Panics if the remote id is not yet known, i.e. before it was supplied at construction
    /// (client) or parsed from the peer's auth message (server).
    pub const fn remote_id(&self) -> PeerId {
        self.remote_id.unwrap()
    }
    /// ECIES-encrypts `data` and appends the resulting message to `out` in the wire layout
    /// `ephemeral-pubkey (65) || iv (16) || ciphertext || tag (32)`.
    fn encrypt_message(&self, data: &[u8], out: &mut BytesMut) {
        let mut rng = rng();
        out.reserve(secp256k1::constants::UNCOMPRESSED_PUBLIC_KEY_SIZE + 16 + data.len() + 32);
        // A fresh ephemeral key per message; its uncompressed public key is sent in the clear.
        let secret_key = SecretKey::new(&mut rng);
        out.extend_from_slice(
            &PublicKey::from_secret_key(SECP256K1, &secret_key).serialize_uncompressed(),
        );
        // ECDH with the remote static key, then the KDF yields 32 bytes: the first 16 become the
        // AES-128-CTR key and the sha256 of the last 16 becomes the HMAC key.
        let x = ecdh_x(&self.remote_public_key.unwrap(), &secret_key);
        let mut key = [0u8; 32];
        kdf(x, &[], &mut key);
        let enc_key = B128::from_slice(&key[..16]);
        let mac_key = sha256(&key[16..32]);
        let iv = B128::random();
        let mut encryptor = Ctr64BE::<Aes128>::new((&enc_key.0).into(), (&iv.0).into());
        let mut encrypted = data.to_vec();
        encryptor.apply_keystream(&mut encrypted);
        // The tag covers iv || ciphertext plus the big-endian total size
        // (65-byte pubkey + 16-byte iv + ciphertext + 32-byte tag) as extra authenticated data.
        let total_size: u16 = u16::try_from(65 + 16 + data.len() + 32).unwrap();
        let tag =
            hmac_sha256(mac_key.as_ref(), &[iv.as_slice(), &encrypted], &total_size.to_be_bytes());
        out.extend_from_slice(iv.as_slice());
        out.extend_from_slice(&encrypted);
        out.extend_from_slice(tag.as_ref());
    }
fn decrypt_message<'a>(&self, data: &'a mut [u8]) -> Result<&'a mut [u8], ECIESError> {
// parse the encrypted message from bytes
let encrypted_message = EncryptedMessage::parse(data)?;
// derive keys from the secret key and the encrypted message
let keys = encrypted_message.derive_keys(&self.secret_key);
// check message integrity and decrypt the message
encrypted_message.check_and_decrypt(keys)
}
    /// Builds the unencrypted `auth` body: a recoverable signature over
    /// `ecdh(remote-static) ^ nonce` made with our ephemeral key, followed by our peer id, nonce,
    /// and protocol version, RLP-encoded and padded with a random number (100..=300) of zero
    /// bytes.
    fn create_auth_unencrypted(&self) -> BytesMut {
        let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key);
        let msg = x ^ self.nonce;
        let (rec_id, sig) = SECP256K1
            .sign_ecdsa_recoverable(
                &secp256k1::Message::from_digest(msg.0),
                &self.ephemeral_secret_key,
            )
            .serialize_compact();
        // 64 bytes of compact signature plus the recovery id byte.
        let mut sig_bytes = [0u8; 65];
        sig_bytes[..64].copy_from_slice(&sig);
        sig_bytes[64] = i32::from(rec_id) as u8;
        let id = pk2id(&self.public_key);
        // Throwaway struct describing the RLP list layout of the auth body.
        #[derive(RlpEncodable)]
        struct S<'a> {
            sig_bytes: &'a [u8; 65],
            id: &'a PeerId,
            nonce: &'a B256,
            protocol_version: u8,
        }
        let mut out = BytesMut::new();
        S {
            sig_bytes: &sig_bytes,
            id: &id,
            nonce: &self.nonce,
            protocol_version: PROTOCOL_VERSION as u8,
        }
        .encode(&mut out);
        // Random-length zero padding so the message size is not fixed.
        out.resize(out.len() + rng().gen_range(100..=300), 0);
        out
    }
#[cfg(test)]
fn create_auth(&mut self) -> BytesMut {
let mut buf = BytesMut::new();
self.write_auth(&mut buf);
buf
}
    /// Write an auth message to the given buffer.
    ///
    /// Wire format: a 2-byte big-endian length prefix followed by the ECIES-encrypted auth body.
    /// The full message (prefix included) is retained as `init_msg`, which later seeds the egress
    /// MAC in `setup_frame`.
    pub fn write_auth(&mut self, buf: &mut BytesMut) {
        let unencrypted = self.create_auth_unencrypted();
        let mut out = buf.split_off(buf.len());
        // Placeholder for the length prefix; patched once the encrypted size is known.
        out.put_u16(0);
        let mut encrypted = out.split_off(out.len());
        self.encrypt_message(&unencrypted, &mut encrypted);
        let len_bytes = u16::try_from(encrypted.len()).unwrap().to_be_bytes();
        out[..len_bytes.len()].copy_from_slice(&len_bytes);
        out.unsplit(encrypted);
        self.init_msg = Some(Bytes::copy_from_slice(&out));
        buf.unsplit(out);
    }
    /// Parses an unencrypted `auth` body: reads the peer's signature, static id and nonce, then
    /// recovers the remote *ephemeral* public key from the signature over
    /// `ecdh(static) ^ remote_nonce` and computes the ephemeral shared secret.
    fn parse_auth_unencrypted(&mut self, data: &[u8]) -> Result<(), ECIESError> {
        let mut data = Rlp::new(data)?;
        // auth = [sig (65 bytes), id, nonce, version]; trailing fields are not read.
        let sigdata = data.get_next::<[u8; 65]>()?.ok_or(ECIESErrorImpl::InvalidAuthData)?;
        let signature = RecoverableSignature::from_compact(
            &sigdata[..64],
            RecoveryId::try_from(sigdata[64] as i32)?,
        )?;
        let remote_id = data.get_next()?.ok_or(ECIESErrorImpl::InvalidAuthData)?;
        self.remote_id = Some(remote_id);
        self.remote_public_key = Some(id2pk(remote_id)?);
        self.remote_nonce = Some(data.get_next()?.ok_or(ECIESErrorImpl::InvalidAuthData)?);
        // The signed digest is the static shared secret XORed with the remote nonce; recovering
        // the signer yields the peer's ephemeral public key.
        let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key);
        self.remote_ephemeral_public_key = Some(SECP256K1.recover_ecdsa(
            &secp256k1::Message::from_digest((x ^ self.remote_nonce.unwrap()).0),
            &signature,
        )?);
        self.ephemeral_shared_secret =
            Some(ecdh_x(&self.remote_ephemeral_public_key.unwrap(), &self.ephemeral_secret_key));
        Ok(())
    }
    /// Read and verify an auth message from the input data.
    ///
    /// The raw (still encrypted) message is retained as `remote_init_msg`, since it seeds the
    /// ingress MAC in `setup_frame`.
    #[tracing::instrument(skip_all)]
    pub fn read_auth(&mut self, data: &mut [u8]) -> Result<(), ECIESError> {
        self.remote_init_msg = Some(Bytes::copy_from_slice(data));
        let unencrypted = self.decrypt_message(data)?;
        self.parse_auth_unencrypted(unencrypted)
    }
    /// Create an `ack` message using the internal nonce, local ephemeral public key, and `RLPx`
    /// ECIES protocol version.
    fn create_ack_unencrypted(&self) -> impl AsRef<[u8]> {
        // The ack payload has a fixed maximum encoded size, so it can be encoded without a heap
        // allocation via `encode_fixed_size`.
        #[derive(RlpEncodable, RlpMaxEncodedLen)]
        struct S {
            id: PeerId,
            nonce: B256,
            protocol_version: u8,
        }
        alloy_rlp::encode_fixed_size(&S {
            id: pk2id(&self.ephemeral_public_key),
            nonce: self.nonce,
            protocol_version: PROTOCOL_VERSION as u8,
        })
    }
#[cfg(test)]
pub fn create_ack(&mut self) -> BytesMut {
let mut buf = BytesMut::new();
self.write_ack(&mut buf);
buf
}
    /// Write an `ack` message to the given buffer.
    ///
    /// Mirrors `write_auth`: a 2-byte big-endian length prefix followed by the encrypted body.
    /// Since the ack is the responder's last handshake message, the frame ciphers and MACs are
    /// derived afterwards via `setup_frame(true)`.
    pub fn write_ack(&mut self, out: &mut BytesMut) {
        let mut buf = out.split_off(out.len());
        // reserve space for length
        buf.put_u16(0);
        // encrypt and append
        let mut encrypted = buf.split_off(buf.len());
        self.encrypt_message(self.create_ack_unencrypted().as_ref(), &mut encrypted);
        let len_bytes = u16::try_from(encrypted.len()).unwrap().to_be_bytes();
        buf.unsplit(encrypted);
        // write length
        buf[..len_bytes.len()].copy_from_slice(&len_bytes[..]);
        // Retained (prefix included) for egress MAC seeding in `setup_frame`.
        self.init_msg = Some(buf.clone().freeze());
        out.unsplit(buf);
        self.setup_frame(true);
    }
    /// Parse the incoming `ack` message from the given `data` bytes, which are assumed to be
    /// unencrypted. This parses the remote ephemeral pubkey and nonce from the message, and uses
    /// ECDH to compute the shared secret. The shared secret is the x coordinate of the point
    /// returned by ECDH.
    ///
    /// This sets the `remote_ephemeral_public_key` and `remote_nonce`, and
    /// `ephemeral_shared_secret` fields in the ECIES state.
    fn parse_ack_unencrypted(&mut self, data: &[u8]) -> Result<(), ECIESError> {
        let mut data = Rlp::new(data)?;
        // ack = [ephemeral-pubkey-id, nonce, ...]; any further fields are not read.
        self.remote_ephemeral_public_key =
            Some(id2pk(data.get_next()?.ok_or(ECIESErrorImpl::InvalidAckData)?)?);
        self.remote_nonce = Some(data.get_next()?.ok_or(ECIESErrorImpl::InvalidAckData)?);
        self.ephemeral_shared_secret =
            Some(ecdh_x(&self.remote_ephemeral_public_key.unwrap(), &self.ephemeral_secret_key));
        Ok(())
    }
    /// Read and verify an ack message from the input data.
    ///
    /// On success the frame ciphers and MACs are derived via `setup_frame(false)`, completing the
    /// handshake on the initiator side.
    #[tracing::instrument(skip_all)]
    pub fn read_ack(&mut self, data: &mut [u8]) -> Result<(), ECIESError> {
        // Keep the raw message around: it seeds the ingress MAC in `setup_frame`.
        self.remote_init_msg = Some(Bytes::copy_from_slice(data));
        let unencrypted = self.decrypt_message(data)?;
        self.parse_ack_unencrypted(unencrypted)?;
        self.setup_frame(false);
        Ok(())
    }
    /// Derives the frame AES ciphers and ingress/egress MAC states from the completed handshake.
    ///
    /// `incoming` is `true` on the side that received the auth and sent the ack (see `write_ack`)
    /// and `false` on the side that read the ack (see `read_ack`); it selects the nonce ordering
    /// used for the nonce hash.
    fn setup_frame(&mut self, incoming: bool) {
        // h_nonce = keccak256 over the two handshake nonces; ordering depends on our role.
        let mut hasher = Keccak256::new();
        for el in &if incoming {
            [self.nonce, self.remote_nonce.unwrap()]
        } else {
            [self.remote_nonce.unwrap(), self.nonce]
        } {
            hasher.update(el);
        }
        let h_nonce = B256::from(hasher.finalize().as_ref());
        // Both frame ciphers start from an all-zero IV.
        let iv = B128::default();
        // shared-secret = keccak256(ephemeral-shared-secret || h_nonce)
        let shared_secret: B256 = {
            let mut hasher = Keccak256::new();
            hasher.update(self.ephemeral_shared_secret.unwrap().0.as_ref());
            hasher.update(h_nonce.0.as_ref());
            B256::from(hasher.finalize().as_ref())
        };
        // aes-secret = keccak256(ephemeral-shared-secret || shared-secret); used in both
        // directions.
        let aes_secret: B256 = {
            let mut hasher = Keccak256::new();
            hasher.update(self.ephemeral_shared_secret.unwrap().0.as_ref());
            hasher.update(shared_secret.0.as_ref())
;
            B256::from(hasher.finalize().as_ref())
        };
        self.ingress_aes = Some(Ctr64BE::<Aes256>::new((&aes_secret.0).into(), (&iv.0).into()));
        self.egress_aes = Some(Ctr64BE::<Aes256>::new((&aes_secret.0).into(), (&iv.0).into()));
        // mac-secret = keccak256(ephemeral-shared-secret || aes-secret)
        let mac_secret: B256 = {
            let mut hasher = Keccak256::new();
            hasher.update(self.ephemeral_shared_secret.unwrap().0.as_ref());
            hasher.update(aes_secret.0.as_ref());
            B256::from(hasher.finalize().as_ref())
        };
        // Seed the ingress MAC with (mac-secret ^ our nonce) plus the raw message we received,
        // and the egress MAC with (mac-secret ^ remote nonce) plus the message we sent.
        self.ingress_mac = Some(MAC::new(mac_secret));
        self.ingress_mac.as_mut().unwrap().update((mac_secret ^ self.nonce).as_ref());
        self.ingress_mac.as_mut().unwrap().update(self.remote_init_msg.as_ref().unwrap());
        self.egress_mac = Some(MAC::new(mac_secret));
        self.egress_mac
            .as_mut()
            .unwrap()
            .update((mac_secret ^ self.remote_nonce.unwrap()).as_ref());
        self.egress_mac.as_mut().unwrap().update(self.init_msg.as_ref().unwrap());
    }
#[cfg(test)]
fn create_header(&mut self, size: usize) -> BytesMut {
let mut out = BytesMut::new();
self.write_header(&mut out, size);
out
}
    /// Writes an encrypted `RLPx` frame header announcing a body of `size` bytes to `out`,
    /// followed by the 16-byte header MAC.
    ///
    /// # Panics
    ///
    /// Panics if the frame ciphers have not been set up yet (i.e. before the handshake finished).
    pub fn write_header(&mut self, out: &mut BytesMut, size: usize) {
        // First 3 header bytes: the body size as a big-endian 24-bit integer.
        let mut buf = [0u8; 8];
        BigEndian::write_uint(&mut buf, size as u64, 3);
        let mut header = [0u8; 16];
        header[..3].copy_from_slice(&buf[..3]);
        // Bytes 3..6 carry the constant 0xc2 0x80 0x80 header-data; the remaining bytes of the
        // 16-byte header stay zero.
        header[3..6].copy_from_slice(&[194, 128, 128]);
        let mut header = HeaderBytes::from(header);
        // Encrypt the header in place, then MAC the ciphertext.
        self.egress_aes.as_mut().unwrap().apply_keystream(&mut header);
        self.egress_mac.as_mut().unwrap().update_header(&header);
        let tag = self.egress_mac.as_mut().unwrap().digest();
        out.reserve(Self::header_len());
        out.extend_from_slice(&header[..]);
        out.extend_from_slice(tag.as_slice());
    }
    /// Reads the `RLPx` header from the slice, setting up the MAC and AES, returning the body
    /// size contained in the header.
    ///
    /// # Errors
    ///
    /// Fails if fewer than 32 bytes are supplied, if the header MAC does not verify, or if the
    /// decrypted header is malformed.
    pub fn read_header(&mut self, data: &mut [u8]) -> Result<usize, ECIESError> {
        // If the data is not large enough to fit the header and mac bytes, return an error
        //
        // The header is 16 bytes, and the mac is 16 bytes, so the data must be at least 32 bytes
        if data.len() < 32 {
            return Err(ECIESErrorImpl::InvalidHeader.into())
        }
        let (header_bytes, mac_bytes) = split_at_mut(data, 16)?;
        let header = HeaderBytes::from_mut_slice(header_bytes);
        let mac = B128::from_slice(&mac_bytes[..16]);
        // Verify the MAC over the still-encrypted header before decrypting it.
        self.ingress_mac.as_mut().unwrap().update_header(header);
        let check_mac = self.ingress_mac.as_mut().unwrap().digest();
        if check_mac != mac {
            return Err(ECIESErrorImpl::TagCheckHeaderFailed.into())
        }
        self.ingress_aes.as_mut().unwrap().apply_keystream(header);
        if header.as_slice().len() < 3 {
            return Err(ECIESErrorImpl::InvalidHeader.into())
        }
        // The first 3 bytes hold the body size as a big-endian 24-bit integer; remember it so
        // `body_len`/`read_body` can use it.
        let body_size = usize::try_from(header.as_slice().read_uint::<BigEndian>(3)?)?;
        self.body_size = Some(body_size);
        Ok(body_size)
    }
pub const fn header_len() -> usize {
32
}
    /// Number of bytes to read for the frame body: the announced body size rounded up to a
    /// 16-byte boundary, plus the 16-byte body MAC.
    ///
    /// # Panics
    ///
    /// Panics if no header has been read yet (`body_size` is only set by `read_header`).
    pub const fn body_len(&self) -> usize {
        let len = self.body_size.unwrap();
        Self::align_16(len) + 16
    }
#[cfg(test)]
fn create_body(&mut self, data: &[u8]) -> BytesMut {
let mut out = BytesMut::new();
self.write_body(&mut out, data);
out
}
    /// Encrypts `data` (zero-padded up to a 16-byte boundary), appends the ciphertext to `out`,
    /// and appends the 16-byte body MAC.
    pub fn write_body(&mut self, out: &mut BytesMut, data: &[u8]) {
        let len = Self::align_16(data.len());
        let old_len = out.len();
        // `resize` zero-fills, which provides the padding bytes.
        out.resize(old_len + len, 0);
        let encrypted = &mut out[old_len..old_len + len];
        encrypted[..data.len()].copy_from_slice(data);
        // Encrypt in place, then fold the ciphertext into the egress MAC before emitting the tag.
        self.egress_aes.as_mut().unwrap().apply_keystream(encrypted);
        self.egress_mac.as_mut().unwrap().update_body(encrypted);
        let tag = self.egress_mac.as_mut().unwrap().digest();
        out.extend_from_slice(tag.as_slice());
    }
    /// Checks the body MAC and decrypts the frame body in place, returning the plaintext
    /// truncated to the size recorded by the preceding `read_header` call.
    pub fn read_body<'a>(&mut self, data: &'a mut [u8]) -> Result<&'a mut [u8], ECIESError> {
        // error if the data is too small to contain the tag
        // TODO: create a custom type similar to EncryptedMessage for parsing, checking MACs, and
        // decrypting the body
        let mac_index = data.len().checked_sub(16).ok_or(ECIESErrorImpl::EncryptedDataTooSmall)?;
        let (body, mac_bytes) = split_at_mut(data, mac_index)?;
        let mac = B128::from_slice(mac_bytes);
        // The MAC covers the ciphertext, so verify before decrypting.
        self.ingress_mac.as_mut().unwrap().update_body(body);
        let check_mac = self.ingress_mac.as_mut().unwrap().digest();
        if check_mac != mac {
            return Err(ECIESErrorImpl::TagCheckBodyFailed.into())
        }
        let size = self.body_size.unwrap();
        // A header must be read before each body; clear the size so it cannot be reused.
        self.body_size = None;
        let ret = body;
        self.ingress_aes.as_mut().unwrap().apply_keystream(ret);
        // Strip the zero padding added by `write_body`.
        Ok(split_at_mut(ret, size)?.0)
    }
/// Returns `num` aligned to 16.
///
/// `<https://stackoverflow.com/questions/14561402/how-is-this-size-alignment-working>`
#[inline]
const fn align_16(num: usize) -> usize {
(num + (16 - 1)) & !(16 - 1)
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::{b256, hex};
#[test]
fn ecdh() {
let our_secret_key = SecretKey::from_slice(&hex!(
"202a36e24c3eb39513335ec99a7619bad0e7dc68d69401b016253c7d26dc92f8"
))
.unwrap();
let remote_public_key = id2pk(hex!("d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666").into()).unwrap();
assert_eq!(
ecdh_x(&remote_public_key, &our_secret_key),
hex!("821ce7e01ea11b111a52b2dafae8a3031a372d83bdf1a78109fa0783c2b9d5d3")
)
}
#[test]
fn communicate() {
let mut rng = rng();
let server_secret_key = SecretKey::new(&mut rng);
let server_public_key = PublicKey::from_secret_key(SECP256K1, &server_secret_key);
let client_secret_key = SecretKey::new(&mut rng);
let mut server_ecies = ECIES::new_server(server_secret_key).unwrap();
let mut client_ecies =
ECIES::new_client(client_secret_key, pk2id(&server_public_key)).unwrap();
// Handshake
let mut auth = client_ecies.create_auth();
server_ecies.read_auth(&mut auth).unwrap();
let mut ack = server_ecies.create_ack();
client_ecies.read_ack(&mut ack).unwrap();
let mut ack = client_ecies.create_ack();
server_ecies.read_ack(&mut ack).unwrap();
let server_to_client_data = [0u8, 1u8, 2u8, 3u8, 4u8];
let client_to_server_data = [5u8, 6u8, 7u8];
// Test server to client 1
let mut header = server_ecies.create_header(server_to_client_data.len());
assert_eq!(header.len(), ECIES::header_len());
client_ecies.read_header(&mut header).unwrap();
let mut body = server_ecies.create_body(&server_to_client_data);
assert_eq!(body.len(), client_ecies.body_len());
let ret = client_ecies.read_body(&mut body).unwrap();
assert_eq!(ret, server_to_client_data);
// Test client to server 1
server_ecies
.read_header(&mut client_ecies.create_header(client_to_server_data.len()))
.unwrap();
let mut b = client_ecies.create_body(&client_to_server_data);
let ret = server_ecies.read_body(&mut b).unwrap();
assert_eq!(ret, client_to_server_data);
// Test server to client 2
client_ecies
.read_header(&mut server_ecies.create_header(server_to_client_data.len()))
.unwrap();
let mut b = server_ecies.create_body(&server_to_client_data);
let ret = client_ecies.read_body(&mut b).unwrap();
assert_eq!(ret, server_to_client_data);
// Test server to client 3
client_ecies
.read_header(&mut server_ecies.create_header(server_to_client_data.len()))
.unwrap();
let mut b = server_ecies.create_body(&server_to_client_data);
let ret = client_ecies.read_body(&mut b).unwrap();
assert_eq!(ret, server_to_client_data);
// Test client to server 2
server_ecies
.read_header(&mut client_ecies.create_header(client_to_server_data.len()))
.unwrap();
let mut b = client_ecies.create_body(&client_to_server_data);
let ret = server_ecies.read_body(&mut b).unwrap();
assert_eq!(ret, client_to_server_data);
// Test client to server 3
server_ecies
.read_header(&mut client_ecies.create_header(client_to_server_data.len()))
.unwrap();
let mut b = client_ecies.create_body(&client_to_server_data);
let ret = server_ecies.read_body(&mut b).unwrap();
assert_eq!(ret, client_to_server_data);
}
    /// Fixed server static secret key used by the EIP-8 handshake tests below.
    fn eip8_test_server_key() -> SecretKey {
        SecretKey::from_slice(&hex!(
            "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"
        ))
        .unwrap()
    }
    /// Builds the client side of the EIP-8 test handshake with fixed static/ephemeral keys and a
    /// fixed nonce, pointed at the fixed test server's id.
    fn eip8_test_client() -> ECIES {
        let client_static_key = SecretKey::from_slice(&hex!(
            "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee"
        ))
        .unwrap();
        let client_ephemeral_key = SecretKey::from_slice(&hex!(
            "869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d"
        ))
        .unwrap();
        let client_nonce =
            b256!("0x7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6");
        let server_id = pk2id(&PublicKey::from_secret_key(SECP256K1, &eip8_test_server_key()));
        ECIES::new_static_client(client_static_key, server_id, client_nonce, client_ephemeral_key)
            .unwrap()
    }
    /// Builds the server side of the EIP-8 test handshake with a fixed ephemeral key and nonce.
    fn eip8_test_server() -> ECIES {
        let server_ephemeral_key = SecretKey::from_slice(&hex!(
            "e238eb8e04fee6511ab04c6dd3c89ce097b11f25d584863ac2b6d5b35b1847e4"
        ))
        .unwrap();
        let server_nonce =
            b256!("0x559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd");
        ECIES::new_static_server(eip8_test_server_key(), server_nonce, server_ephemeral_key)
            .unwrap()
    }
#[test]
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/codec.rs | crates/net/ecies/src/codec.rs | //! This contains the main codec for `RLPx` ECIES messages
use crate::{algorithm::ECIES, ECIESError, ECIESErrorImpl, EgressECIESValue, IngressECIESValue};
use alloy_primitives::{bytes::BytesMut, B512 as PeerId};
use secp256k1::SecretKey;
use std::{fmt::Debug, io};
use tokio_util::codec::{Decoder, Encoder};
use tracing::{instrument, trace};
/// The max size that the initial handshake packet can be. Currently 2KiB.
///
/// Frame bodies announced before the handshake completes (the `InitialHeader` state) that exceed
/// this size are rejected, bounding memory use for unauthenticated peers.
const MAX_INITIAL_HANDSHAKE_SIZE: usize = 2048;
/// Tokio codec for ECIES
#[derive(Debug)]
pub struct ECIESCodec {
    /// Handshake state, frame ciphers, and MAC state for this connection.
    ecies: ECIES,
    /// Which part of the protocol we expect to decode next.
    state: ECIESState,
}
/// Current ECIES state of a connection
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ECIESState {
/// The first stage of the ECIES handshake, where each side of the connection sends an auth
/// message containing the ephemeral public key, signature of the public key, nonce, and other
/// metadata.
Auth,
/// The second stage of the ECIES handshake, where each side of the connection sends an ack
/// message containing the nonce and other metadata.
Ack,
/// This is the same as the [`ECIESState::Header`] stage, but occurs only after the first
/// [`ECIESState::Ack`] message. This is so that the initial handshake message can be properly
/// validated.
InitialHeader,
/// The third stage of the ECIES handshake, where header is parsed, message integrity checks
/// performed, and message is decrypted.
Header,
/// The final stage, where the ECIES message is actually read and returned by the ECIES codec.
Body,
}
impl ECIESCodec {
    /// Create a new server codec using the given secret key
    pub(crate) fn new_server(secret_key: SecretKey) -> Result<Self, ECIESError> {
        // A server starts out waiting for the initiator's auth message.
        let ecies = ECIES::new_server(secret_key)?;
        Ok(Self { ecies, state: ECIESState::Auth })
    }
    /// Create a new client codec using the given secret key and the server's public id
    pub(crate) fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result<Self, ECIESError> {
        // A client also begins in `Auth`; the state advances when its auth message is encoded.
        let ecies = ECIES::new_client(secret_key, remote_id)?;
        Ok(Self { ecies, state: ECIESState::Auth })
    }
}
impl Decoder for ECIESCodec {
    type Item = IngressECIESValue;
    type Error = ECIESError;
    #[instrument(level = "trace", skip_all, fields(peer=?self.ecies.remote_id, state=?self.state))]
    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Loop so that header decoding can fall through to body decoding when enough bytes are
        // already buffered; `Ok(None)` means "need more data".
        loop {
            match self.state {
                ECIESState::Auth => {
                    trace!("parsing auth");
                    // Auth/ack messages carry a 2-byte big-endian payload-length prefix.
                    if buf.len() < 2 {
                        return Ok(None)
                    }
                    let payload_size = u16::from_be_bytes([buf[0], buf[1]]) as usize;
                    let total_size = payload_size + 2;
                    if buf.len() < total_size {
                        trace!("current len {}, need {}", buf.len(), total_size);
                        return Ok(None)
                    }
                    self.ecies.read_auth(&mut buf.split_to(total_size))?;
                    // The remote id is known after a successful auth; next we expect the first
                    // (size-limited) frame header.
                    self.state = ECIESState::InitialHeader;
                    return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id())))
                }
                ECIESState::Ack => {
                    trace!("parsing ack with len {}", buf.len());
                    if buf.len() < 2 {
                        return Ok(None)
                    }
                    let payload_size = u16::from_be_bytes([buf[0], buf[1]]) as usize;
                    let total_size = payload_size + 2;
                    if buf.len() < total_size {
                        trace!("current len {}, need {}", buf.len(), total_size);
                        return Ok(None)
                    }
                    self.ecies.read_ack(&mut buf.split_to(total_size))?;
                    self.state = ECIESState::InitialHeader;
                    return Ok(Some(IngressECIESValue::Ack))
                }
                ECIESState::InitialHeader => {
                    if buf.len() < ECIES::header_len() {
                        trace!("current len {}, need {}", buf.len(), ECIES::header_len());
                        return Ok(None)
                    }
                    let body_size =
                        self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?;
                    // Reject oversized bodies on the very first frame to bound memory use.
                    if body_size > MAX_INITIAL_HANDSHAKE_SIZE {
                        trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake size");
                        return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge {
                            body_size,
                            max_body_size: MAX_INITIAL_HANDSHAKE_SIZE,
                        }
                        .into())
                    }
                    self.state = ECIESState::Body;
                }
                ECIESState::Header => {
                    if buf.len() < ECIES::header_len() {
                        trace!("current len {}, need {}", buf.len(), ECIES::header_len());
                        return Ok(None)
                    }
                    self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?;
                    self.state = ECIESState::Body;
                }
                ECIESState::Body => {
                    // `body_len` is known from the header decoded just before this state.
                    if buf.len() < self.ecies.body_len() {
                        return Ok(None)
                    }
                    let mut data = buf.split_to(self.ecies.body_len());
                    let mut ret = BytesMut::new();
                    ret.extend_from_slice(self.ecies.read_body(&mut data)?);
                    self.state = ECIESState::Header;
                    return Ok(Some(IngressECIESValue::Message(ret)))
                }
            }
        }
    }
}
impl Encoder<EgressECIESValue> for ECIESCodec {
    type Error = io::Error;
    #[instrument(level = "trace", skip(self, buf), fields(peer=?self.ecies.remote_id, state=?self.state))]
    fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> {
        match item {
            EgressECIESValue::Auth => {
                // After sending our auth, the next incoming message should be the peer's ack.
                self.state = ECIESState::Ack;
                self.ecies.write_auth(buf);
                Ok(())
            }
            EgressECIESValue::Ack => {
                // After sending our ack, the next incoming data is the first (size-limited)
                // frame header.
                self.state = ECIESState::InitialHeader;
                self.ecies.write_ack(buf);
                Ok(())
            }
            EgressECIESValue::Message(data) => {
                // Regular frame: header announcing the body size, then the encrypted body.
                self.ecies.write_header(buf, data.len());
                self.ecies.write_body(buf, &data);
                Ok(())
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/error.rs | crates/net/ecies/src/error.rs | use crate::IngressECIESValue;
use std::fmt;
use thiserror::Error;
/// An error that occurs while reading or writing to an ECIES stream.
///
/// The concrete error kind is boxed ([`ECIESErrorImpl`]) to keep `Result` payloads small.
#[derive(Debug, Error)]
pub struct ECIESError {
    // Boxed error kind; access via `inner`/`into_inner`.
    inner: Box<ECIESErrorImpl>,
}
impl ECIESError {
    /// Consumes the type and returns the error enum
    pub fn into_inner(self) -> ECIESErrorImpl {
        *self.inner
    }
    /// Returns a reference to the inner error
    pub const fn inner(&self) -> &ECIESErrorImpl {
        &self.inner
    }
}
impl fmt::Display for ECIESError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate formatting to the boxed error kind's `Display` impl.
        write!(f, "{}", self.inner)
    }
}
/// An error that occurs while reading or writing to an ECIES stream.
#[derive(Debug, Error)]
pub enum ECIESErrorImpl {
/// Error during IO
#[error(transparent)]
IO(std::io::Error),
/// Error when checking the HMAC tag against the tag on the message being decrypted
#[error("tag check failure in read_header")]
TagCheckDecryptFailed,
/// Error when checking the HMAC tag against the tag on the header
#[error("tag check failure in read_header")]
TagCheckHeaderFailed,
/// Error when checking the HMAC tag against the tag on the body
#[error("tag check failure in read_body")]
TagCheckBodyFailed,
/// Error when parsing AUTH data
#[error("invalid auth data")]
InvalidAuthData,
/// Error when parsing ACK data
#[error("invalid ack data")]
InvalidAckData,
/// Error when reading the header if its length is <3
#[error("invalid body data")]
InvalidHeader,
/// Error when interacting with secp256k1
#[error(transparent)]
Secp256k1(secp256k1::Error),
/// Error when decoding RLP data
#[error(transparent)]
RLPDecoding(alloy_rlp::Error),
/// Error when converting to integer
#[error(transparent)]
FromInt(std::num::TryFromIntError),
/// The encrypted data is not large enough for all fields
#[error("encrypted data is not large enough for all fields")]
EncryptedDataTooSmall,
/// The initial header body is too large.
#[error("initial header body is {body_size} but the max is {max_body_size}")]
InitialHeaderBodyTooLarge {
/// The body size from the header
body_size: usize,
/// The max body size
max_body_size: usize,
},
/// Error when trying to split an array beyond its length
#[error("requested {idx} but array len is {len}")]
OutOfBounds {
/// The index you are trying to split at
idx: usize,
/// The length of the array
len: usize,
},
/// Error when handshaking with a peer (ack / auth)
#[error("invalid handshake: expected {expected:?}, got {msg:?} instead")]
InvalidHandshake {
/// The expected return value from the peer
expected: IngressECIESValue,
/// The actual value returned from the peer
msg: Option<IngressECIESValue>,
},
/// Error when the stream was closed by the peer for being unreadable.
///
/// This exact error case happens when the wrapped stream in
/// [`Framed`](tokio_util::codec::Framed) is closed by the peer, See
/// [`ConnectionReset`](std::io::ErrorKind::ConnectionReset) and the ecies codec fails to
/// decode a message from the (partially filled) buffer.
#[error("stream closed due to not being readable")]
UnreadableStream,
/// Error when data is not received from peer for a prolonged period.
#[error("never received data from remote peer")]
StreamTimeout,
}
impl From<ECIESErrorImpl> for ECIESError {
    fn from(kind: ECIESErrorImpl) -> Self {
        // Box the kind so `ECIESError` stays a single pointer wide.
        Self { inner: Box::new(kind) }
    }
}
impl From<std::io::Error> for ECIESError {
    fn from(source: std::io::Error) -> Self {
        Self::from(ECIESErrorImpl::IO(source))
    }
}
impl From<secp256k1::Error> for ECIESError {
    fn from(source: secp256k1::Error) -> Self {
        Self::from(ECIESErrorImpl::Secp256k1(source))
    }
}
impl From<alloy_rlp::Error> for ECIESError {
    fn from(source: alloy_rlp::Error) -> Self {
        Self::from(ECIESErrorImpl::RLPDecoding(source))
    }
}
impl From<std::num::TryFromIntError> for ECIESError {
    fn from(source: std::num::TryFromIntError) -> Self {
        Self::from(ECIESErrorImpl::FromInt(source))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/util.rs | crates/net/ecies/src/util.rs | //! Utility functions for hashing and encoding.
use alloy_primitives::B256;
use hmac::{Hmac, Mac};
use sha2::{Digest, Sha256};
/// Hashes the input data with SHA256 - <https://en.wikipedia.org/wiki/SHA-2>
pub(crate) fn sha256(data: &[u8]) -> B256 {
    // A SHA-256 digest is exactly 32 bytes, matching B256.
    let digest = Sha256::digest(data);
    B256::from_slice(digest.as_ref())
}
/// Produces a `HMAC_SHA256` digest over the `input` slices followed by `auth_data`, keyed with
/// the given `key`.
pub(crate) fn hmac_sha256(key: &[u8], input: &[&[u8]], auth_data: &[u8]) -> B256 {
    // HMAC accepts keys of arbitrary length, so construction cannot fail here.
    let mut mac = Hmac::<Sha256>::new_from_slice(key).unwrap();
    // Feed each input slice, then the authenticated data, into the MAC state in order.
    for chunk in input {
        mac.update(chunk);
    }
    mac.update(auth_data);
    B256::from_slice(&mac.finalize().into_bytes())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/ecies/src/mac.rs | crates/net/ecies/src/mac.rs | //! # Ethereum MAC Module
//!
//! This module provides the implementation of the Ethereum MAC (Message Authentication Code)
//! construction, as specified in the Ethereum `RLPx` protocol.
//!
//! The Ethereum MAC is a nonstandard MAC construction that utilizes AES-256 (as a block cipher)
//! and Keccak-256. It is specifically designed for messages of 128 bits in length and is not
//! intended for general MAC use.
//!
//! For more information, refer to the [Ethereum MAC specification](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac).
use aes::Aes256Enc;
use alloy_primitives::{B128, B256};
use block_padding::NoPadding;
use cipher::BlockEncrypt;
use digest::KeyInit;
use generic_array::GenericArray;
use sha3::{Digest, Keccak256};
use typenum::U16;
/// Type alias for a fixed-size array of 16 bytes used as headers.
///
/// This type is defined as [`GenericArray<u8, U16>`] and is commonly employed in Ethereum `RLPx`
/// protocol-related structures for headers. It represents 16 bytes of data used in various
/// cryptographic operations, such as MAC (Message Authentication Code) computation, and matches
/// the 16-byte `RLPx` frame-header size.
pub type HeaderBytes = GenericArray<u8, U16>;
/// [`Ethereum MAC`](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac) state.
///
/// The ethereum MAC is a cursed MAC construction.
///
/// The ethereum MAC is a nonstandard MAC construction that uses AES-256 (without a mode, as a
/// block cipher) and Keccak-256. However, it only ever encrypts messages that are 128 bits long,
/// and is not defined as a general MAC.
#[derive(Debug)]
pub struct MAC {
    /// The MAC secret; used as the AES-256 key when folding digests back into the hash state.
    secret: B256,
    /// Running keccak256 state over everything accumulated so far.
    hasher: Keccak256,
}
impl MAC {
    /// Initialize the MAC with the given secret
    pub fn new(secret: B256) -> Self {
        Self { secret, hasher: Keccak256::new() }
    }
    /// Update the internal keccak256 hasher with the given data
    pub fn update(&mut self, data: &[u8]) {
        self.hasher.update(data)
    }
    /// Accumulate the given [`HeaderBytes`] into the MAC's internal state.
    pub fn update_header(&mut self, data: &HeaderBytes) {
        // Fold in AES(secret, digest) XOR header-ciphertext.
        self.accumulate_encrypted_digest_xor(data.as_slice());
    }
    /// Accumulate the given message body into the MAC's internal state.
    pub fn update_body(&mut self, data: &[u8]) {
        // Absorb the (encrypted) body first, then fold in AES(secret, digest) XOR digest, where
        // the digest is taken *after* absorbing the body.
        self.hasher.update(data);
        let digest = self.digest();
        self.accumulate_encrypted_digest_xor(digest.as_slice());
    }
    /// Shared step of the Ethereum MAC construction, previously duplicated by `update_header` and
    /// `update_body`: AES-256-encrypt the current 128-bit digest with the MAC secret, XOR it
    /// byte-wise with `xor_with` (always 16 bytes here), and absorb the result into the keccak256
    /// state.
    fn accumulate_encrypted_digest_xor(&mut self, xor_with: &[u8]) {
        let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap();
        let mut encrypted = self.digest().0;
        aes.encrypt_padded::<NoPadding>(&mut encrypted, B128::len_bytes()).unwrap();
        for (enc, byte) in encrypted.iter_mut().zip(xor_with) {
            *enc ^= *byte;
        }
        self.hasher.update(encrypted);
    }
    /// Produce a digest by finalizing the internal keccak256 hasher and returning the first 128
    /// bits.
    pub fn digest(&self) -> B128 {
        B128::from_slice(&self.hasher.clone().finalize()[..16])
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/lib.rs | crates/net/network-api/src/lib.rs | //! Reth interface definitions and commonly used types for the reth-network crate.
//!
//! Provides abstractions for the reth-network crate.
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub mod downloaders;
/// Network Error
pub mod error;
pub mod events;
/// Implementation of network traits for that does nothing.
pub mod noop;
pub mod test_utils;
use test_utils::PeersHandleProvider;
pub use alloy_rpc_types_admin::EthProtocolInfo;
pub use reth_network_p2p::{BlockClient, HeadersClient};
pub use reth_network_types::{PeerKind, Reputation, ReputationChangeKind};
pub use downloaders::BlockDownloaderProvider;
pub use error::NetworkError;
pub use events::{
DiscoveredEvent, DiscoveryEvent, NetworkEvent, NetworkEventListenerProvider, PeerRequest,
PeerRequestSender,
};
use reth_eth_wire_types::{
capability::Capabilities, Capability, DisconnectReason, EthVersion, NetworkPrimitives,
UnifiedStatus,
};
use reth_network_p2p::sync::NetworkSyncUpdater;
use reth_network_peers::NodeRecord;
use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant};
/// The `PeerId` type.
///
/// Alias for a 512-bit value ([`alloy_primitives::B512`]) identifying a remote peer.
pub type PeerId = alloy_primitives::B512;
/// Helper trait that unifies network API needed to launch node.
///
/// The trait body is intentionally empty: it only bundles the listed supertraits, and is
/// blanket-implemented for every type that satisfies them all.
pub trait FullNetwork:
    BlockDownloaderProvider<
        Client: BlockClient<Block = <Self::Primitives as NetworkPrimitives>::Block>,
    > + NetworkSyncUpdater
    + NetworkInfo
    + NetworkEventListenerProvider
    + Peers
    + PeersHandleProvider
    + Clone
    + Unpin
    + 'static
{
}
// Blanket implementation: any type that already provides the full set of network capabilities
// automatically implements `FullNetwork`; the bounds mirror the trait declaration exactly.
impl<T> FullNetwork for T where
    T: BlockDownloaderProvider<
            Client: BlockClient<Block = <Self::Primitives as NetworkPrimitives>::Block>,
        > + NetworkSyncUpdater
        + NetworkInfo
        + NetworkEventListenerProvider
        + Peers
        + PeersHandleProvider
        + Clone
        + Unpin
        + 'static
{
}
/// Provides general purpose information about the network.
///
/// Also implemented for `&T` and `Arc<T>` via `auto_impl`, so the trait can be used through
/// shared handles.
#[auto_impl::auto_impl(&, Arc)]
pub trait NetworkInfo: Send + Sync {
    /// Returns the [`SocketAddr`] that listens for incoming connections.
    fn local_addr(&self) -> SocketAddr;
    /// Returns the current status of the network being run by the local node.
    fn network_status(&self) -> impl Future<Output = Result<NetworkStatus, NetworkError>> + Send;
    /// Returns the chain id
    fn chain_id(&self) -> u64;
    /// Returns `true` if the network is undergoing sync.
    fn is_syncing(&self) -> bool;
    /// Returns `true` when the node is undergoing the very first Pipeline sync.
    fn is_initially_syncing(&self) -> bool;
}
/// Provides general purpose information about Peers in the network.
///
/// Also implemented for `&T` and `Arc<T>` via `auto_impl`.
#[auto_impl::auto_impl(&, Arc)]
pub trait PeersInfo: Send + Sync {
    /// Returns how many peers the network is currently connected to.
    ///
    /// Note: this should only include established connections and _not_ ongoing attempts.
    fn num_connected_peers(&self) -> usize;
    /// Returns the Ethereum Node Record of the node.
    fn local_node_record(&self) -> NodeRecord;
    /// Returns the local ENR of the node.
    fn local_enr(&self) -> enr::Enr<enr::secp256k1::SecretKey>;
}
/// Provides an API for managing the peers of the network.
///
/// All `add_*`/`connect_*` convenience methods have default implementations that delegate to
/// [`Self::add_peer_kind`] / [`Self::connect_peer_kind`] with the appropriate [`PeerKind`].
#[auto_impl::auto_impl(&, Arc)]
pub trait Peers: PeersInfo {
    /// Adds a peer to the peer set with TCP `SocketAddr`.
    ///
    /// Convenience wrapper around [`Self::add_peer_kind`] using [`PeerKind::Static`].
    fn add_peer(&self, peer: PeerId, tcp_addr: SocketAddr) {
        self.add_peer_kind(peer, PeerKind::Static, tcp_addr, None);
    }
    /// Adds a peer to the peer set with TCP and UDP `SocketAddr`.
    fn add_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) {
        self.add_peer_kind(peer, PeerKind::Static, tcp_addr, Some(udp_addr));
    }
    /// Adds a trusted [`PeerId`] to the peer set.
    ///
    /// This allows marking a peer as trusted without having to know the peer's address.
    fn add_trusted_peer_id(&self, peer: PeerId);
    /// Adds a trusted peer to the peer set with TCP `SocketAddr`.
    fn add_trusted_peer(&self, peer: PeerId, tcp_addr: SocketAddr) {
        self.add_peer_kind(peer, PeerKind::Trusted, tcp_addr, None);
    }
    /// Adds a trusted peer with TCP and UDP `SocketAddr` to the peer set.
    fn add_trusted_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) {
        self.add_peer_kind(peer, PeerKind::Trusted, tcp_addr, Some(udp_addr));
    }
    /// Adds a peer to the known peer set, with the given kind.
    fn add_peer_kind(
        &self,
        peer: PeerId,
        kind: PeerKind,
        tcp_addr: SocketAddr,
        udp_addr: Option<SocketAddr>,
    );
    /// Returns the rpc [`PeerInfo`] for all connected [`PeerKind::Trusted`] peers.
    fn get_trusted_peers(
        &self,
    ) -> impl Future<Output = Result<Vec<PeerInfo>, NetworkError>> + Send {
        self.get_peers_by_kind(PeerKind::Trusted)
    }
    /// Returns the rpc [`PeerInfo`] for all connected [`PeerKind::Basic`] peers.
    fn get_basic_peers(&self) -> impl Future<Output = Result<Vec<PeerInfo>, NetworkError>> + Send {
        self.get_peers_by_kind(PeerKind::Basic)
    }
    /// Returns the rpc [`PeerInfo`] for all connected peers with the given kind.
    fn get_peers_by_kind(
        &self,
        kind: PeerKind,
    ) -> impl Future<Output = Result<Vec<PeerInfo>, NetworkError>> + Send;
    /// Returns the rpc [`PeerInfo`] for all connected peers.
    fn get_all_peers(&self) -> impl Future<Output = Result<Vec<PeerInfo>, NetworkError>> + Send;
    /// Returns the rpc [`PeerInfo`] for the given peer id.
    ///
    /// Returns `None` if the peer is not connected.
    fn get_peer_by_id(
        &self,
        peer_id: PeerId,
    ) -> impl Future<Output = Result<Option<PeerInfo>, NetworkError>> + Send;
    /// Returns the rpc [`PeerInfo`] for the given peers if they are connected.
    ///
    /// Note: This only returns peers that are connected, unconnected peers are ignored but keeping
    /// the order in which they were requested.
    fn get_peers_by_id(
        &self,
        peer_ids: Vec<PeerId>,
    ) -> impl Future<Output = Result<Vec<PeerInfo>, NetworkError>> + Send;
    /// Removes a peer from the peer set that corresponds to given kind.
    fn remove_peer(&self, peer: PeerId, kind: PeerKind);
    /// Disconnect an existing connection to the given peer.
    fn disconnect_peer(&self, peer: PeerId);
    /// Disconnect an existing connection to the given peer using the provided reason
    fn disconnect_peer_with_reason(&self, peer: PeerId, reason: DisconnectReason);
    /// Connect to the given peer. NOTE: if the maximum number of outbound sessions is reached,
    /// this won't do anything. See `reth_network::SessionManager::dial_outbound`.
    ///
    /// Convenience wrapper around [`Self::connect_peer_kind`] using [`PeerKind::Static`].
    fn connect_peer(&self, peer: PeerId, tcp_addr: SocketAddr) {
        self.connect_peer_kind(peer, PeerKind::Static, tcp_addr, None)
    }
    /// Connects a peer to the known peer set, with the given kind.
    fn connect_peer_kind(
        &self,
        peer: PeerId,
        kind: PeerKind,
        tcp_addr: SocketAddr,
        udp_addr: Option<SocketAddr>,
    );
    /// Send a reputation change for the given peer.
    fn reputation_change(&self, peer_id: PeerId, kind: ReputationChangeKind);
    /// Get the reputation of a peer.
    fn reputation_by_id(
        &self,
        peer_id: PeerId,
    ) -> impl Future<Output = Result<Option<Reputation>, NetworkError>> + Send;
}
/// Info about an active peer session.
///
/// This is the session metadata returned by the rpc-facing [`Peers`] query methods
/// (e.g. [`Peers::get_all_peers`]).
#[derive(Debug, Clone)]
pub struct PeerInfo {
    /// Announced capabilities of the peer
    pub capabilities: Arc<Capabilities>,
    /// The identifier of the remote peer
    pub remote_id: PeerId,
    /// The client's name and version
    pub client_version: Arc<str>,
    /// The peer's enode
    pub enode: String,
    /// The peer's enr
    pub enr: Option<String>,
    /// The peer's address we're connected to
    pub remote_addr: SocketAddr,
    /// The local address of the connection
    pub local_addr: Option<SocketAddr>,
    /// The direction of the session
    pub direction: Direction,
    /// The negotiated eth version.
    pub eth_version: EthVersion,
    /// The Status message the peer sent for the `eth` handshake
    pub status: Arc<UnifiedStatus>,
    /// The timestamp when the session to that peer has been established.
    pub session_established: Instant,
    /// The peer's connection kind
    pub kind: PeerKind,
}
/// The direction of the connection.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Direction {
    /// Incoming connection.
    Incoming,
    /// Outgoing connection to a specific node.
    Outgoing(PeerId),
}
impl Direction {
    /// Returns `true` if this is an incoming connection.
    pub const fn is_incoming(&self) -> bool {
        match self {
            Self::Incoming => true,
            Self::Outgoing(_) => false,
        }
    }
    /// Returns `true` if this is an outgoing connection.
    pub const fn is_outgoing(&self) -> bool {
        // Exactly the complement of `is_incoming`, since there are only two variants.
        !self.is_incoming()
    }
}
impl std::fmt::Display for Direction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            Self::Incoming => "incoming",
            Self::Outgoing(_) => "outgoing",
        };
        f.write_str(label)
    }
}
/// The status of the network being ran by the local node.
///
/// Returned by [`NetworkInfo::network_status`].
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct NetworkStatus {
    /// The local node client version.
    pub client_version: String,
    /// The current ethereum protocol version
    pub protocol_version: u64,
    /// Information about the Ethereum Wire Protocol.
    pub eth_protocol_info: EthProtocolInfo,
    /// The list of supported capabilities and their versions.
    pub capabilities: Vec<Capability>,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/noop.rs | crates/net/network-api/src/noop.rs | //! A network implementation that does nothing.
//!
//! This is useful for wiring components together that don't require network but still need to be
//! generic over it.
use core::{fmt, marker::PhantomData};
use std::net::{IpAddr, SocketAddr};
use crate::{
events::{NetworkPeersEvents, PeerEventStream},
test_utils::{PeersHandle, PeersHandleProvider},
BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent,
NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerId, PeerInfo, PeerRequest, Peers,
PeersInfo,
};
use alloy_rpc_types_admin::EthProtocolInfo;
use enr::{secp256k1::SecretKey, Enr};
use reth_eth_wire_types::{
DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, ProtocolVersion,
};
use reth_network_p2p::{sync::NetworkSyncUpdater, NoopFullBlockClient};
use reth_network_peers::NodeRecord;
use reth_network_types::{PeerKind, Reputation, ReputationChangeKind};
use reth_tokio_util::{EventSender, EventStream};
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::UnboundedReceiverStream;
/// A type that implements all network traits but does nothing.
///
/// Intended for testing purposes where network is not used.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct NoopNetwork<Net = EthNetworkPrimitives> {
    /// Handle whose receiving end is dropped on construction, so all peer commands sent
    /// through it are silently discarded.
    peers_handle: PeersHandle,
    /// Ties the noop network to a set of [`NetworkPrimitives`] without storing any data.
    _marker: PhantomData<Net>,
}
impl<Net> NoopNetwork<Net> {
    /// Creates a new [`NoopNetwork`].
    pub fn new() -> Self {
        // The receiver is dropped immediately; sends on the handle become no-ops.
        let (tx, _) = mpsc::unbounded_channel();
        Self { peers_handle: PeersHandle::new(tx), _marker: PhantomData }
    }
}
impl Default for NoopNetwork<EthNetworkPrimitives> {
    fn default() -> Self {
        // Delegate to `new` instead of duplicating the construction logic.
        Self::new()
    }
}
// All answers are static placeholders describing a non-syncing, mainnet-like node.
impl<Net> NetworkInfo for NoopNetwork<Net>
where
    Net: Send + Sync,
{
    fn local_addr(&self) -> SocketAddr {
        // Unspecified IPv4 address (0.0.0.0) with port 30303.
        (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), 30303).into()
    }
    async fn network_status(&self) -> Result<NetworkStatus, NetworkError> {
        // NOTE: suppresses the deprecation lint triggered by constructing `EthProtocolInfo`
        // (parts of it are marked deprecated upstream).
        #[expect(deprecated)]
        Ok(NetworkStatus {
            client_version: "reth-test".to_string(),
            protocol_version: ProtocolVersion::V5 as u64,
            eth_protocol_info: EthProtocolInfo {
                network: 1,
                difficulty: None,
                genesis: Default::default(),
                config: Default::default(),
                head: Default::default(),
            },
            capabilities: vec![],
        })
    }
    fn chain_id(&self) -> u64 {
        // mainnet
        1
    }
    fn is_syncing(&self) -> bool {
        false
    }
    fn is_initially_syncing(&self) -> bool {
        false
    }
}
impl<Net> PeersInfo for NoopNetwork<Net>
where
    Net: Send + Sync,
{
    fn num_connected_peers(&self) -> usize {
        0
    }
    fn local_node_record(&self) -> NodeRecord {
        // A fresh random peer id is generated on every call.
        NodeRecord::new(self.local_addr(), PeerId::random())
    }
    fn local_enr(&self) -> Enr<SecretKey> {
        // Deterministic throwaway key (32 bytes of 0xcd) used only to build a valid ENR.
        let sk = SecretKey::from_slice(&[0xcd; 32]).unwrap();
        Enr::builder().build(&sk).unwrap()
    }
}
// All mutating operations are no-ops; all queries resolve immediately with empty results.
impl<Net> Peers for NoopNetwork<Net>
where
    Net: Send + Sync,
{
    fn add_trusted_peer_id(&self, _peer: PeerId) {}
    fn add_peer_kind(
        &self,
        _peer: PeerId,
        _kind: PeerKind,
        _tcp_addr: SocketAddr,
        _udp_addr: Option<SocketAddr>,
    ) {
    }
    async fn get_peers_by_kind(&self, _kind: PeerKind) -> Result<Vec<PeerInfo>, NetworkError> {
        Ok(vec![])
    }
    async fn get_all_peers(&self) -> Result<Vec<PeerInfo>, NetworkError> {
        Ok(vec![])
    }
    async fn get_peer_by_id(&self, _peer_id: PeerId) -> Result<Option<PeerInfo>, NetworkError> {
        Ok(None)
    }
    async fn get_peers_by_id(&self, _peer_id: Vec<PeerId>) -> Result<Vec<PeerInfo>, NetworkError> {
        Ok(vec![])
    }
    fn remove_peer(&self, _peer: PeerId, _kind: PeerKind) {}
    fn disconnect_peer(&self, _peer: PeerId) {}
    fn disconnect_peer_with_reason(&self, _peer: PeerId, _reason: DisconnectReason) {}
    fn connect_peer_kind(
        &self,
        _peer: PeerId,
        _kind: PeerKind,
        _tcp_addr: SocketAddr,
        _udp_addr: Option<SocketAddr>,
    ) {
    }
    fn reputation_change(&self, _peer_id: PeerId, _kind: ReputationChangeKind) {}
    async fn reputation_by_id(&self, _peer_id: PeerId) -> Result<Option<Reputation>, NetworkError> {
        Ok(None)
    }
}
impl<Net> BlockDownloaderProvider for NoopNetwork<Net>
where
    Net: NetworkPrimitives,
{
    type Client = NoopFullBlockClient<Net>;
    // Always succeeds and hands out a client that performs no actual downloads.
    async fn fetch_client(&self) -> Result<Self::Client, oneshot::error::RecvError> {
        Ok(NoopFullBlockClient::<Net>::default())
    }
}
// Sync state updates are accepted and discarded.
impl<Net> NetworkSyncUpdater for NoopNetwork<Net>
where
    Net: fmt::Debug + Send + Sync + 'static,
{
    fn update_status(&self, _head: reth_ethereum_forks::Head) {}
    fn update_sync_state(&self, _state: reth_network_p2p::sync::SyncState) {}
    fn update_block_range(&self, _: reth_eth_wire_types::BlockRangeUpdate) {}
}
impl<Net> NetworkEventListenerProvider for NoopNetwork<Net>
where
    Net: NetworkPrimitives,
{
    type Primitives = Net;
    fn event_listener(&self) -> EventStream<NetworkEvent<PeerRequest<Self::Primitives>>> {
        // The sender is dropped as soon as this function returns, so the stream never yields.
        let event_sender: EventSender<NetworkEvent<PeerRequest<Net>>> = Default::default();
        event_sender.new_listener()
    }
    fn discovery_listener(&self) -> UnboundedReceiverStream<DiscoveryEvent> {
        // Same pattern: the sending half is discarded, producing a stream that is already done.
        let (_, rx) = mpsc::unbounded_channel();
        UnboundedReceiverStream::new(rx)
    }
}
impl<Net> NetworkPeersEvents for NoopNetwork<Net>
where
    Net: NetworkPrimitives,
{
    fn peer_events(&self) -> PeerEventStream {
        // The backing sender is dropped immediately, so no peer events are ever emitted.
        let event_sender: EventSender<NetworkEvent<PeerRequest<Net>>> = Default::default();
        PeerEventStream::new(event_sender.new_listener())
    }
}
impl<Net> PeersHandleProvider for NoopNetwork<Net>
where
    Net: NetworkPrimitives,
{
    fn peers_handle(&self) -> &PeersHandle {
        // Handle created in `new`; its receiver is gone, so commands are discarded.
        &self.peers_handle
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/downloaders.rs | crates/net/network-api/src/downloaders.rs | //! API related to syncing blocks.
use std::fmt::Debug;
use futures::Future;
use reth_network_p2p::BlockClient;
use tokio::sync::oneshot;
/// Provides client for downloading blocks.
///
/// Also implemented for `&T` and `Arc<T>` via `auto_impl`.
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockDownloaderProvider {
    /// The client this type can provide.
    type Client: BlockClient<Header: Debug, Body: Debug> + Send + Sync + Clone + 'static;
    /// Returns a new [`BlockClient`], used for fetching blocks from peers.
    ///
    /// The client is the entrypoint for sending block requests to the network.
    ///
    /// Errors with [`oneshot::error::RecvError`] if the task answering the request dropped
    /// the response channel.
    fn fetch_client(
        &self,
    ) -> impl Future<Output = Result<Self::Client, oneshot::error::RecvError>> + Send;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/error.rs | crates/net/network-api/src/error.rs | use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
/// Network Errors
///
/// Currently the only failure mode is a closed command/response channel, which indicates the
/// network task has shut down.
#[derive(Error, Debug, Clone, PartialEq, Eq)]
pub enum NetworkError {
    /// Indicates that the sender has been dropped.
    #[error("sender has been dropped")]
    ChannelClosed,
}
impl<T> From<mpsc::error::SendError<T>> for NetworkError {
fn from(_: mpsc::error::SendError<T>) -> Self {
Self::ChannelClosed
}
}
impl From<oneshot::error::RecvError> for NetworkError {
fn from(_: oneshot::error::RecvError) -> Self {
Self::ChannelClosed
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/events.rs | crates/net/network-api/src/events.rs | //! API related to listening for network events.
use reth_eth_wire_types::{
message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage,
EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData,
GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts,
Receipts69, UnifiedStatus,
};
use reth_ethereum_forks::ForkId;
use reth_network_p2p::error::{RequestError, RequestResult};
use reth_network_peers::PeerId;
use reth_network_types::{PeerAddr, PeerKind};
use reth_tokio_util::EventStream;
use std::{
fmt,
net::SocketAddr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::sync::{mpsc, oneshot};
use tokio_stream::{wrappers::UnboundedReceiverStream, Stream, StreamExt};
/// A boxed stream of network peer events that provides a type-erased interface.
pub struct PeerEventStream(Pin<Box<dyn Stream<Item = PeerEvent> + Send + Sync>>);
impl fmt::Debug for PeerEventStream {
    // Manual impl: the boxed trait object has no `Debug` bound, so only the type name is shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PeerEventStream").finish_non_exhaustive()
    }
}
impl PeerEventStream {
    /// Create a new stream [`PeerEventStream`] by converting the provided stream's items into peer
    /// events [`PeerEvent`]
    pub fn new<S, T>(stream: S) -> Self
    where
        S: Stream<Item = T> + Send + Sync + 'static,
        T: Into<PeerEvent> + 'static,
    {
        // Map each item through `Into<PeerEvent>` and erase the concrete stream type.
        let mapped_stream = stream.map(Into::into);
        Self(Box::pin(mapped_stream))
    }
}
impl Stream for PeerEventStream {
    type Item = PeerEvent;
    // Delegates polling to the pinned inner stream.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.0.as_mut().poll_next(cx)
    }
}
/// Represents information about an established peer session.
///
/// Carried by [`PeerEvent::SessionEstablished`] and [`NetworkEvent::ActivePeerSession`].
#[derive(Debug, Clone)]
pub struct SessionInfo {
    /// The identifier of the peer to which a session was established.
    pub peer_id: PeerId,
    /// The remote addr of the peer to which a session was established.
    pub remote_addr: SocketAddr,
    /// The client version of the peer to which a session was established.
    pub client_version: Arc<str>,
    /// Capabilities the peer announced.
    pub capabilities: Arc<Capabilities>,
    /// The status of the peer to which a session was established.
    pub status: Arc<UnifiedStatus>,
    /// Negotiated eth version of the session.
    pub version: EthVersion,
    /// The kind of peer this session represents
    pub peer_kind: PeerKind,
}
/// (Non-exhaustive) List of the different events emitted by the network that are of interest for
/// subscribers.
///
/// This includes any event types that may be relevant to tasks, for metrics, keep track of peers
/// etc.
#[derive(Debug, Clone)]
pub enum PeerEvent {
    /// Closed the peer session.
    SessionClosed {
        /// The identifier of the peer to which a session was closed.
        peer_id: PeerId,
        /// Why the disconnect was triggered
        ///
        /// NOTE(review): presumably `None` when no explicit [`DisconnectReason`] was
        /// exchanged for the disconnect — confirm against the session manager.
        reason: Option<DisconnectReason>,
    },
    /// Established a new session with the given peer.
    SessionEstablished(SessionInfo),
    /// Event emitted when a new peer is added
    PeerAdded(PeerId),
    /// Event emitted when a new peer is removed
    PeerRemoved(PeerId),
}
/// (Non-exhaustive) Network events representing peer lifecycle events and session requests.
#[derive(Debug)]
pub enum NetworkEvent<R = PeerRequest> {
    /// Basic peer lifecycle event.
    Peer(PeerEvent),
    /// Session established with requests.
    ActivePeerSession {
        /// Session information
        info: SessionInfo,
        /// A request channel to the session task.
        messages: PeerRequestSender<R>,
    },
}
// Manual impl so `NetworkEvent<R>` is `Clone` for any `R`: only the request *sender* is
// cloned, which does not require `R: Clone`.
impl<R> Clone for NetworkEvent<R> {
    fn clone(&self) -> Self {
        match self {
            Self::Peer(event) => Self::Peer(event.clone()),
            Self::ActivePeerSession { info, messages } => {
                Self::ActivePeerSession { info: info.clone(), messages: messages.clone() }
            }
        }
    }
}
// Collapses an active-session event into its `SessionEstablished` counterpart, dropping the
// request channel.
impl<R> From<NetworkEvent<R>> for PeerEvent {
    fn from(event: NetworkEvent<R>) -> Self {
        match event {
            NetworkEvent::Peer(peer_event) => peer_event,
            NetworkEvent::ActivePeerSession { info, .. } => Self::SessionEstablished(info),
        }
    }
}
/// Provides peer event subscription for the network.
///
/// Also implemented for `&T` and `Arc<T>` via `auto_impl`.
#[auto_impl::auto_impl(&, Arc)]
pub trait NetworkPeersEvents: Send + Sync {
    /// Creates a new peer event listener stream.
    fn peer_events(&self) -> PeerEventStream;
}
/// Provides event subscription for the network.
#[auto_impl::auto_impl(&, Arc)]
pub trait NetworkEventListenerProvider: NetworkPeersEvents {
    /// The primitive types to use in the `PeerRequest` used in the stream.
    type Primitives: NetworkPrimitives;
    /// Creates a new [`NetworkEvent`] listener channel.
    fn event_listener(&self) -> EventStream<NetworkEvent<PeerRequest<Self::Primitives>>>;
    /// Returns a new [`DiscoveryEvent`] stream.
    ///
    /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered.
    fn discovery_listener(&self) -> UnboundedReceiverStream<DiscoveryEvent>;
}
/// Events produced by the `Discovery` manager.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DiscoveryEvent {
    /// Discovered a node
    NewNode(DiscoveredEvent),
    /// Retrieved a [`ForkId`] from the peer via ENR request, See <https://eips.ethereum.org/EIPS/eip-868>
    EnrForkId(PeerId, ForkId),
}
/// Represents events related to peer discovery in the network.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DiscoveredEvent {
    /// Indicates that a new peer has been discovered and queued for potential connection.
    ///
    /// This event is generated when the system becomes aware of a new peer
    /// but hasn't yet established a connection.
    EventQueued {
        /// The unique identifier of the discovered peer.
        peer_id: PeerId,
        /// The network address of the discovered peer.
        addr: PeerAddr,
        /// An optional identifier for the fork that this peer is associated with.
        /// `None` if the peer is not associated with a specific fork.
        fork_id: Option<ForkId>,
    },
}
/// Protocol related request messages that expect a response
///
/// Each variant pairs an `eth` protocol request with the oneshot channel on which the matching
/// response must be delivered.
#[derive(Debug)]
pub enum PeerRequest<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Requests block headers from the peer.
    ///
    /// The response should be sent through the channel.
    GetBlockHeaders {
        /// The request for block headers.
        request: GetBlockHeaders,
        /// The channel to send the response for block headers.
        response: oneshot::Sender<RequestResult<BlockHeaders<N::BlockHeader>>>,
    },
    /// Requests block bodies from the peer.
    ///
    /// The response should be sent through the channel.
    GetBlockBodies {
        /// The request for block bodies.
        request: GetBlockBodies,
        /// The channel to send the response for block bodies.
        response: oneshot::Sender<RequestResult<BlockBodies<N::BlockBody>>>,
    },
    /// Requests pooled transactions from the peer.
    ///
    /// The response should be sent through the channel.
    GetPooledTransactions {
        /// The request for pooled transactions.
        request: GetPooledTransactions,
        /// The channel to send the response for pooled transactions.
        response: oneshot::Sender<RequestResult<PooledTransactions<N::PooledTransaction>>>,
    },
    /// Requests `NodeData` from the peer.
    ///
    /// The response should be sent through the channel.
    GetNodeData {
        /// The request for `NodeData`.
        request: GetNodeData,
        /// The channel to send the response for `NodeData`.
        response: oneshot::Sender<RequestResult<NodeData>>,
    },
    /// Requests receipts from the peer.
    ///
    /// The response should be sent through the channel.
    GetReceipts {
        /// The request for receipts.
        request: GetReceipts,
        /// The channel to send the response for receipts.
        response: oneshot::Sender<RequestResult<Receipts<N::Receipt>>>,
    },
    /// Requests receipts from the peer without bloom filter.
    ///
    /// The response should be sent through the channel.
    GetReceipts69 {
        /// The request for receipts.
        request: GetReceipts,
        /// The channel to send the response for receipts.
        response: oneshot::Sender<RequestResult<Receipts69<N::Receipt>>>,
    },
}
// === impl PeerRequest ===
impl<N: NetworkPrimitives> PeerRequest<N> {
    /// Invoked if we received a response which does not match the request
    pub fn send_bad_response(self) {
        self.send_err_response(RequestError::BadResponse)
    }
    /// Send an error back to the receiver.
    ///
    /// Send failures are intentionally ignored: the requesting side may already have dropped
    /// its receiver, in which case there is nobody left to notify.
    pub fn send_err_response(self, err: RequestError) {
        let _ = match self {
            Self::GetBlockHeaders { response, .. } => response.send(Err(err)).ok(),
            Self::GetBlockBodies { response, .. } => response.send(Err(err)).ok(),
            Self::GetPooledTransactions { response, .. } => response.send(Err(err)).ok(),
            Self::GetNodeData { response, .. } => response.send(Err(err)).ok(),
            Self::GetReceipts { response, .. } => response.send(Err(err)).ok(),
            Self::GetReceipts69 { response, .. } => response.send(Err(err)).ok(),
        };
    }
    /// Returns the [`EthMessage`] for this type
    pub fn create_request_message(&self, request_id: u64) -> EthMessage<N> {
        match self {
            // `GetBlockHeaders` is copied by deref; the other request types are cloned.
            Self::GetBlockHeaders { request, .. } => {
                EthMessage::GetBlockHeaders(RequestPair { request_id, message: *request })
            }
            Self::GetBlockBodies { request, .. } => {
                EthMessage::GetBlockBodies(RequestPair { request_id, message: request.clone() })
            }
            Self::GetPooledTransactions { request, .. } => {
                EthMessage::GetPooledTransactions(RequestPair {
                    request_id,
                    message: request.clone(),
                })
            }
            Self::GetNodeData { request, .. } => {
                EthMessage::GetNodeData(RequestPair { request_id, message: request.clone() })
            }
            // Both receipt variants map onto the same `GetReceipts` wire message; they only
            // differ in the expected response encoding.
            Self::GetReceipts { request, .. } | Self::GetReceipts69 { request, .. } => {
                EthMessage::GetReceipts(RequestPair { request_id, message: request.clone() })
            }
        }
    }
    /// Consumes the type and returns the inner [`GetPooledTransactions`] variant.
    ///
    /// Returns `None` (dropping the response channel) for every other variant.
    pub fn into_get_pooled_transactions(self) -> Option<GetPooledTransactions> {
        match self {
            Self::GetPooledTransactions { request, .. } => Some(request),
            _ => None,
        }
    }
}
/// A Cloneable connection for sending _requests_ directly to the session of a peer.
pub struct PeerRequestSender<R = PeerRequest> {
    /// id of the remote node.
    pub peer_id: PeerId,
    /// The Sender half connected to a session.
    pub to_session_tx: mpsc::Sender<R>,
}
// Manual impl so cloning never requires `R: Clone`: only the channel handle is cloned.
impl<R> Clone for PeerRequestSender<R> {
    fn clone(&self) -> Self {
        Self { peer_id: self.peer_id, to_session_tx: self.to_session_tx.clone() }
    }
}
// === impl PeerRequestSender ===
impl<R> PeerRequestSender<R> {
    /// Constructs a new sender instance that's wired to a session
    pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender<R>) -> Self {
        Self { peer_id, to_session_tx }
    }
    /// Attempts to immediately send a message on this Sender
    ///
    /// Non-blocking: fails with [`mpsc::error::TrySendError`] if the channel is full or closed.
    pub fn try_send(&self, req: R) -> Result<(), mpsc::error::TrySendError<R>> {
        self.to_session_tx.try_send(req)
    }
    /// Returns the peer id of the remote peer.
    pub const fn peer_id(&self) -> &PeerId {
        &self.peer_id
    }
}
// Manual impl because `mpsc::Sender<R>` carries no useful `Debug` output; only the peer id is
// shown.
impl<R> fmt::Debug for PeerRequestSender<R> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PeerRequestSender").field("peer_id", &self.peer_id).finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/test_utils/mod.rs | crates/net/network-api/src/test_utils/mod.rs | //! API for integration testing network components.
pub mod peers_manager;
pub use peers_manager::{PeerCommand, PeersHandle, PeersHandleProvider};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network-api/src/test_utils/peers_manager.rs | crates/net/network-api/src/test_utils/peers_manager.rs | //! Interaction with `reth_network::PeersManager`, for integration testing. Otherwise
//! `reth_network::NetworkManager` manages `reth_network::PeersManager`.
use std::net::SocketAddr;
use derive_more::Constructor;
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::{Peer, ReputationChangeKind};
use tokio::sync::{mpsc, oneshot};
/// Provides an API for managing the peers of the network.
///
/// Also implemented for `&T` and `Arc<T>` via `auto_impl`.
#[auto_impl::auto_impl(&, Arc)]
pub trait PeersHandleProvider {
    /// Returns the [`PeersHandle`] that can be cloned and shared.
    ///
    /// The [`PeersHandle`] can be used to interact with the network's peer set.
    fn peers_handle(&self) -> &PeersHandle;
}
/// A communication channel to the `PeersManager` to apply manual changes to the peer set.
///
/// Commands are fire-and-forget: if the manager has shut down, sends are silently dropped and
/// the query methods fall back to empty results.
#[derive(Clone, Debug, Constructor)]
pub struct PeersHandle {
    /// Sender half of command channel back to the `PeersManager`
    manager_tx: mpsc::UnboundedSender<PeerCommand>,
}
// === impl PeersHandle ===
impl PeersHandle {
    /// Sends a command, ignoring failures (the manager may already be gone).
    fn send(&self, cmd: PeerCommand) {
        let _ = self.manager_tx.send(cmd);
    }
    /// Adds a peer to the set.
    pub fn add_peer(&self, peer_id: PeerId, addr: SocketAddr) {
        self.send(PeerCommand::Add(peer_id, addr));
    }
    /// Removes a peer from the set.
    pub fn remove_peer(&self, peer_id: PeerId) {
        self.send(PeerCommand::Remove(peer_id));
    }
    /// Send a reputation change for the given peer.
    pub fn reputation_change(&self, peer_id: PeerId, kind: ReputationChangeKind) {
        self.send(PeerCommand::ReputationChange(peer_id, kind));
    }
    /// Returns a peer by its [`PeerId`], or `None` if the peer is not in the peer set.
    ///
    /// Also returns `None` if the manager dropped the response channel.
    pub async fn peer_by_id(&self, peer_id: PeerId) -> Option<Peer> {
        let (tx, rx) = oneshot::channel();
        self.send(PeerCommand::GetPeer(peer_id, tx));
        rx.await.unwrap_or(None)
    }
    /// Returns all peers in the peerset.
    ///
    /// Returns an empty list if the manager dropped the response channel.
    pub async fn all_peers(&self) -> Vec<NodeRecord> {
        let (tx, rx) = oneshot::channel();
        self.send(PeerCommand::GetPeers(tx));
        rx.await.unwrap_or_default()
    }
}
/// Commands the `PeersManager` listens for.
#[derive(Debug)]
pub enum PeerCommand {
    /// Command to manually add a peer to the set
    Add(PeerId, SocketAddr),
    /// Remove a peer from the set
    ///
    /// If currently connected this will disconnect the session
    Remove(PeerId),
    /// Apply a reputation change to the given peer.
    ReputationChange(PeerId, ReputationChangeKind),
    /// Get information about a peer
    GetPeer(PeerId, oneshot::Sender<Option<Peer>>),
    /// Get node information on all peers
    GetPeers(oneshot::Sender<Vec<NodeRecord>>),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/config.rs | crates/net/dns/src/config.rs | use crate::tree::LinkEntry;
use std::{
collections::HashSet,
num::{NonZeroU32, NonZeroUsize},
time::Duration,
};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Settings for the [`DnsDiscoveryService`](crate::DnsDiscoveryService).
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct DnsDiscoveryConfig {
    /// Timeout for DNS lookups.
    ///
    /// Default: 5s
    pub lookup_timeout: Duration,
    /// The DNS request rate limit
    ///
    /// Default: 3
    pub max_requests_per_sec: NonZeroUsize,
    /// The rate at which trees should be updated.
    ///
    /// Default: 30min
    pub recheck_interval: Duration,
    /// Maximum number of cached DNS records.
    ///
    /// Default: 1000
    pub dns_record_cache_limit: NonZeroU32,
    /// Links to the DNS networks to bootstrap.
    ///
    /// Default: `Some` of an empty set (bootstrapping enabled, no networks configured).
    pub bootstrap_dns_networks: Option<HashSet<LinkEntry>>,
}
impl Default for DnsDiscoveryConfig {
fn default() -> Self {
Self {
lookup_timeout: Duration::from_secs(5),
max_requests_per_sec: NonZeroUsize::new(3).unwrap(),
recheck_interval: Duration::from_secs(60 * 30),
dns_record_cache_limit: NonZeroU32::new(1_000).unwrap(),
bootstrap_dns_networks: Some(Default::default()),
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/lib.rs | crates/net/dns/src/lib.rs | //! Implementation of [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459) Node Discovery via DNS.
//!
//! ## Feature Flags
//!
//! - `serde` (default): Enable serde support
//! - `test-utils`: Export utilities for testing
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
pub use crate::resolver::{DnsResolver, MapResolver, Resolver};
use crate::{
query::{QueryOutcome, QueryPool, ResolveEntryResult, ResolveRootResult},
sync::{ResolveKind, SyncAction},
tree::{DnsEntry, LinkEntry},
};
pub use config::DnsDiscoveryConfig;
use enr::Enr;
pub use error::ParseDnsEntryError;
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_peers::{pk2id, NodeRecord};
use schnellru::{ByLength, LruMap};
use secp256k1::SecretKey;
use std::{
collections::{hash_map::Entry, HashMap, HashSet, VecDeque},
net::IpAddr,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
time::{Duration, Instant},
};
use sync::SyncTree;
use tokio::{
sync::{
mpsc,
mpsc::{error::TrySendError, UnboundedSender},
oneshot,
},
task::JoinHandle,
};
use tokio_stream::{
wrappers::{ReceiverStream, UnboundedReceiverStream},
Stream, StreamExt,
};
use tracing::{debug, trace};
mod config;
mod error;
mod query;
pub mod resolver;
mod sync;
pub mod tree;
/// [`DnsDiscoveryService`] front-end.
///
/// A cheaply cloneable handle used to send commands to a running service.
#[derive(Clone, Debug)]
pub struct DnsDiscoveryHandle {
    /// Channel for sending commands to the service.
    to_service: UnboundedSender<DnsDiscoveryCommand>,
}
// === impl DnsDiscoveryHandle ===

impl DnsDiscoveryHandle {
    /// Starts syncing the given link to a tree.
    ///
    /// Returns an error if `link` is not a valid `enrtree://` link.
    pub fn sync_tree(&self, link: &str) -> Result<(), ParseDnsEntryError> {
        self.sync_tree_with_link(link.parse()?);
        Ok(())
    }

    /// Starts syncing the given link to a tree.
    pub fn sync_tree_with_link(&self, link: LinkEntry) {
        // send errors are ignored: a closed channel means the service shut down
        let _ = self.to_service.send(DnsDiscoveryCommand::SyncTree(link));
    }

    /// Returns the receiver half of new listener channel that streams discovered [`NodeRecord`]s.
    pub async fn node_record_stream(
        &self,
    ) -> Result<ReceiverStream<DnsNodeRecordUpdate>, oneshot::error::RecvError> {
        let (tx, rx) = oneshot::channel();
        let cmd = DnsDiscoveryCommand::NodeRecordUpdates(tx);
        let _ = self.to_service.send(cmd);
        // fails with `RecvError` if the service dropped the request
        rx.await
    }
}
/// A client that discovers nodes via DNS.
#[must_use = "Service does nothing unless polled"]
#[expect(missing_debug_implementations)]
pub struct DnsDiscoveryService<R: Resolver = DnsResolver> {
    /// Copy of the sender half, so new [`DnsDiscoveryHandle`] can be created on demand.
    command_tx: UnboundedSender<DnsDiscoveryCommand>,
    /// Receiver half of the command channel.
    command_rx: UnboundedReceiverStream<DnsDiscoveryCommand>,
    /// All subscribers for resolved [`NodeRecord`]s.
    node_record_listeners: Vec<mpsc::Sender<DnsNodeRecordUpdate>>,
    /// All the trees that can be synced.
    trees: HashMap<LinkEntry, SyncTree>,
    /// All queries currently in progress
    queries: QueryPool<R, SecretKey>,
    /// Cached dns records, keyed by the resolved entry's hash.
    dns_record_cache: LruMap<String, DnsEntry<SecretKey>>,
    /// all buffered events, drained first by `poll`
    queued_events: VecDeque<DnsDiscoveryEvent>,
    /// The rate at which trees should be updated.
    recheck_interval: Duration,
    /// Links to the DNS networks to bootstrap.
    bootstrap_dns_networks: HashSet<LinkEntry>,
}
// === impl DnsDiscoveryService ===
impl<R: Resolver> DnsDiscoveryService<R> {
    /// Creates a new instance of the [`DnsDiscoveryService`] using the given settings.
    ///
    /// ```
    /// use reth_dns_discovery::{DnsDiscoveryService, DnsResolver};
    /// use std::sync::Arc;
    /// # fn t() {
    /// let service = DnsDiscoveryService::new(
    ///     Arc::new(DnsResolver::from_system_conf().unwrap()),
    ///     Default::default(),
    /// );
    /// # }
    /// ```
    pub fn new(resolver: Arc<R>, config: DnsDiscoveryConfig) -> Self {
        // destructure so newly added config fields can't be silently ignored here
        let DnsDiscoveryConfig {
            lookup_timeout,
            max_requests_per_sec,
            recheck_interval,
            dns_record_cache_limit,
            bootstrap_dns_networks,
        } = config;
        let queries = QueryPool::new(resolver, max_requests_per_sec, lookup_timeout);
        let (command_tx, command_rx) = mpsc::unbounded_channel();
        Self {
            command_tx,
            command_rx: UnboundedReceiverStream::new(command_rx),
            node_record_listeners: Default::default(),
            trees: Default::default(),
            queries,
            dns_record_cache: LruMap::new(ByLength::new(dns_record_cache_limit.get())),
            queued_events: Default::default(),
            recheck_interval,
            bootstrap_dns_networks: bootstrap_dns_networks.unwrap_or_default(),
        }
    }
    /// Spawns this services onto a new task
    ///
    /// Note: requires a running runtime
    pub fn spawn(mut self) -> JoinHandle<()> {
        tokio::task::spawn(async move {
            self.bootstrap();

            // drive the service; the stream never yields `None` on its own (see
            // the `Stream` impl), so this loop runs for the task's lifetime
            while let Some(event) = self.next().await {
                trace!(target: "disc::dns", ?event, "processed");
            }
        })
    }
/// Starts discovery with all configured bootstrap links
pub fn bootstrap(&mut self) {
for link in self.bootstrap_dns_networks.clone() {
self.sync_tree_with_link(link);
}
}
    /// Same as [`DnsDiscoveryService::new`] but also returns a new handle that's connected to the
    /// service
    pub fn new_pair(resolver: Arc<R>, config: DnsDiscoveryConfig) -> (Self, DnsDiscoveryHandle) {
        let service = Self::new(resolver, config);
        // the handle shares the service's command channel
        let handle = service.handle();
        (service, handle)
    }
    /// Returns a new [`DnsDiscoveryHandle`] that can send commands to this type.
    pub fn handle(&self) -> DnsDiscoveryHandle {
        // cloning the unbounded sender is cheap; any number of handles may exist
        DnsDiscoveryHandle { to_service: self.command_tx.clone() }
    }
    /// Creates a new channel for [`NodeRecord`]s.
    ///
    /// The channel is bounded (256 entries); while a subscriber's channel is
    /// full, further updates to it are dropped (see `notify`).
    pub fn node_record_stream(&mut self) -> ReceiverStream<DnsNodeRecordUpdate> {
        let (tx, rx) = mpsc::channel(256);
        self.node_record_listeners.push(tx);
        ReceiverStream::new(rx)
    }
/// Sends the event to all listeners.
///
/// Remove channels that got closed.
fn notify(&mut self, record: DnsNodeRecordUpdate) {
self.node_record_listeners.retain_mut(|listener| match listener.try_send(record.clone()) {
Ok(()) => true,
Err(err) => match err {
TrySendError::Full(_) => true,
TrySendError::Closed(_) => false,
},
});
}
    /// Starts syncing the given link to a tree.
    ///
    /// Returns an error if `link` is not a valid `enrtree://` link.
    pub fn sync_tree(&mut self, link: &str) -> Result<(), ParseDnsEntryError> {
        self.sync_tree_with_link(link.parse()?);
        Ok(())
    }
    /// Starts syncing the given link to a tree.
    ///
    /// Kicks off a lookup of the tree's root entry; the tree is tracked once
    /// the root resolves (see `on_resolved_root`).
    pub fn sync_tree_with_link(&mut self, link: LinkEntry) {
        self.queries.resolve_root(link);
    }
/// Resolves an entry
fn resolve_entry(&mut self, link: LinkEntry<SecretKey>, hash: String, kind: ResolveKind) {
if let Some(entry) = self.dns_record_cache.get(&hash).cloned() {
// already resolved
let cached = ResolveEntryResult { entry: Some(Ok(entry)), link, hash, kind };
self.on_resolved_entry(cached);
return
}
self.queries.resolve_entry(link, hash, kind)
}
    /// Handles the outcome of a root lookup: registers new trees and refreshes
    /// known ones; failures are only logged.
    fn on_resolved_root(&mut self, resp: ResolveRootResult<SecretKey>) {
        match resp {
            Ok((root, link)) => match self.trees.entry(link.clone()) {
                Entry::Occupied(mut entry) => {
                    // known tree: apply the freshly fetched root
                    entry.get_mut().update_root(root);
                }
                Entry::Vacant(entry) => {
                    // first time this tree is seen: start tracking it
                    entry.insert(SyncTree::new(root, link));
                }
            },
            Err((err, link)) => {
                debug!(target: "disc::dns",%err, ?link, "Failed to lookup root")
            }
        }
    }
    /// Handles a freshly resolved ENR: notifies subscribers (only when the
    /// record could be converted into a [`NodeRecord`]) and buffers the event.
    fn on_resolved_enr(&mut self, enr: Enr<SecretKey>) {
        // conversion fails when the ENR lacks an IP or the tcp/udp ports
        if let Some(record) = convert_enr_node_record(&enr) {
            self.notify(record);
        }
        self.queued_events.push_back(DnsDiscoveryEvent::Enr(enr))
    }
    /// Handles the outcome of an entry lookup.
    ///
    /// Successful entries are cached and then dispatched according to their
    /// variant; entries whose variant doesn't match the expected `kind` are
    /// only logged.
    fn on_resolved_entry(&mut self, resp: ResolveEntryResult<SecretKey>) {
        let ResolveEntryResult { entry, link, hash, kind } = resp;
        match entry {
            Some(Err(err)) => {
                debug!(target: "disc::dns",%err, domain=%link.domain, ?hash, "Failed to lookup entry")
            }
            None => {
                trace!(target: "disc::dns",domain=%link.domain, ?hash, "No dns entry")
            }
            Some(Ok(entry)) => {
                // cache entry
                self.dns_record_cache.insert(hash.clone(), entry.clone());

                match entry {
                    DnsEntry::Root(root) => {
                        // roots are fetched via `resolve_root`, never as a child entry
                        debug!(target: "disc::dns",%root, domain=%link.domain, ?hash, "resolved unexpected root entry");
                    }
                    DnsEntry::Link(link_entry) => {
                        if kind.is_link() {
                            if let Some(tree) = self.trees.get_mut(&link) {
                                tree.resolved_links_mut().insert(hash, link_entry.clone());
                            }
                            // a link points at another tree: start syncing that one too
                            self.sync_tree_with_link(link_entry)
                        } else {
                            debug!(target: "disc::dns",%link_entry, domain=%link.domain, ?hash, "resolved unexpected Link entry");
                        }
                    }
                    DnsEntry::Branch(branch_entry) => {
                        // queue the branch's children for resolution
                        if let Some(tree) = self.trees.get_mut(&link) {
                            tree.extend_children(kind, branch_entry.children)
                        }
                    }
                    DnsEntry::Node(entry) => {
                        if kind.is_link() {
                            debug!(target: "disc::dns",domain=%link.domain, ?hash, "resolved unexpected enr entry");
                        } else {
                            self.on_resolved_enr(entry.enr)
                        }
                    }
                }
            }
        }
    }
    /// Advances the state of the DNS discovery service by polling, triggering lookups
    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<DnsDiscoveryEvent> {
        loop {
            // drain buffered events first
            if let Some(event) = self.queued_events.pop_front() {
                return Poll::Ready(event)
            }

            // process all incoming commands
            while let Poll::Ready(Some(cmd)) = Pin::new(&mut self.command_rx).poll_next(cx) {
                match cmd {
                    DnsDiscoveryCommand::SyncTree(link) => {
                        self.sync_tree_with_link(link);
                    }
                    DnsDiscoveryCommand::NodeRecordUpdates(tx) => {
                        let _ = tx.send(self.node_record_stream());
                    }
                }
            }

            // drain the results of all in-flight DNS queries
            while let Poll::Ready(outcome) = self.queries.poll(cx) {
                // handle query outcome
                match outcome {
                    QueryOutcome::Root(resp) => self.on_resolved_root(resp),
                    QueryOutcome::Entry(resp) => self.on_resolved_entry(resp),
                }
            }

            let mut progress = false;
            let now = Instant::now();
            // actions are collected first and applied afterwards because
            // applying them needs `&mut self` while `self.trees` is borrowed
            let mut pending_resolves = Vec::new();
            let mut pending_updates = Vec::new();
            for tree in self.trees.values_mut() {
                while let Some(action) = tree.poll(now, self.recheck_interval) {
                    progress = true;
                    match action {
                        SyncAction::UpdateRoot => {
                            pending_updates.push(tree.link().clone());
                        }
                        SyncAction::Enr(hash) => {
                            pending_resolves.push((tree.link().clone(), hash, ResolveKind::Enr));
                        }
                        SyncAction::Link(hash) => {
                            pending_resolves.push((tree.link().clone(), hash, ResolveKind::Link));
                        }
                    }
                }
            }

            for (domain, hash, kind) in pending_resolves {
                self.resolve_entry(domain, hash, kind)
            }

            for link in pending_updates {
                self.sync_tree_with_link(link)
            }

            // nothing happened this iteration and nothing is buffered: park
            // until a command or query result wakes us
            if !progress && self.queued_events.is_empty() {
                return Poll::Pending
            }
        }
    }
}
/// A stream of events, mainly used for debugging
impl<R: Resolver> Stream for DnsDiscoveryService<R> {
    type Item = DnsDiscoveryEvent;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // the service never terminates on its own, so this never yields `None`
        Poll::Ready(Some(ready!(self.get_mut().poll(cx))))
    }
}
/// The converted discovered [Enr] object
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct DnsNodeRecordUpdate {
    /// Discovered node and its addresses
    pub node_record: NodeRecord,
    /// The forkid of the node, if present in the ENR
    pub fork_id: Option<ForkId>,
    /// Original [`Enr`].
    pub enr: Enr<SecretKey>,
}
/// Commands sent from [`DnsDiscoveryHandle`] to [`DnsDiscoveryService`]
enum DnsDiscoveryCommand {
    /// Sync a tree
    SyncTree(LinkEntry),
    /// Subscribe to discovered node records; the service replies with the
    /// receiver half of a newly created update channel.
    NodeRecordUpdates(oneshot::Sender<ReceiverStream<DnsNodeRecordUpdate>>),
}
/// Represents dns discovery related update events.
#[derive(Debug, Clone)]
pub enum DnsDiscoveryEvent {
    /// Resolved an Enr entry via DNS.
    Enr(Enr<SecretKey>),
}
/// Converts an [Enr] into a [`DnsNodeRecordUpdate`].
///
/// Returns `None` when the ENR carries no IP address or is missing either the
/// TCP or the UDP port (IPv4 fields take precedence over IPv6 ones).
fn convert_enr_node_record(enr: &Enr<SecretKey>) -> Option<DnsNodeRecordUpdate> {
    let address = enr.ip4().map(IpAddr::from).or_else(|| enr.ip6().map(IpAddr::from))?;
    let tcp_port = enr.tcp4().or_else(|| enr.tcp6())?;
    let udp_port = enr.udp4().or_else(|| enr.udp6())?;
    let node_record =
        NodeRecord { address, tcp_port, udp_port, id: pk2id(&enr.public_key()) }.into_ipv4_mapped();

    // a malformed or absent "eth" entry simply yields no fork id
    let fork_id =
        enr.get_decodable::<EnrForkIdEntry>(b"eth").transpose().ok().flatten().map(Into::into);

    Some(DnsNodeRecordUpdate { node_record, fork_id, enr: enr.clone() })
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tree::TreeRootEntry;
    use alloy_chains::Chain;
    use alloy_rlp::{Decodable, Encodable};
    use enr::EnrKey;
    use reth_chainspec::MAINNET;
    use reth_ethereum_forks::{EthereumHardfork, ForkHash};
    use secp256k1::rand::thread_rng;
    use std::{future::poll_fn, net::Ipv4Addr};

    #[test]
    fn test_convert_enr_node_record() {
        // rig
        let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());
        let enr = Enr::builder()
            .ip("127.0.0.1".parse().unwrap())
            .udp4(9000)
            .tcp4(30303)
            .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id()))
            .build(&secret_key)
            .unwrap();

        // test
        let node_record_update = convert_enr_node_record(&enr).unwrap();

        assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::<IpAddr>().unwrap());
        assert_eq!(node_record_update.node_record.tcp_port, 30303);
        assert_eq!(node_record_update.node_record.udp_port, 9000);
        assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id()));
        assert_eq!(node_record_update.enr, enr);
    }

    #[test]
    fn test_decode_and_convert_enr_node_record() {
        // rig
        let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng());
        let enr = Enr::builder()
            .ip("127.0.0.1".parse().unwrap())
            .udp4(9000)
            .tcp4(30303)
            .add_value(b"eth", &EnrForkIdEntry::from(MAINNET.latest_fork_id()))
            .add_value(b"opstack", &ForkId { hash: ForkHash(rand::random()), next: rand::random() })
            .build(&secret_key)
            .unwrap();

        let mut encoded_enr = vec![];
        enr.encode(&mut encoded_enr);

        // test: the record must survive an RLP encode/decode round-trip
        let decoded_enr = Enr::decode(&mut &encoded_enr[..]).unwrap();
        let node_record_update = convert_enr_node_record(&decoded_enr).unwrap();

        assert_eq!(node_record_update.node_record.address, "127.0.0.1".parse::<IpAddr>().unwrap());
        assert_eq!(node_record_update.node_record.tcp_port, 30303);
        assert_eq!(node_record_update.node_record.udp_port, 9000);
        assert_eq!(node_record_update.fork_id, Some(MAINNET.latest_fork_id()));
        assert_eq!(node_record_update.enr, enr);
    }

    #[tokio::test]
    async fn test_start_root_sync() {
        reth_tracing::init_test_tracing();

        let secret_key = SecretKey::new(&mut thread_rng());
        let resolver = MapResolver::default();
        let s = "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE";
        let mut root: TreeRootEntry = s.parse().unwrap();
        // re-sign the fixture root with our own key so verification succeeds
        root.sign(&secret_key).unwrap();

        let link =
            LinkEntry { domain: "nodes.example.org".to_string(), pubkey: secret_key.public() };
        resolver.insert(link.domain.clone(), root.to_string());

        let mut service = DnsDiscoveryService::new(Arc::new(resolver), Default::default());

        service.sync_tree_with_link(link.clone());

        poll_fn(|cx| {
            let _ = service.poll(cx);
            Poll::Ready(())
        })
        .await;

        let tree = service.trees.get(&link).unwrap();
        assert_eq!(tree.root().clone(), root);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_get_node() {
        reth_tracing::init_test_tracing();

        let secret_key = SecretKey::new(&mut thread_rng());
        let resolver = MapResolver::default();
        let s = "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE";
        let mut root: TreeRootEntry = s.parse().unwrap();
        root.sign(&secret_key).unwrap();

        let link =
            LinkEntry { domain: "nodes.example.org".to_string(), pubkey: secret_key.public() };
        resolver.insert(link.domain.clone(), root.to_string());

        let mut builder = Enr::builder();
        let fork_id = MAINNET.hardfork_fork_id(EthereumHardfork::Frontier).unwrap();
        builder
            .ip4(Ipv4Addr::LOCALHOST)
            .udp4(30303)
            .tcp4(30303)
            .add_value(b"eth", &EnrForkIdEntry::from(fork_id));
        let enr = builder.build(&secret_key).unwrap();

        // publish the node entry under `<enr-root>.<domain>`
        resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64());

        let mut service = DnsDiscoveryService::new(Arc::new(resolver), Default::default());

        let mut node_records = service.node_record_stream();

        let task = tokio::task::spawn(async move {
            let record = node_records.next().await.unwrap();
            assert_eq!(record.fork_id, Some(fork_id));
        });

        service.sync_tree_with_link(link.clone());

        let event = poll_fn(|cx| service.poll(cx)).await;

        match event {
            DnsDiscoveryEvent::Enr(discovered) => {
                assert_eq!(discovered, enr);
            }
        }

        // no further events expected once the tree is fully synced
        poll_fn(|cx| {
            assert!(service.poll(cx).is_pending());
            Poll::Ready(())
        })
        .await;

        task.await.unwrap();
    }

    #[tokio::test]
    async fn test_recheck_tree() {
        reth_tracing::init_test_tracing();

        let config = DnsDiscoveryConfig {
            recheck_interval: Duration::from_millis(750),
            ..Default::default()
        };

        let secret_key = SecretKey::new(&mut thread_rng());
        let resolver = Arc::new(MapResolver::default());
        let s = "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE";
        let mut root: TreeRootEntry = s.parse().unwrap();
        root.sign(&secret_key).unwrap();

        let link =
            LinkEntry { domain: "nodes.example.org".to_string(), pubkey: secret_key.public() };
        resolver.insert(link.domain.clone(), root.to_string());

        let mut service = DnsDiscoveryService::new(Arc::clone(&resolver), config.clone());

        service.sync_tree_with_link(link.clone());

        poll_fn(|cx| {
            assert!(service.poll(cx).is_pending());
            Poll::Ready(())
        })
        .await;

        // await recheck timeout
        tokio::time::sleep(config.recheck_interval).await;

        // the node entry only becomes resolvable after the recheck re-syncs the tree
        let enr = Enr::empty(&secret_key).unwrap();
        resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64());

        let event = poll_fn(|cx| service.poll(cx)).await;

        match event {
            DnsDiscoveryEvent::Enr(discovered) => {
                assert_eq!(discovered, enr);
            }
        }

        poll_fn(|cx| {
            assert!(service.poll(cx).is_pending());
            Poll::Ready(())
        })
        .await;
    }

    #[tokio::test]
    #[ignore]
    async fn test_dns_resolver() {
        // requires network access; run manually
        reth_tracing::init_test_tracing();

        let mut service = DnsDiscoveryService::new(
            Arc::new(DnsResolver::from_system_conf().unwrap()),
            Default::default(),
        );

        service.sync_tree(&Chain::mainnet().public_dns_network_protocol().unwrap()).unwrap();

        while let Some(event) = service.next().await {
            match event {
                DnsDiscoveryEvent::Enr(enr) => {
                    println!("discovered enr {}", enr.to_base64());
                }
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/sync.rs | crates/net/dns/src/sync.rs | use crate::tree::{LinkEntry, TreeRootEntry};
use enr::EnrKeyUnambiguous;
use linked_hash_set::LinkedHashSet;
use secp256k1::SecretKey;
use std::{
collections::HashMap,
time::{Duration, Instant},
};
/// A sync-able tree
pub(crate) struct SyncTree<K: EnrKeyUnambiguous = SecretKey> {
    /// Root of the tree
    root: TreeRootEntry,
    /// Link to this tree
    link: LinkEntry<K>,
    /// Timestamp when the root was updated
    root_updated: Instant,
    /// The state of the tree sync progress.
    sync_state: SyncState,
    /// Links contained in this tree, keyed by the hash they were resolved from
    resolved_links: HashMap<String, LinkEntry<K>>,
    /// Unresolved links of the tree
    unresolved_links: LinkedHashSet<String>,
    /// Unresolved nodes of the tree
    unresolved_nodes: LinkedHashSet<String>,
}
// === impl SyncTree ===
impl<K: EnrKeyUnambiguous> SyncTree<K> {
    /// Creates a new sync tree for the given root and the link it was discovered from.
    pub(crate) fn new(root: TreeRootEntry, link: LinkEntry<K>) -> Self {
        Self {
            root,
            link,
            root_updated: Instant::now(),
            sync_state: SyncState::Pending,
            resolved_links: Default::default(),
            unresolved_links: Default::default(),
            unresolved_nodes: Default::default(),
        }
    }

    #[cfg(test)]
    pub(crate) const fn root(&self) -> &TreeRootEntry {
        &self.root
    }

    /// Returns the link this tree was discovered from.
    pub(crate) const fn link(&self) -> &LinkEntry<K> {
        &self.link
    }

    /// Mutable access to the links resolved so far.
    pub(crate) const fn resolved_links_mut(&mut self) -> &mut HashMap<String, LinkEntry<K>> {
        &mut self.resolved_links
    }

    /// Queues the children of a branch entry for resolution, as node (ENR)
    /// hashes or link hashes depending on `kind`.
    pub(crate) fn extend_children(
        &mut self,
        kind: ResolveKind,
        children: impl IntoIterator<Item = String>,
    ) {
        match kind {
            ResolveKind::Enr => {
                self.unresolved_nodes.extend(children);
            }
            ResolveKind::Link => {
                self.unresolved_links.extend(children);
            }
        }
    }

    /// Advances the state of the tree by returning actions to perform
    pub(crate) fn poll(&mut self, now: Instant, update_timeout: Duration) -> Option<SyncAction> {
        match self.sync_state {
            SyncState::Pending => {
                // fresh tree: resolve the link root first, the enr root next
                self.sync_state = SyncState::Enr;
                return Some(SyncAction::Link(self.root.link_root.clone()))
            }
            SyncState::Enr => {
                self.sync_state = SyncState::Active;
                return Some(SyncAction::Enr(self.root.enr_root.clone()))
            }
            SyncState::Link => {
                self.sync_state = SyncState::Active;
                return Some(SyncAction::Link(self.root.link_root.clone()))
            }
            SyncState::Active => {
                // periodically re-fetch the root to detect tree updates
                if now > self.root_updated + update_timeout {
                    self.sync_state = SyncState::RootUpdate;
                    return Some(SyncAction::UpdateRoot)
                }
            }
            // a root re-fetch is in flight; wait for `update_root`
            SyncState::RootUpdate => return None,
        }

        // drain queued hashes, links before nodes
        if let Some(link) = self.unresolved_links.pop_front() {
            return Some(SyncAction::Link(link))
        }

        let enr = self.unresolved_nodes.pop_front()?;
        Some(SyncAction::Enr(enr))
    }

    /// Updates the root and adjusts the sync state accordingly.
    pub(crate) fn update_root(&mut self, root: TreeRootEntry) {
        // NOTE(review): `enr`/`link` are `true` when the respective root hash is
        // *unchanged*, yet the unchanged case triggers a resync below while two
        // changed hashes hit the `_ =>` early-return. This looks inverted
        // relative to the names, but `test_recheck_tree` relies on re-fetching
        // an identical root re-syncing the tree — confirm intent before changing.
        let enr = root.enr_root == self.root.enr_root;
        let link = root.link_root == self.root.link_root;

        self.root = root;
        self.root_updated = Instant::now();
        let state = match (enr, link) {
            (true, true) => {
                self.unresolved_nodes.clear();
                self.unresolved_links.clear();
                SyncState::Pending
            }
            (true, _) => {
                self.unresolved_nodes.clear();
                SyncState::Enr
            }
            (_, true) => {
                self.unresolved_links.clear();
                SyncState::Link
            }
            _ => {
                // unchanged
                return
            }
        };
        self.sync_state = state;
    }
}
/// The action to perform by the service
pub(crate) enum SyncAction {
    /// Re-fetch the tree's root entry.
    UpdateRoot,
    /// Resolve the given hash as a node (ENR) entry.
    Enr(String),
    /// Resolve the given hash as a link entry.
    Link(String),
}
/// Internal sync state of a tree, advanced by [`SyncTree::poll`] and reset by
/// [`SyncTree::update_root`].
enum SyncState {
    /// A root re-fetch is in flight.
    RootUpdate,
    /// Fresh tree: both subtree roots still need to be resolved.
    Pending,
    /// The enr root still needs to be resolved.
    Enr,
    /// The link root still needs to be resolved.
    Link,
    /// Roots resolved; draining unresolved hashes and watching for root updates.
    Active,
}
/// What kind of hash to resolve
pub(crate) enum ResolveKind {
    /// The hash points to a node (ENR) entry.
    Enr,
    /// The hash points to a link entry.
    Link,
}
// === impl ResolveKind ===
impl ResolveKind {
    /// Returns `true` if this hash should be resolved as a link entry.
    pub(crate) const fn is_link(&self) -> bool {
        match self {
            Self::Link => true,
            Self::Enr => false,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/tree.rs | crates/net/dns/src/tree.rs | //! Support for the [EIP-1459 DNS Record Structure](https://eips.ethereum.org/EIPS/eip-1459#dns-record-structure)
//!
//! The nodes in a list are encoded as a merkle tree for distribution via the DNS protocol. Entries
//! of the merkle tree are contained in DNS TXT records. The root of the tree is a TXT record with
//! the following content:
//!
//! ```text
//! enrtree-root:v1 e=<enr-root> l=<link-root> seq=<sequence-number> sig=<signature>
//! ```
//!
//! where
//!
//! enr-root and link-root refer to the root hashes of subtrees containing nodes and links to
//! subtrees.
//! `sequence-number` is the tree’s update sequence number, a decimal integer.
//! `signature` is a 65-byte secp256k1 EC signature over the keccak256 hash of the record
//! content, excluding the sig= part, encoded as URL-safe base64 (RFC-4648).
use crate::error::{
ParseDnsEntryError,
ParseDnsEntryError::{FieldNotFound, UnknownEntry},
ParseEntryResult,
};
use alloy_primitives::{hex, Bytes};
use data_encoding::{BASE32_NOPAD, BASE64URL_NOPAD};
use enr::{Enr, EnrKey, EnrKeyUnambiguous, EnrPublicKey, Error as EnrError};
use secp256k1::SecretKey;
#[cfg(feature = "serde")]
use serde_with::{DeserializeFromStr, SerializeDisplay};
use std::{
fmt,
hash::{Hash, Hasher},
str::FromStr,
};
/// Prefix used for root entries in the ENR tree (EIP-1459 `enrtree-root:v1`).
const ROOT_V1_PREFIX: &str = "enrtree-root:v1";

/// Prefix used for link entries in the ENR tree.
const LINK_PREFIX: &str = "enrtree://";

/// Prefix used for branch entries in the ENR tree.
const BRANCH_PREFIX: &str = "enrtree-branch:";

/// Prefix used for ENR entries in the ENR tree.
const ENR_PREFIX: &str = "enr:";
/// Represents all variants of DNS entries for Ethereum node lists, as parsed
/// from a single DNS TXT record.
#[derive(Debug, Clone)]
pub enum DnsEntry<K: EnrKeyUnambiguous> {
    /// Represents a root entry in the DNS tree containing node records.
    Root(TreeRootEntry),
    /// Represents a link entry in the DNS tree pointing to another node list.
    Link(LinkEntry<K>),
    /// Represents a branch entry in the DNS tree containing hashes of subtree entries.
    Branch(BranchEntry),
    /// Represents a leaf entry in the DNS tree containing a node record.
    Node(NodeEntry<K>),
}
impl<K: EnrKeyUnambiguous> fmt::Display for DnsEntry<K> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // delegate to the wrapped entry's own `Display` implementation
        match self {
            Self::Root(inner) => fmt::Display::fmt(inner, f),
            Self::Link(inner) => fmt::Display::fmt(inner, f),
            Self::Branch(inner) => fmt::Display::fmt(inner, f),
            Self::Node(inner) => fmt::Display::fmt(inner, f),
        }
    }
}
impl<K: EnrKeyUnambiguous> FromStr for DnsEntry<K> {
    type Err = ParseDnsEntryError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // dispatch on the entry's prefix; the prefix is stripped before parsing
        if let Some(s) = s.strip_prefix(ROOT_V1_PREFIX) {
            TreeRootEntry::parse_value(s).map(DnsEntry::Root)
        } else if let Some(s) = s.strip_prefix(BRANCH_PREFIX) {
            BranchEntry::parse_value(s).map(DnsEntry::Branch)
        } else if let Some(s) = s.strip_prefix(LINK_PREFIX) {
            LinkEntry::parse_value(s).map(DnsEntry::Link)
        } else if let Some(s) = s.strip_prefix(ENR_PREFIX) {
            NodeEntry::parse_value(s).map(DnsEntry::Node)
        } else {
            Err(UnknownEntry(s.to_string()))
        }
    }
}
/// Represents an `enrtree-root:v1` entry: the signed root of a tree, referring
/// to the subtrees that contain nodes and links.
#[derive(Clone, Eq, PartialEq)]
pub struct TreeRootEntry {
    /// The `enr-root` hash.
    pub enr_root: String,
    /// The root hash of the links.
    pub link_root: String,
    /// The sequence number associated with the entry.
    pub sequence_number: u64,
    /// The signature of the entry.
    pub signature: Bytes,
}
// === impl TreeRootEntry ===
impl TreeRootEntry {
    /// Parses the entry from text.
    ///
    /// Caution: This assumes the prefix is already removed.
    fn parse_value(mut input: &str) -> ParseEntryResult<Self> {
        let input = &mut input;
        let enr_root = parse_value(input, "e=", "ENR Root", |s| Ok(s.to_string()))?;
        let link_root = parse_value(input, "l=", "Link Root", |s| Ok(s.to_string()))?;
        let sequence_number = parse_value(input, "seq=", "Sequence number", |s| {
            s.parse::<u64>().map_err(|_| {
                ParseDnsEntryError::Other(format!("Failed to parse sequence number {s}"))
            })
        })?;
        let signature = parse_value(input, "sig=", "Signature", |s| {
            // signatures are URL-safe base64 without padding (RFC 4648)
            BASE64URL_NOPAD.decode(s.as_bytes()).map_err(|err| {
                ParseDnsEntryError::Base64DecodeError(format!("signature error: {err}"))
            })
        })?
        .into();

        Ok(Self { enr_root, link_root, sequence_number, signature })
    }

    /// Returns the _unsigned_ content of the entry, i.e. everything up to but
    /// excluding the `sig=` part:
    ///
    /// ```text
    /// enrtree-root:v1 e=<enr-root> l=<link-root> seq=<sequence-number>
    /// ```
    fn content(&self) -> String {
        format!(
            "{} e={} l={} seq={}",
            ROOT_V1_PREFIX, self.enr_root, self.link_root, self.sequence_number
        )
    }

    /// Signs the content with the given key
    pub fn sign<K: EnrKey>(&mut self, key: &K) -> Result<(), EnrError> {
        let sig = key.sign_v4(self.content().as_bytes()).map_err(|_| EnrError::SigningError)?;
        self.signature = sig.into();
        Ok(())
    }

    /// Verify the signature of the record.
    #[must_use]
    pub fn verify<K: EnrKey>(&self, pubkey: &K::PublicKey) -> bool {
        let mut sig = self.signature.clone();
        // only the first 64 bytes (presumably r || s) are verified; any trailing
        // recovery byte is dropped — confirm against the EIP-1459 signature format
        sig.truncate(64);
        pubkey.verify_v4(self.content().as_bytes(), &sig)
    }
}
impl FromStr for TreeRootEntry {
    type Err = ParseDnsEntryError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // accept only strings carrying the v1 root prefix
        match s.strip_prefix(ROOT_V1_PREFIX) {
            Some(rest) => Self::parse_value(rest),
            None => Err(UnknownEntry(s.to_string())),
        }
    }
}
impl fmt::Debug for TreeRootEntry {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // manual impl so the signature renders as hex instead of a raw byte list
        f.debug_struct("TreeRootEntry")
            .field("enr_root", &self.enr_root)
            .field("link_root", &self.link_root)
            .field("sequence_number", &self.sequence_number)
            .field("signature", &hex::encode(self.signature.as_ref()))
            .finish()
    }
}
impl fmt::Display for TreeRootEntry {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // unsigned content followed by the unpadded base64url signature
        write!(f, "{} sig={}", self.content(), BASE64URL_NOPAD.encode(self.signature.as_ref()))
    }
}
/// Represents a branch entry in the DNS tree, containing base32 hashes of subtree entries.
#[derive(Debug, Clone)]
pub struct BranchEntry {
    /// The list of base32-encoded hashes of subtree entries in the branch.
    pub children: Vec<String>,
}
// === impl BranchEntry ===
impl BranchEntry {
    /// Parses the entry from text.
    ///
    /// Caution: This assumes the prefix is already removed.
    fn parse_value(input: &str) -> ParseEntryResult<Self> {
        #[inline]
        fn ensure_valid_hash(hash: &str) -> ParseEntryResult<String> {
            /// Returns the maximum length in bytes of the no-padding decoded data corresponding to
            /// `n` bytes of base32-encoded data.
            /// See also <https://cs.opensource.google/go/go/+/refs/tags/go1.19.5:src/encoding/base32/base32.go;l=526-531;drc=8a5845e4e34c046758af3729acf9221b8b6c01ae>
            #[inline(always)]
            const fn base32_no_padding_decoded_len(n: usize) -> usize {
                n * 5 / 8
            }
            // child hashes must decode to 12..=32 bytes and contain no whitespace
            let decoded_len = base32_no_padding_decoded_len(hash.len());
            if !(12..=32).contains(&decoded_len) || hash.chars().any(|c| c.is_whitespace()) {
                return Err(ParseDnsEntryError::InvalidChildHash(hash.to_string()))
            }
            Ok(hash.to_string())
        }

        // fails on the first invalid child hash
        let children =
            input.trim().split(',').map(ensure_valid_hash).collect::<ParseEntryResult<Vec<_>>>()?;
        Ok(Self { children })
    }
}
impl FromStr for BranchEntry {
    type Err = ParseDnsEntryError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // only strings carrying the branch prefix are branch entries
        if let Some(rest) = s.strip_prefix(BRANCH_PREFIX) {
            Self::parse_value(rest)
        } else {
            Err(UnknownEntry(s.to_string()))
        }
    }
}
impl fmt::Display for BranchEntry {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // prefix followed by the comma-separated child hashes
        f.write_str(BRANCH_PREFIX)?;
        f.write_str(&self.children.join(","))
    }
}
/// Represents a link entry in the DNS tree, facilitating federation and web-of-trust functionality.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(SerializeDisplay, DeserializeFromStr))]
pub struct LinkEntry<K: EnrKeyUnambiguous = SecretKey> {
    /// The domain associated with the link entry.
    pub domain: String,
    /// The public key corresponding to the Ethereum Node Record (ENR) used for the link entry.
    pub pubkey: K::PublicKey,
}
// === impl LinkEntry ===
impl<K: EnrKeyUnambiguous> LinkEntry<K> {
    /// Parses the entry from text.
    ///
    /// Caution: This assumes the prefix is already removed.
    fn parse_value(input: &str) -> ParseEntryResult<Self> {
        // expected layout: <base32-pubkey>@<domain>
        let (pubkey, domain) = input.split_once('@').ok_or_else(|| {
            ParseDnsEntryError::Other(format!("Missing @ delimiter in Link entry: {input}"))
        })?;
        let pubkey = K::decode_public(&BASE32_NOPAD.decode(pubkey.as_bytes()).map_err(|err| {
            ParseDnsEntryError::Base32DecodeError(format!("pubkey error: {err}"))
        })?)
        .map_err(|err| ParseDnsEntryError::RlpDecodeError(err.to_string()))?;

        Ok(Self { domain: domain.to_string(), pubkey })
    }
}
impl<K> PartialEq for LinkEntry<K>
where
    K: EnrKeyUnambiguous,
    K::PublicKey: PartialEq,
{
    fn eq(&self, other: &Self) -> bool {
        // two link entries are equal iff both domain and pubkey match
        self.pubkey == other.pubkey && self.domain == other.domain
    }
}
// `Eq` is sound here: `eq` compares only `domain` (`Eq`) and `pubkey` (bounded `Eq`).
impl<K> Eq for LinkEntry<K>
where
    K: EnrKeyUnambiguous,
    K::PublicKey: Eq + PartialEq,
{
}
impl<K> Hash for LinkEntry<K>
where
    K: EnrKeyUnambiguous,
    K::PublicKey: Hash,
{
    fn hash<H: Hasher>(&self, state: &mut H) {
        // must stay consistent with `PartialEq`: hash exactly the compared fields
        self.domain.hash(state);
        self.pubkey.hash(state);
    }
}
impl<K: EnrKeyUnambiguous> FromStr for LinkEntry<K> {
    type Err = ParseDnsEntryError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // only strings carrying the `enrtree://` prefix are link entries
        match s.strip_prefix(LINK_PREFIX) {
            Some(rest) => Self::parse_value(rest),
            None => Err(UnknownEntry(s.to_string())),
        }
    }
}
impl<K: EnrKeyUnambiguous> fmt::Display for LinkEntry<K> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // inverse of `parse_value`: enrtree://<base32-pubkey>@<domain>
        let encoded_key = BASE32_NOPAD.encode(self.pubkey.encode().as_ref());
        write!(f, "{LINK_PREFIX}{encoded_key}@{}", self.domain)
    }
}
/// Represents the actual Ethereum Node Record (ENR) entry in the DNS tree.
///
/// This is a leaf of the tree; its textual form is the ENR's base64 representation.
#[derive(Debug, Clone)]
pub struct NodeEntry<K: EnrKeyUnambiguous> {
    /// The Ethereum Node Record (ENR) associated with the node entry.
    pub enr: Enr<K>,
}
// === impl NodeEntry ===

impl<K: EnrKeyUnambiguous> NodeEntry<K> {
    /// Parses the textual ENR representation.
    ///
    /// Caution: This assumes the prefix is already removed.
    fn parse_value(s: &str) -> ParseEntryResult<Self> {
        let enr = s.parse().map_err(ParseDnsEntryError::Other)?;
        Ok(Self { enr })
    }
}
impl<K: EnrKeyUnambiguous> FromStr for NodeEntry<K> {
    type Err = ParseDnsEntryError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Only strings carrying the ENR prefix are candidates for this entry type.
        match s.strip_prefix(ENR_PREFIX) {
            Some(rest) => Self::parse_value(rest),
            None => Err(UnknownEntry(s.to_string())),
        }
    }
}
impl<K: EnrKeyUnambiguous> fmt::Display for NodeEntry<K> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `to_base64` produces the full `enr:...` string (see the round-trip test below).
        write!(f, "{}", self.enr.to_base64())
    }
}
/// Parses the value of the key value pair.
///
/// Strips `key` from the front of `*input`, reads the following
/// whitespace-delimited token, advances the cursor past it, and feeds the token
/// to `f` to produce the typed value.
fn parse_value<F, V>(input: &mut &str, key: &str, err: &'static str, f: F) -> ParseEntryResult<V>
where
    F: Fn(&str) -> ParseEntryResult<V>,
{
    // Consume the expected key (e.g. "e=") from the front of the input.
    ensure_strip_key(input, key, err)?;
    // Take everything up to the next whitespace as the raw value.
    // NOTE(review): this assumes the value starts immediately after the key
    // (no intervening whitespace); otherwise the slice offset below would be
    // misaligned — confirm against the root-entry wire format.
    let val = input.split_whitespace().next().ok_or(FieldNotFound(err))?;
    // Advance the cursor past the consumed value so the next field can be read.
    *input = &input[val.len()..];
    f(val)
}
/// Strips the `key` from the `input`
///
/// Returns an err if the `input` does not start with the `key`
fn ensure_strip_key(input: &mut &str, key: &str, err: &'static str) -> ParseEntryResult<()> {
    // Leading whitespace is tolerated before the key.
    match input.trim_start().strip_prefix(key) {
        Some(rest) => {
            *input = rest;
            Ok(())
        }
        None => Err(FieldNotFound(err)),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Most tests assert the parse/display round-trip: `to_string` of a parsed
    // entry must reproduce the original input exactly, and the generic
    // `DnsEntry` parser must classify the record into the matching variant.

    #[test]
    fn parse_root_entry() {
        let s = "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE";
        let root: TreeRootEntry = s.parse().unwrap();
        assert_eq!(root.to_string(), s);
        match s.parse::<DnsEntry<SecretKey>>().unwrap() {
            DnsEntry::Root(root) => {
                assert_eq!(root.to_string(), s);
            }
            _ => unreachable!(),
        }
    }

    #[test]
    fn parse_branch_entry() {
        let s = "enrtree-branch:CCCCCCCCCCCCCCCCCCCC,BBBBBBBBBBBBBBBBBBBB";
        let entry: BranchEntry = s.parse().unwrap();
        assert_eq!(entry.to_string(), s);
        match s.parse::<DnsEntry<SecretKey>>().unwrap() {
            DnsEntry::Branch(entry) => {
                assert_eq!(entry.to_string(), s);
            }
            _ => unreachable!(),
        }
    }

    #[test]
    fn parse_branch_entry_base32() {
        // Child hashes of non-uniform length must also be accepted.
        let s = "enrtree-branch:YNEGZIWHOM7TOOSUATAPTM";
        let entry: BranchEntry = s.parse().unwrap();
        assert_eq!(entry.to_string(), s);
        match s.parse::<DnsEntry<SecretKey>>().unwrap() {
            DnsEntry::Branch(entry) => {
                assert_eq!(entry.to_string(), s);
            }
            _ => unreachable!(),
        }
    }

    #[test]
    fn parse_invalid_branch_entry() {
        // Each of the following malformed child lists must fail to parse.
        let s = "enrtree-branch:1,2";
        let res = s.parse::<BranchEntry>();
        assert!(res.is_err());
        let s = "enrtree-branch:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
        let res = s.parse::<BranchEntry>();
        assert!(res.is_err());
        let s = "enrtree-branch:,BBBBBBBBBBBBBBBBBBBB";
        let res = s.parse::<BranchEntry>();
        assert!(res.is_err());
        // Embedded whitespace inside the child list is rejected.
        let s = "enrtree-branch:CCCCCCCCCCCCCCCCCCCC\n,BBBBBBBBBBBBBBBBBBBB";
        let res = s.parse::<BranchEntry>();
        assert!(res.is_err());
    }

    #[test]
    fn parse_link_entry() {
        let s = "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@nodes.example.org";
        let entry: LinkEntry<SecretKey> = s.parse().unwrap();
        assert_eq!(entry.to_string(), s);
        match s.parse::<DnsEntry<SecretKey>>().unwrap() {
            DnsEntry::Link(entry) => {
                assert_eq!(entry.to_string(), s);
            }
            _ => unreachable!(),
        }
    }

    #[test]
    fn parse_enr_entry() {
        let s = "enr:-HW4QES8QIeXTYlDzbfr1WEzE-XKY4f8gJFJzjJL-9D7TC9lJb4Z3JPRRz1lP4pL_N_QpT6rGQjAU9Apnc-C1iMP36OAgmlkgnY0iXNlY3AyNTZrMaED5IdwfMxdmR8W37HqSFdQLjDkIwBd4Q_MjxgZifgKSdM";
        let entry: NodeEntry<SecretKey> = s.parse().unwrap();
        assert_eq!(entry.to_string(), s);
        match s.parse::<DnsEntry<SecretKey>>().unwrap() {
            DnsEntry::Node(entry) => {
                assert_eq!(entry.to_string(), s);
            }
            _ => unreachable!(),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/error.rs | crates/net/dns/src/error.rs | use crate::tree::TreeRootEntry;
/// Alias for results of parsing a single DNS tree entry.
pub(crate) type ParseEntryResult<T> = Result<T, ParseDnsEntryError>;

/// Alias for results of DNS lookups.
pub(crate) type LookupResult<T> = Result<T, LookupError>;
/// Error while parsing a [`DnsEntry`](crate::tree::DnsEntry)
// Each variant previously carried two doc comments (one above and one below the
// `#[error]` attribute); they are merged here into a single doc per variant.
#[derive(thiserror::Error, Debug)]
pub enum ParseDnsEntryError {
    /// Indicates an unknown entry encountered during parsing.
    #[error("unknown entry: {0}")]
    UnknownEntry(String),
    /// Indicates a field was not found during parsing.
    #[error("field {0} not found")]
    FieldNotFound(&'static str),
    /// Indicates a failure during Base64 decoding.
    #[error("base64 decoding failed: {0}")]
    Base64DecodeError(String),
    /// Indicates a failure during Base32 decoding.
    #[error("base32 decoding failed: {0}")]
    Base32DecodeError(String),
    /// Indicates an error during RLP decoding.
    #[error("{0}")]
    RlpDecodeError(String),
    /// Indicates an invalid child hash within a branch.
    #[error("invalid child hash in branch: {0}")]
    InvalidChildHash(String),
    /// Indicates other unspecified errors.
    #[error("{0}")]
    Other(String),
}
/// Errors that can happen during lookups
// As with `ParseDnsEntryError`, duplicated per-variant doc comments are merged.
#[derive(thiserror::Error, Debug)]
pub(crate) enum LookupError {
    /// Represents errors during parsing.
    #[error(transparent)]
    Parse(#[from] ParseDnsEntryError),
    /// Indicates failure while verifying the root entry.
    #[error("failed to verify root {0}")]
    InvalidRoot(TreeRootEntry),
    /// Indicates a timeout occurred during the request.
    #[error("request timed out")]
    RequestTimedOut,
    /// Indicates the requested entry was not found.
    #[error("entry not found")]
    EntryNotFound,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/resolver.rs | crates/net/dns/src/resolver.rs | //! Perform DNS lookups
use hickory_resolver::name_server::ConnectionProvider;
pub use hickory_resolver::{ResolveError, TokioResolver};
use parking_lot::RwLock;
use std::{collections::HashMap, future::Future};
use tracing::trace;
/// A type that can lookup DNS entries
pub trait Resolver: Send + Sync + Unpin + 'static {
    /// Performs a TXT lookup for `query` and returns the first text record, if any.
    fn lookup_txt(&self, query: &str) -> impl Future<Output = Option<String>> + Send;
}
impl<P: ConnectionProvider> Resolver for hickory_resolver::Resolver<P> {
    async fn lookup_txt(&self, query: &str) -> Option<String> {
        // See: [AsyncResolver::txt_lookup]
        // > *hint* queries that end with a '.' are fully qualified names and are cheaper lookups
        let fqn = if query.ends_with('.') { query.to_string() } else { format!("{query}.") };
        match self.txt_lookup(fqn).await {
            Ok(lookup) => {
                // Only the first TXT record, and only its first character-string, is used.
                let txt = lookup.into_iter().next()?;
                let entry = txt.iter().next()?;
                String::from_utf8(entry.to_vec()).ok()
            }
            Err(err) => {
                trace!(target: "disc::dns", %err, ?query, "dns lookup failed");
                None
            }
        }
    }
}
/// An asynchronous DNS resolver
///
/// See also [`TokioResolver`]
///
/// ```
/// # fn t() {
/// use reth_dns_discovery::resolver::DnsResolver;
/// let resolver = DnsResolver::from_system_conf().unwrap();
/// # }
/// ```
///
/// Note: This [Resolver] can send multiple lookup attempts, see also
/// [`ResolverOpts`](hickory_resolver::config::ResolverOpts) which configures 2 attempts (1 retry)
/// by default.
#[derive(Clone, Debug)]
pub struct DnsResolver(TokioResolver);
// === impl DnsResolver ===

impl DnsResolver {
    /// Create a new resolver by wrapping the given [`TokioResolver`].
    pub const fn new(resolver: TokioResolver) -> Self {
        Self(resolver)
    }

    /// Constructs a new Tokio based Resolver with the system configuration.
    ///
    /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows.
    pub fn from_system_conf() -> Result<Self, ResolveError> {
        let builder = TokioResolver::builder_tokio()?;
        Ok(Self::new(builder.build()))
    }
}
impl Resolver for DnsResolver {
async fn lookup_txt(&self, query: &str) -> Option<String> {
Resolver::lookup_txt(&self.0, query).await
}
}
/// A [Resolver] that uses an in memory map to lookup entries
///
/// Intended for testing: entries are inserted/removed explicitly instead of
/// being fetched from real DNS.
#[derive(Debug, Default)]
pub struct MapResolver(RwLock<HashMap<String, String>>);
// === impl MapResolver ===

impl MapResolver {
    /// Inserts a key-value pair into the map.
    pub fn insert(&self, k: String, v: String) -> Option<String> {
        let mut guard = self.0.write();
        guard.insert(k, v)
    }

    /// Returns the value corresponding to the key
    pub fn get(&self, k: &str) -> Option<String> {
        let guard = self.0.read();
        guard.get(k).cloned()
    }

    /// Removes a key from the map, returning the value at the key if the key was previously in the
    /// map.
    pub fn remove(&self, k: &str) -> Option<String> {
        let mut guard = self.0.write();
        guard.remove(k)
    }
}
impl Resolver for MapResolver {
    async fn lookup_txt(&self, query: &str) -> Option<String> {
        // A lookup is a plain map read; no network access involved.
        self.0.read().get(query).cloned()
    }
}
/// A Resolver that always times out.
///
/// Test helper: the wrapped duration is how long each lookup sleeps before
/// returning `None`.
#[cfg(test)]
pub(crate) struct TimeoutResolver(pub(crate) std::time::Duration);

#[cfg(test)]
impl Resolver for TimeoutResolver {
    async fn lookup_txt(&self, _query: &str) -> Option<String> {
        tokio::time::sleep(self.0).await;
        None
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/dns/src/query.rs | crates/net/dns/src/query.rs | //! Handles query execution
use crate::{
error::{LookupError, LookupResult},
resolver::Resolver,
sync::ResolveKind,
tree::{DnsEntry, LinkEntry, TreeRootEntry},
};
use enr::EnrKeyUnambiguous;
use reth_tokio_util::ratelimit::{Rate, RateLimit};
use std::{
collections::VecDeque,
future::Future,
num::NonZeroUsize,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
time::Duration,
};
/// The `QueryPool` provides an aggregate state machine for driving queries to completion.
pub(crate) struct QueryPool<R: Resolver, K: EnrKeyUnambiguous> {
    /// The [Resolver] that's used to lookup queries.
    resolver: Arc<R>,
    /// Queries waiting to be admitted by the rate limiter.
    queued_queries: VecDeque<Query<K>>,
    /// All queries currently in flight.
    active_queries: Vec<Query<K>>,
    /// Completed outcomes buffered until returned by [`Self::poll`].
    queued_outcomes: VecDeque<QueryOutcome<K>>,
    /// Rate limit for DNS requests
    rate_limit: RateLimit,
    /// Timeout for DNS lookups.
    lookup_timeout: Duration,
}
// === impl QueryPool ===

impl<R: Resolver, K: EnrKeyUnambiguous> QueryPool<R, K> {
    /// Creates a new pool that drives lookups via `resolver`, admitting at most
    /// `max_requests_per_sec` new queries per second and aborting individual
    /// lookups after `lookup_timeout`.
    pub(crate) fn new(
        resolver: Arc<R>,
        max_requests_per_sec: NonZeroUsize,
        lookup_timeout: Duration,
    ) -> Self {
        Self {
            resolver,
            queued_queries: Default::default(),
            active_queries: vec![],
            queued_outcomes: Default::default(),
            rate_limit: RateLimit::new(Rate::new(
                max_requests_per_sec.get() as u64,
                Duration::from_secs(1),
            )),
            lookup_timeout,
        }
    }

    /// Resolves the root the link's domain references
    pub(crate) fn resolve_root(&mut self, link: LinkEntry<K>) {
        let resolver = Arc::clone(&self.resolver);
        let timeout = self.lookup_timeout;
        self.queued_queries.push_back(Query::Root(Box::pin(resolve_root(resolver, link, timeout))))
    }

    /// Resolves the [`DnsEntry`] for `<hash.domain>`
    pub(crate) fn resolve_entry(&mut self, link: LinkEntry<K>, hash: String, kind: ResolveKind) {
        let resolver = Arc::clone(&self.resolver);
        let timeout = self.lookup_timeout;
        self.queued_queries
            .push_back(Query::Entry(Box::pin(resolve_entry(resolver, link, hash, kind, timeout))))
    }

    /// Advances the state of the queries
    ///
    /// Returns `Ready` with one buffered outcome at a time; `Pending` when no
    /// outcome is available yet (the rate limiter registers the waker).
    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<QueryOutcome<K>> {
        loop {
            // drain buffered events first
            if let Some(event) = self.queued_outcomes.pop_front() {
                return Poll::Ready(event)
            }
            // queue in new queries if we have capacity; each admission consumes
            // one rate-limit token via `tick`.
            'queries: while self.active_queries.len() < self.rate_limit.limit() as usize {
                if self.rate_limit.poll_ready(cx).is_ready() {
                    if let Some(query) = self.queued_queries.pop_front() {
                        self.rate_limit.tick();
                        self.active_queries.push(query);
                        continue 'queries
                    }
                }
                break
            }
            // advance all queries; iterating in reverse keeps not-yet-visited
            // indices valid across `swap_remove` (O(1) removal).
            for idx in (0..self.active_queries.len()).rev() {
                let mut query = self.active_queries.swap_remove(idx);
                if let Poll::Ready(outcome) = query.poll(cx) {
                    self.queued_outcomes.push_back(outcome);
                } else {
                    // still pending: put it back into the active set
                    self.active_queries.push(query);
                }
            }
            if self.queued_outcomes.is_empty() {
                return Poll::Pending
            }
        }
    }
}
// === Various future/type alias ===

/// The outcome of resolving a single `<hash>.<domain>` entry, carrying enough
/// context (link, hash, kind) for the caller to route the result.
pub(crate) struct ResolveEntryResult<K: EnrKeyUnambiguous> {
    /// The parsed entry; `None` when no TXT record was found at the name.
    pub(crate) entry: Option<LookupResult<DnsEntry<K>>>,
    /// The link whose tree the entry belongs to.
    pub(crate) link: LinkEntry<K>,
    /// The hash label the entry was looked up under.
    pub(crate) hash: String,
    /// What kind of resolution this was requested as.
    pub(crate) kind: ResolveKind,
}

/// Either the verified root with its link, or the error paired with the link.
pub(crate) type ResolveRootResult<K> =
    Result<(TreeRootEntry, LinkEntry<K>), (LookupError, LinkEntry<K>)>;

type ResolveRootFuture<K> = Pin<Box<dyn Future<Output = ResolveRootResult<K>> + Send>>;
type ResolveEntryFuture<K> = Pin<Box<dyn Future<Output = ResolveEntryResult<K>> + Send>>;
/// A single in-flight lookup future, either for a tree root or for one entry.
enum Query<K: EnrKeyUnambiguous> {
    /// Resolves and verifies a tree root (see `resolve_root`).
    Root(ResolveRootFuture<K>),
    /// Resolves one `<hash>.<domain>` entry (see `resolve_entry`).
    Entry(ResolveEntryFuture<K>),
}
// === impl Query ===

impl<K: EnrKeyUnambiguous> Query<K> {
    /// Advances the query
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<QueryOutcome<K>> {
        // Poll the inner future and wrap its output in the matching outcome
        // variant; `ready!` propagates `Pending` early.
        let outcome = match self {
            Self::Root(fut) => QueryOutcome::Root(ready!(fut.as_mut().poll(cx))),
            Self::Entry(fut) => QueryOutcome::Entry(ready!(fut.as_mut().poll(cx))),
        };
        Poll::Ready(outcome)
    }
}
/// The output the queries return
pub(crate) enum QueryOutcome<K: EnrKeyUnambiguous> {
    /// Outcome of a root lookup: verified root or error, each with its link.
    Root(ResolveRootResult<K>),
    /// Outcome of an entry lookup with its routing context.
    Entry(ResolveEntryResult<K>),
}
/// Retrieves the [`DnsEntry`]
async fn resolve_entry<K: EnrKeyUnambiguous, R: Resolver>(
    resolver: Arc<R>,
    link: LinkEntry<K>,
    hash: String,
    kind: ResolveKind,
    timeout: Duration,
) -> ResolveEntryResult<K> {
    // The record lives at `<hash>.<domain>`.
    let fqn = format!("{hash}.{}", link.domain);
    let entry = match lookup_with_timeout::<R>(&resolver, &fqn, timeout).await {
        // Record found: attempt to parse it into a typed entry.
        Ok(Some(record)) => Some(record.parse::<DnsEntry<K>>().map_err(Into::into)),
        // No record at this name.
        Ok(None) => None,
        // Lookup failed (e.g. timed out).
        Err(err) => Some(Err(err)),
    };
    ResolveEntryResult { entry, link, hash, kind }
}
/// Retrieves the root entry the link points to and returns the verified entry
///
/// Returns an error if the record could be retrieved but is not a root entry or failed to be
/// verified.
async fn resolve_root<K: EnrKeyUnambiguous, R: Resolver>(
    resolver: Arc<R>,
    link: LinkEntry<K>,
    timeout: Duration,
) -> ResolveRootResult<K> {
    // Fetch the TXT record at the link's domain.
    let record = match lookup_with_timeout::<R>(&resolver, &link.domain, timeout).await {
        Ok(Some(record)) => record,
        Ok(None) => return Err((LookupError::EntryNotFound, link)),
        Err(err) => return Err((err, link)),
    };
    // Parse it as a tree root.
    let root = match record.parse::<TreeRootEntry>() {
        Ok(root) => root,
        Err(err) => return Err((err.into(), link)),
    };
    // Only roots carrying a valid signature from the linked pubkey are accepted.
    if root.verify::<K>(&link.pubkey) {
        Ok((root, link))
    } else {
        Err((LookupError::InvalidRoot(root), link))
    }
}
/// Runs the TXT lookup, mapping an elapsed timeout to [`LookupError::RequestTimedOut`].
async fn lookup_with_timeout<R: Resolver>(
    r: &R,
    query: &str,
    timeout: Duration,
) -> LookupResult<Option<String>> {
    match tokio::time::timeout(timeout, r.lookup_txt(query)).await {
        Ok(record) => Ok(record),
        Err(_elapsed) => Err(LookupError::RequestTimedOut),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{resolver::TimeoutResolver, DnsDiscoveryConfig, MapResolver};
    use std::future::poll_fn;

    // Verifies that the pool stops admitting new queries once the per-second
    // rate limit is exhausted.
    #[tokio::test]
    async fn test_rate_limit() {
        let resolver = Arc::new(MapResolver::default());
        let config = DnsDiscoveryConfig::default();
        let mut pool = QueryPool::new(resolver, config.max_requests_per_sec, config.lookup_timeout);
        let s = "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@nodes.example.org";
        let entry: LinkEntry = s.parse().unwrap();
        // Fill the entire per-second budget; each query must be admitted.
        for _n in 0..config.max_requests_per_sec.get() {
            poll_fn(|cx| {
                pool.resolve_root(entry.clone());
                assert_eq!(pool.queued_queries.len(), 1);
                assert!(pool.rate_limit.poll_ready(cx).is_ready());
                let _ = pool.poll(cx);
                assert_eq!(pool.queued_queries.len(), 0);
                Poll::Ready(())
            })
            .await;
        }
        // The next query must stay queued: the budget is spent.
        pool.resolve_root(entry.clone());
        assert_eq!(pool.queued_queries.len(), 1);
        poll_fn(|cx| {
            assert!(pool.rate_limit.poll_ready(cx).is_pending());
            let _ = pool.poll(cx);
            assert_eq!(pool.queued_queries.len(), 1);
            Poll::Ready(())
        })
        .await;
    }

    // Verifies that a lookup exceeding `lookup_timeout` surfaces as
    // `LookupError::RequestTimedOut`.
    #[tokio::test]
    async fn test_timeouts() {
        let config =
            DnsDiscoveryConfig { lookup_timeout: Duration::from_millis(500), ..Default::default() };
        // The resolver sleeps for twice the configured timeout.
        let resolver = Arc::new(TimeoutResolver(config.lookup_timeout * 2));
        let mut pool = QueryPool::new(resolver, config.max_requests_per_sec, config.lookup_timeout);
        let s = "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@nodes.example.org";
        let entry: LinkEntry = s.parse().unwrap();
        pool.resolve_root(entry);
        let outcome = poll_fn(|cx| pool.poll(cx)).await;
        match outcome {
            QueryOutcome::Root(res) => {
                let res = res.unwrap_err().0;
                match res {
                    LookupError::RequestTimedOut => {}
                    _ => unreachable!(),
                }
            }
            QueryOutcome::Entry(_) => {
                unreachable!()
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/config.rs | crates/net/network/src/config.rs | //! Network config support
use crate::{
error::NetworkError,
import::{BlockImport, ProofOfStakeBlockImport},
transactions::TransactionsManagerConfig,
NetworkHandle, NetworkManager,
};
use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks};
use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS};
use reth_discv5::NetworkStackId;
use reth_dns_discovery::DnsDiscoveryConfig;
use reth_eth_wire::{
handshake::{EthHandshake, EthRlpxHandshake},
EthNetworkPrimitives, HelloMessage, HelloMessageWithProtocols, NetworkPrimitives,
UnifiedStatus,
};
use reth_ethereum_forks::{ForkFilter, Head};
use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer};
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_storage_api::{noop::NoopProvider, BlockNumReader, BlockReader, HeaderProvider};
use reth_tasks::{TaskSpawner, TokioTaskExecutor};
use secp256k1::SECP256K1;
use std::{collections::HashSet, net::SocketAddr, sync::Arc};
// re-export for convenience
use crate::{
protocol::{IntoRlpxSubProtocol, RlpxSubProtocols},
transactions::TransactionPropagationMode,
};
pub use secp256k1::SecretKey;
/// Convenience function to create a new random [`SecretKey`]
pub fn rng_secret_key() -> SecretKey {
    let mut rng = rand_08::thread_rng();
    SecretKey::new(&mut rng)
}
/// All network related initialization settings.
#[derive(Debug)]
pub struct NetworkConfig<C, N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The client type that can interact with the chain.
    ///
    /// This type is used to fetch the block number after we established a session and received the
    /// [`UnifiedStatus`] block hash.
    pub client: C,
    /// The node's secret key, from which the node's identity is derived.
    pub secret_key: SecretKey,
    /// All boot nodes to start network discovery with.
    pub boot_nodes: HashSet<TrustedPeer>,
    /// How to set up discovery over DNS.
    pub dns_discovery_config: Option<DnsDiscoveryConfig>,
    /// Address to use for discovery v4.
    pub discovery_v4_addr: SocketAddr,
    /// How to set up discovery version 4.
    pub discovery_v4_config: Option<Discv4Config>,
    /// How to set up discovery version 5.
    pub discovery_v5_config: Option<reth_discv5::Config>,
    /// Address to listen for incoming connections
    pub listener_addr: SocketAddr,
    /// How to instantiate peer manager.
    pub peers_config: PeersConfig,
    /// How to configure the [`SessionManager`](crate::session::SessionManager).
    pub sessions_config: SessionsConfig,
    /// The chain id
    pub chain_id: u64,
    /// The [`ForkFilter`] to use at launch for authenticating sessions.
    ///
    /// See also <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2124.md#stale-software-examples>
    ///
    /// For sync from block `0`, this should be the default chain [`ForkFilter`] beginning at the
    /// first hardfork, `Frontier` for mainnet.
    pub fork_filter: ForkFilter,
    /// The block importer type.
    pub block_import: Box<dyn BlockImport<N::NewBlockPayload>>,
    /// The default mode of the network.
    pub network_mode: NetworkMode,
    /// The executor to use for spawning tasks.
    pub executor: Box<dyn TaskSpawner>,
    /// The `Status` message to send to peers at the beginning.
    pub status: UnifiedStatus,
    /// Sets the hello message for the p2p handshake in `RLPx`
    pub hello_message: HelloMessageWithProtocols,
    /// Additional protocols to announce and handle in `RLPx`
    pub extra_protocols: RlpxSubProtocols,
    /// Whether to disable transaction gossip
    pub tx_gossip_disabled: bool,
    /// How to instantiate transactions manager.
    pub transactions_manager_config: TransactionsManagerConfig,
    /// The NAT resolver for external IP
    pub nat: Option<NatResolver>,
    /// The Ethereum P2P handshake, see also:
    /// <https://github.com/ethereum/devp2p/blob/master/rlpx.md#initial-handshake>.
    /// This can be overridden to support custom handshake logic via the
    /// [`NetworkConfigBuilder`].
    pub handshake: Arc<dyn EthRlpxHandshake>,
}
// === impl NetworkConfig ===

impl<N: NetworkPrimitives> NetworkConfig<(), N> {
    /// Convenience method for creating the corresponding builder type.
    ///
    /// No client is required at this stage; it is supplied to the builder's `build`.
    pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder<N> {
        NetworkConfigBuilder::new(secret_key)
    }

    /// Convenience method for creating the corresponding builder type with a random secret key.
    pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder<N> {
        NetworkConfigBuilder::with_rng_secret_key()
    }
}
impl<C, N: NetworkPrimitives> NetworkConfig<C, N> {
    /// Create a new instance with all mandatory fields set; the rest are filled with defaults.
    pub fn new(client: C, secret_key: SecretKey) -> Self
    where
        C: ChainSpecProvider<ChainSpec: Hardforks>,
    {
        NetworkConfig::builder(secret_key).build(client)
    }

    /// Apply a function to the config.
    pub fn apply<F>(self, f: F) -> Self
    where
        F: FnOnce(Self) -> Self,
    {
        f(self)
    }

    /// Sets the config to use for the discovery v4 protocol.
    pub fn set_discovery_v4(mut self, discovery_config: Discv4Config) -> Self {
        self.discovery_v4_config = Some(discovery_config);
        self
    }

    /// Sets the address for the incoming `RLPx` connection listener.
    pub const fn set_listener_addr(mut self, listener_addr: SocketAddr) -> Self {
        self.listener_addr = listener_addr;
        self
    }

    /// Returns the address for the incoming `RLPx` connection listener.
    pub const fn listener_addr(&self) -> &SocketAddr {
        &self.listener_addr
    }
}
impl<C, N> NetworkConfig<C, N>
where
    C: BlockNumReader + 'static,
    N: NetworkPrimitives,
{
    /// Convenience method for calling [`NetworkManager::new`].
    ///
    /// Consumes the config and builds the network manager from it.
    pub async fn manager(self) -> Result<NetworkManager<N>, NetworkError> {
        NetworkManager::new(self).await
    }
}
impl<C, N> NetworkConfig<C, N>
where
    N: NetworkPrimitives,
    C: BlockReader<Block = N::Block, Receipt = N::Receipt, Header = N::BlockHeader>
        + HeaderProvider
        + Clone
        + Unpin
        + 'static,
{
    /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network.
    pub async fn start_network(self) -> Result<NetworkHandle<N>, NetworkError> {
        // Keep a handle to the client for the request handler before `self` is consumed.
        let client = self.client.clone();
        let builder = NetworkManager::builder::<C>(self).await?;
        let (handle, network, _txpool, eth) =
            builder.request_handler::<C>(client).split_with_handle();
        // Drive the network manager and the eth request handler as background tasks.
        tokio::task::spawn(network);
        tokio::task::spawn(eth);
        Ok(handle)
    }
}
/// Builder for [`NetworkConfig`](struct.NetworkConfig.html).
#[derive(Debug)]
pub struct NetworkConfigBuilder<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The node's secret key, from which the node's identity is derived.
    secret_key: SecretKey,
    /// How to configure discovery over DNS.
    dns_discovery_config: Option<DnsDiscoveryConfig>,
    /// How to set up discovery version 4.
    discovery_v4_builder: Option<Discv4ConfigBuilder>,
    /// How to set up discovery version 5.
    discovery_v5_builder: Option<reth_discv5::ConfigBuilder>,
    /// All boot nodes to start network discovery with.
    boot_nodes: HashSet<TrustedPeer>,
    /// Address to use for discovery
    discovery_addr: Option<SocketAddr>,
    /// Listener for incoming connections
    listener_addr: Option<SocketAddr>,
    /// How to instantiate peer manager.
    peers_config: Option<PeersConfig>,
    /// How to configure the sessions manager
    sessions_config: Option<SessionsConfig>,
    /// The default mode of the network.
    network_mode: NetworkMode,
    /// The executor to use for spawning tasks.
    executor: Option<Box<dyn TaskSpawner>>,
    /// Sets the hello message for the p2p handshake in `RLPx`
    hello_message: Option<HelloMessageWithProtocols>,
    /// Additional protocols to announce and handle in `RLPx`
    extra_protocols: RlpxSubProtocols,
    /// Head used to start set for the fork filter and status.
    head: Option<Head>,
    /// Whether tx gossip is disabled
    tx_gossip_disabled: bool,
    /// The block importer type
    block_import: Option<Box<dyn BlockImport<N::NewBlockPayload>>>,
    /// How to instantiate transactions manager.
    transactions_manager_config: TransactionsManagerConfig,
    /// The NAT resolver for external IP
    nat: Option<NatResolver>,
    /// The Ethereum P2P handshake, see also:
    /// <https://github.com/ethereum/devp2p/blob/master/rlpx.md#initial-handshake>.
    handshake: Arc<dyn EthRlpxHandshake>,
}
impl NetworkConfigBuilder<EthNetworkPrimitives> {
    /// Creates the `NetworkConfigBuilder` with [`EthNetworkPrimitives`] types.
    ///
    /// Shorthand for `NetworkConfigBuilder::<EthNetworkPrimitives>::new`.
    pub fn eth(secret_key: SecretKey) -> Self {
        Self::new(secret_key)
    }
}
// === impl NetworkConfigBuilder ===
#[expect(missing_docs)]
impl<N: NetworkPrimitives> NetworkConfigBuilder<N> {
/// Create a new builder instance with a random secret key.
pub fn with_rng_secret_key() -> Self {
    Self::new(rng_secret_key())
}

/// Create a new builder instance with the given secret key.
pub fn new(secret_key: SecretKey) -> Self {
    Self {
        secret_key,
        // DNS and discv4 discovery are enabled by default; discv5 is opt-in.
        dns_discovery_config: Some(Default::default()),
        discovery_v4_builder: Some(Default::default()),
        discovery_v5_builder: None,
        boot_nodes: Default::default(),
        discovery_addr: None,
        listener_addr: None,
        peers_config: None,
        sessions_config: None,
        network_mode: Default::default(),
        executor: None,
        hello_message: None,
        extra_protocols: Default::default(),
        head: None,
        tx_gossip_disabled: false,
        block_import: None,
        transactions_manager_config: Default::default(),
        nat: None,
        handshake: Arc::new(EthHandshake::default()),
    }
}

/// Apply a function to the builder.
pub fn apply<F>(self, f: F) -> Self
where
    F: FnOnce(Self) -> Self,
{
    f(self)
}

/// Returns the configured [`PeerId`]
pub fn get_peer_id(&self) -> PeerId {
    // The peer id is derived from the public key of the configured secret key.
    pk2id(&self.secret_key.public_key(SECP256K1))
}
/// Returns the configured [`SecretKey`], from which the node's identity is derived.
pub const fn secret_key(&self) -> &SecretKey {
    &self.secret_key
}

/// Sets the [`NetworkMode`].
pub const fn network_mode(mut self, network_mode: NetworkMode) -> Self {
    self.network_mode = network_mode;
    self
}

/// Configures the network to use proof-of-work.
///
/// This effectively allows block propagation in the `eth` sub-protocol, which has been
/// soft-deprecated with ethereum `PoS` after the merge. Even if block propagation is
/// technically allowed, according to the eth protocol, it is not expected to be used in `PoS`
/// networks and peers are supposed to terminate the connection if they receive a `NewBlock`
/// message.
pub const fn with_pow(self) -> Self {
    self.network_mode(NetworkMode::Work)
}

/// Sets the highest synced block.
///
/// This is used to construct the appropriate [`ForkFilter`] and [`UnifiedStatus`] message.
///
/// If not set, this defaults to the genesis specified by the current chain specification.
pub const fn set_head(mut self, head: Head) -> Self {
    self.head = Some(head);
    self
}

/// Sets the `HelloMessage` to send when connecting to peers.
///
/// ```
/// # use reth_eth_wire::HelloMessage;
/// # use reth_network::NetworkConfigBuilder;
/// # fn builder(builder: NetworkConfigBuilder) {
/// let peer_id = builder.get_peer_id();
/// builder.hello_message(HelloMessage::builder(peer_id).build());
/// # }
/// ```
pub fn hello_message(mut self, hello_message: HelloMessageWithProtocols) -> Self {
    self.hello_message = Some(hello_message);
    self
}

/// Set a custom peer config for how peers are handled
pub fn peer_config(mut self, config: PeersConfig) -> Self {
    self.peers_config = Some(config);
    self
}

/// Sets the executor to use for spawning tasks.
///
/// If `None`, then [`tokio::spawn`] is used for spawning tasks.
pub fn with_task_executor(mut self, executor: Box<dyn TaskSpawner>) -> Self {
    self.executor = Some(executor);
    self
}

/// Sets a custom config for how sessions are handled.
pub const fn sessions_config(mut self, config: SessionsConfig) -> Self {
    self.sessions_config = Some(config);
    self
}

/// Configures the transactions manager with the given config.
pub const fn transactions_manager_config(mut self, config: TransactionsManagerConfig) -> Self {
    self.transactions_manager_config = config;
    self
}

/// Configures the propagation mode for the transaction manager.
pub const fn transaction_propagation_mode(mut self, mode: TransactionPropagationMode) -> Self {
    self.transactions_manager_config.propagation_mode = mode;
    self
}
/// Sets the discovery and listener address
///
/// This is a convenience function for both [`NetworkConfigBuilder::listener_addr`] and
/// [`NetworkConfigBuilder::discovery_addr`].
///
/// By default, both are on the same port:
/// [`DEFAULT_DISCOVERY_PORT`](reth_discv4::DEFAULT_DISCOVERY_PORT)
pub const fn set_addrs(self, addr: SocketAddr) -> Self {
    self.listener_addr(addr).discovery_addr(addr)
}

/// Sets the socket address the network will listen on.
///
/// By default, this is [`DEFAULT_DISCOVERY_ADDRESS`]
pub const fn listener_addr(mut self, listener_addr: SocketAddr) -> Self {
    self.listener_addr = Some(listener_addr);
    self
}

/// Sets the port of the address the network will listen on.
///
/// By default, this is [`DEFAULT_DISCOVERY_PORT`](reth_discv4::DEFAULT_DISCOVERY_PORT)
pub fn listener_port(mut self, port: u16) -> Self {
    // Falls back to the default address if none was configured yet, then sets the port.
    self.listener_addr.get_or_insert(DEFAULT_DISCOVERY_ADDRESS).set_port(port);
    self
}

/// Sets the socket address the discovery network will listen on
pub const fn discovery_addr(mut self, discovery_addr: SocketAddr) -> Self {
    self.discovery_addr = Some(discovery_addr);
    self
}

/// Sets the port of the address the discovery network will listen on.
///
/// By default, this is [`DEFAULT_DISCOVERY_PORT`](reth_discv4::DEFAULT_DISCOVERY_PORT)
pub fn discovery_port(mut self, port: u16) -> Self {
    self.discovery_addr.get_or_insert(DEFAULT_DISCOVERY_ADDRESS).set_port(port);
    self
}

/// Launches the network with an unused network and discovery port
/// This is useful for testing.
pub fn with_unused_ports(self) -> Self {
    self.with_unused_discovery_port().with_unused_listener_port()
}

/// Sets the discovery port to an unused port.
/// This is useful for testing.
pub fn with_unused_discovery_port(self) -> Self {
    // Port 0 lets the OS pick a free port at bind time.
    self.discovery_port(0)
}

/// Sets the listener port to an unused port.
/// This is useful for testing.
pub fn with_unused_listener_port(self) -> Self {
    self.listener_port(0)
}
/// Sets the external ip resolver to use for discovery v4.
///
/// If no [`Discv4ConfigBuilder`] is set via [`Self::discovery`], this will create a new one.
///
/// This is a convenience function for setting the external ip resolver on the default
/// [`Discv4Config`] config.
pub fn external_ip_resolver(mut self, resolver: NatResolver) -> Self {
self.discovery_v4_builder
.get_or_insert_with(Discv4Config::builder)
.external_ip_resolver(Some(resolver));
self.nat = Some(resolver);
self
}
/// Sets the discv4 config to use.
pub fn discovery(mut self, builder: Discv4ConfigBuilder) -> Self {
self.discovery_v4_builder = Some(builder);
self
}
/// Sets the discv5 config to use.
pub fn discovery_v5(mut self, builder: reth_discv5::ConfigBuilder) -> Self {
self.discovery_v5_builder = Some(builder);
self
}
/// Sets the dns discovery config to use.
pub fn dns_discovery(mut self, config: DnsDiscoveryConfig) -> Self {
self.dns_discovery_config = Some(config);
self
}
/// Convenience function for setting [`Self::boot_nodes`] to the mainnet boot nodes.
pub fn mainnet_boot_nodes(self) -> Self {
    let nodes = mainnet_nodes();
    self.boot_nodes(nodes)
}
/// Convenience function for setting [`Self::boot_nodes`] to the sepolia boot nodes.
pub fn sepolia_boot_nodes(self) -> Self {
    let nodes = sepolia_nodes();
    self.boot_nodes(nodes)
}
/// Sets the boot nodes to use to bootstrap the configured discovery services (discv4 + discv5).
pub fn boot_nodes<T: Into<TrustedPeer>>(mut self, nodes: impl IntoIterator<Item = T>) -> Self {
    // Convert every entry into a `TrustedPeer` before collecting into the configured set.
    let peers = nodes.into_iter().map(|node| node.into());
    self.boot_nodes = peers.collect();
    self
}
/// Returns an iterator over all configured boot nodes.
///
/// Borrows the entries; the builder keeps ownership.
pub fn boot_nodes_iter(&self) -> impl Iterator<Item = &TrustedPeer> + '_ {
    self.boot_nodes.iter()
}
/// Disable the DNS discovery.
pub fn disable_dns_discovery(mut self) -> Self {
self.dns_discovery_config = None;
self
}
/// Disables NAT resolution, clearing any configured [`NatResolver`].
pub const fn disable_nat(mut self) -> Self {
    self.nat = None;
    self
}
/// Disables all discovery.
pub fn disable_discovery(self) -> Self {
    // The three services are disabled independently; order does not matter.
    self.disable_dns_discovery().disable_discv5_discovery().disable_discv4_discovery()
}
/// Disables all discovery if the given condition is true.
pub fn disable_discovery_if(self, disable: bool) -> Self {
    match disable {
        true => self.disable_discovery(),
        false => self,
    }
}
/// Disable the Discv4 discovery.
pub fn disable_discv4_discovery(mut self) -> Self {
self.discovery_v4_builder = None;
self
}
/// Disable the Discv5 discovery.
pub fn disable_discv5_discovery(mut self) -> Self {
self.discovery_v5_builder = None;
self
}
/// Disable the DNS discovery if the given condition is true.
pub fn disable_dns_discovery_if(self, disable: bool) -> Self {
    if !disable {
        return self
    }
    self.disable_dns_discovery()
}
/// Disable the Discv4 discovery if the given condition is true.
pub fn disable_discv4_discovery_if(self, disable: bool) -> Self {
    match disable {
        true => self.disable_discv4_discovery(),
        false => self,
    }
}
/// Disable the Discv5 discovery if the given condition is true.
pub fn disable_discv5_discovery_if(self, disable: bool) -> Self {
    if !disable {
        return self
    }
    self.disable_discv5_discovery()
}
/// Adds a new additional protocol to the `RLPx` sub-protocol list.
///
/// The protocol is appended; previously added protocols are kept.
pub fn add_rlpx_sub_protocol(mut self, protocol: impl IntoRlpxSubProtocol) -> Self {
    self.extra_protocols.push(protocol);
    self
}
/// Sets whether tx gossip is disabled.
///
/// When `true`, the node will not participate in transaction gossip.
pub const fn disable_tx_gossip(mut self, disable_tx_gossip: bool) -> Self {
    self.tx_gossip_disabled = disable_tx_gossip;
    self
}
/// Sets the block import type.
pub fn block_import(mut self, block_import: Box<dyn BlockImport<N::NewBlockPayload>>) -> Self {
self.block_import = Some(block_import);
self
}
/// Convenience function for creating a [`NetworkConfig`] with a noop provider that does
/// nothing.
pub fn build_with_noop_provider<ChainSpec>(
    self,
    chain_spec: Arc<ChainSpec>,
) -> NetworkConfig<NoopProvider<ChainSpec>, N>
where
    ChainSpec: EthChainSpec + Hardforks + 'static,
{
    let provider = NoopProvider::eth(chain_spec);
    self.build(provider)
}
/// Sets the NAT resolver for external IP.
///
/// Passing `None` clears any previously configured resolver.
pub const fn add_nat(mut self, nat: Option<NatResolver>) -> Self {
    self.nat = nat;
    self
}
/// Overrides the default Eth `RLPx` handshake.
pub fn eth_rlpx_handshake(mut self, handshake: Arc<dyn EthRlpxHandshake>) -> Self {
self.handshake = handshake;
self
}
/// Consumes the type and creates the actual [`NetworkConfig`]
/// for the given client type that can interact with the chain.
///
/// The given client is to be used for interacting with the chain, for example fetching the
/// corresponding block for a given block hash we receive from a peer in the status message when
/// establishing a connection.
pub fn build<C>(self, client: C) -> NetworkConfig<C, N>
where
    C: ChainSpecProvider<ChainSpec: Hardforks>,
{
    let peer_id = self.get_peer_id();
    let chain_spec = client.chain_spec();
    // Destructure so unset options can be replaced with defaults below.
    let Self {
        secret_key,
        mut dns_discovery_config,
        discovery_v4_builder,
        mut discovery_v5_builder,
        boot_nodes,
        discovery_addr,
        listener_addr,
        peers_config,
        sessions_config,
        network_mode,
        executor,
        hello_message,
        extra_protocols,
        head,
        tx_gossip_disabled,
        block_import,
        transactions_manager_config,
        nat,
        handshake,
    } = self;
    // If no head was configured, default to the genesis block of the chain spec.
    let head = head.unwrap_or_else(|| Head {
        hash: chain_spec.genesis_hash(),
        number: 0,
        timestamp: chain_spec.genesis().timestamp,
        difficulty: chain_spec.genesis().difficulty,
        total_difficulty: chain_spec.genesis().difficulty,
    });
    // Advertise the current fork id in the discv5 ENR, keyed by the network stack id.
    discovery_v5_builder = discovery_v5_builder.map(|mut builder| {
        if let Some(network_stack_id) = NetworkStackId::id(&chain_spec) {
            let fork_id = chain_spec.fork_id(&head);
            builder = builder.fork(network_stack_id, fork_id)
        }
        builder
    });
    let listener_addr = listener_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS);
    let mut hello_message =
        hello_message.unwrap_or_else(|| HelloMessage::builder(peer_id).build());
    // The hello message must advertise the port we actually listen on.
    hello_message.port = listener_addr.port();
    // set the status
    let status = UnifiedStatus::spec_builder(&chain_spec, &head);
    // set a fork filter based on the chain spec and head
    let fork_filter = chain_spec.fork_filter(head);
    // get the chain id
    let chain_id = chain_spec.chain().id();
    // If default DNS config is used then we add the known dns network to bootstrap from
    if let Some(dns_networks) =
        dns_discovery_config.as_mut().and_then(|c| c.bootstrap_dns_networks.as_mut())
    {
        if dns_networks.is_empty() {
            if let Some(link) = chain_spec.chain().public_dns_network_protocol() {
                dns_networks.insert(link.parse().expect("is valid DNS link entry"));
            }
        }
    }
    NetworkConfig {
        client,
        secret_key,
        boot_nodes,
        dns_discovery_config,
        discovery_v4_config: discovery_v4_builder.map(|builder| builder.build()),
        discovery_v5_config: discovery_v5_builder.map(|builder| builder.build()),
        discovery_v4_addr: discovery_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS),
        listener_addr,
        peers_config: peers_config.unwrap_or_default(),
        sessions_config: sessions_config.unwrap_or_default(),
        chain_id,
        block_import: block_import.unwrap_or_else(|| Box::<ProofOfStakeBlockImport>::default()),
        network_mode,
        executor: executor.unwrap_or_else(|| Box::<TokioTaskExecutor>::default()),
        status,
        hello_message,
        extra_protocols,
        fork_filter,
        tx_gossip_disabled,
        transactions_manager_config,
        nat,
        handshake,
    }
}
}
/// Describes the mode of the network wrt. POS or POW.
///
/// This affects block propagation in the `eth` sub-protocol [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p)
///
/// In POS `NewBlockHashes` and `NewBlock` messages become invalid.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum NetworkMode {
    /// Network is in proof-of-work mode.
    Work,
    /// Network is in proof-of-stake mode (the default).
    #[default]
    Stake,
}
// === impl NetworkMode ===
impl NetworkMode {
    /// Returns `true` if the network has entered proof-of-stake.
    pub const fn is_stake(&self) -> bool {
        match self {
            Self::Stake => true,
            Self::Work => false,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_eips::eip2124::ForkHash;
    use alloy_genesis::Genesis;
    use alloy_primitives::U256;
    use reth_chainspec::{
        Chain, ChainSpecBuilder, EthereumHardfork, ForkCondition, ForkId, MAINNET,
    };
    use reth_discv5::build_local_enr;
    use reth_dns_discovery::tree::LinkEntry;
    use reth_storage_api::noop::NoopProvider;
    use std::{net::Ipv4Addr, sync::Arc};

    /// Returns a builder seeded with a fresh random secret key.
    fn builder() -> NetworkConfigBuilder {
        let secret_key = SecretKey::new(&mut rand_08::thread_rng());
        NetworkConfigBuilder::new(secret_key)
    }

    /// The default config should bootstrap DNS discovery from exactly the mainnet DNS network.
    #[test]
    fn test_network_dns_defaults() {
        let config = builder().build(NoopProvider::default());
        let dns = config.dns_discovery_config.unwrap();
        let bootstrap_nodes = dns.bootstrap_dns_networks.unwrap();
        let mainnet_dns: LinkEntry =
            Chain::mainnet().public_dns_network_protocol().unwrap().parse().unwrap();
        assert!(bootstrap_nodes.contains(&mainnet_dns));
        assert_eq!(bootstrap_nodes.len(), 1);
    }

    /// With all hardforks removed, status and fork filter must both carry the bare genesis
    /// fork hash and no scheduled next fork.
    #[test]
    fn test_network_fork_filter_default() {
        let mut chain_spec = Arc::clone(&MAINNET);
        // remove any `next` fields we would have by removing all hardforks
        Arc::make_mut(&mut chain_spec).hardforks = Default::default();
        // check that the forkid is initialized with the genesis and no other forks
        let genesis_fork_hash = ForkHash::from(chain_spec.genesis_hash());
        // enforce that the fork_id set in the status is consistent with the generated fork filter
        let config = builder().build_with_noop_provider(chain_spec);
        let status = config.status;
        let fork_filter = config.fork_filter;
        // assert that there are no other forks
        assert_eq!(status.forkid.next, 0);
        // assert the same thing for the fork_filter
        assert_eq!(fork_filter.current().next, 0);
        // check status and fork_filter forkhash
        assert_eq!(status.forkid.hash, genesis_fork_hash);
        assert_eq!(fork_filter.current().hash, genesis_fork_hash);
    }

    /// The fork id advertised in the local discv5 ENR must reflect the *active* fork, not a
    /// future scheduled one.
    #[test]
    fn test_discv5_fork_id_default() {
        #[cfg(feature = "timestamp-in-seconds")]
        const GENESIS_TIME: u64 = 151_515;
        #[cfg(not(feature = "timestamp-in-seconds"))]
        const GENESIS_TIME: u64 = 151_515_000;
        let genesis = Genesis::default().with_timestamp(GENESIS_TIME);
        let active_fork = (EthereumHardfork::Shanghai, ForkCondition::Timestamp(GENESIS_TIME));
        let future_fork = (EthereumHardfork::Cancun, ForkCondition::Timestamp(GENESIS_TIME + 1));
        let chain_spec = ChainSpecBuilder::default()
            .chain(Chain::dev())
            .genesis(genesis.into())
            .with_fork(active_fork.0, active_fork.1)
            .with_fork(future_fork.0, future_fork.1)
            .build();
        // get the fork id to advertise on discv5
        let genesis_fork_hash = ForkHash::from(chain_spec.genesis_hash());
        let fork_id = ForkId { hash: genesis_fork_hash, next: GENESIS_TIME + 1 };
        // check the fork id is set to active fork and _not_ yet future fork
        assert_eq!(
            fork_id,
            chain_spec.fork_id(&Head {
                hash: chain_spec.genesis_hash(),
                number: 0,
                timestamp: GENESIS_TIME,
                difficulty: U256::ZERO,
                total_difficulty: U256::ZERO,
            })
        );
        assert_ne!(fork_id, chain_spec.latest_fork_id());
        // enforce that the fork_id set in local enr
        let fork_key = b"odyssey";
        let config = builder()
            .discovery_v5(
                reth_discv5::Config::builder((Ipv4Addr::LOCALHOST, 30303).into())
                    .fork(fork_key, fork_id),
            )
            .build_with_noop_provider(Arc::new(chain_spec));
        let (local_enr, _, _, _) = build_local_enr(
            &config.secret_key,
            &config.discovery_v5_config.expect("should build config"),
        );
        // peers on the odyssey network will check discovered enrs for the 'odyssey' key and
        // decide based on this if they attempt and rlpx connection to the peer or not
        let advertised_fork_id = *local_enr
            .get_decodable::<Vec<ForkId>>(fork_key)
            .expect("should read 'odyssey'")
            .expect("should decode fork id list")
            .first()
            .expect("should be non-empty");
        assert_eq!(advertised_fork_id, fork_id);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/discovery.rs | crates/net/network/src/discovery.rs | //! Discovery support for the network.
use crate::{
cache::LruMap,
error::{NetworkError, ServiceKind},
};
use enr::Enr;
use futures::StreamExt;
use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config};
use reth_discv5::{DiscoveredPeer, Discv5};
use reth_dns_discovery::{
DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver,
};
use reth_ethereum_forks::{EnrForkIdEntry, ForkId};
use reth_network_api::{DiscoveredEvent, DiscoveryEvent};
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::PeerAddr;
use secp256k1::SecretKey;
use std::{
collections::VecDeque,
net::{IpAddr, SocketAddr},
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
};
use tokio::{sync::mpsc, task::JoinHandle};
use tokio_stream::{wrappers::ReceiverStream, Stream};
use tracing::trace;
/// Default max capacity for cache of discovered peers.
///
/// Default is 10 000 peers. Bounds the size of [`Discovery`]'s LRU cache of discovered
/// nodes.
pub const DEFAULT_MAX_CAPACITY_DISCOVERED_PEERS_CACHE: u32 = 10_000;
/// An abstraction over the configured discovery protocol.
///
/// Listens for new discovered nodes and emits events for discovered nodes and their
/// address.
///
/// Combines discv4, discv5 and DNS discovery; each service is optional.
#[derive(Debug)]
pub struct Discovery {
    /// All nodes discovered via discovery protocol.
    ///
    /// These nodes can be ephemeral and are updated via the discovery protocol.
    discovered_nodes: LruMap<PeerId, PeerAddr>,
    /// Local ENR of the discovery v4 service (discv5 ENR has same [`PeerId`]).
    local_enr: NodeRecord,
    /// Handler to interact with the Discovery v4 service
    discv4: Option<Discv4>,
    /// All KAD table updates from the discv4 service.
    discv4_updates: Option<ReceiverStream<DiscoveryUpdate>>,
    /// The handle to the spawned discv4 service
    _discv4_service: Option<JoinHandle<()>>,
    /// Handler to interact with the Discovery v5 service
    discv5: Option<Discv5>,
    /// All KAD table updates from the discv5 service.
    discv5_updates: Option<ReceiverStream<discv5::Event>>,
    /// Handler to interact with the DNS discovery service
    _dns_discovery: Option<DnsDiscoveryHandle>,
    /// Updates from the DNS discovery service.
    dns_discovery_updates: Option<ReceiverStream<DnsNodeRecordUpdate>>,
    /// The handle to the spawned DNS discovery service
    _dns_disc_service: Option<JoinHandle<()>>,
    /// Events buffered until polled.
    queued_events: VecDeque<DiscoveryEvent>,
    /// List of listeners subscribed to discovery events.
    discovery_listeners: Vec<mpsc::UnboundedSender<DiscoveryEvent>>,
}
impl Discovery {
    /// Spawns the discovery service.
    ///
    /// This will spawn the [`reth_discv4::Discv4Service`] onto a new task and establish a listener
    /// channel to receive all discovered nodes.
    pub async fn new(
        tcp_addr: SocketAddr,
        discovery_v4_addr: SocketAddr,
        sk: SecretKey,
        discv4_config: Option<Discv4Config>,
        discv5_config: Option<reth_discv5::Config>, // contains discv5 listen address
        dns_discovery_config: Option<DnsDiscoveryConfig>,
    ) -> Result<Self, NetworkError> {
        // setup discv4 with the discovery address and tcp port
        let local_enr =
            NodeRecord::from_secret_key(discovery_v4_addr, &sk).with_tcp_port(tcp_addr.port());
        let discv4_future = async {
            // If no discv4 config is given, the service stays disabled.
            let Some(disc_config) = discv4_config else { return Ok((None, None, None)) };
            let (discv4, mut discv4_service) =
                Discv4::bind(discovery_v4_addr, local_enr, sk, disc_config).await.map_err(
                    |err| {
                        NetworkError::from_io_error(err, ServiceKind::Discovery(discovery_v4_addr))
                    },
                )?;
            let discv4_updates = discv4_service.update_stream();
            // spawn the service
            let discv4_service = discv4_service.spawn();
            Ok((Some(discv4), Some(discv4_updates), Some(discv4_service)))
        };
        let discv5_future = async {
            // If no discv5 config is given, the service stays disabled.
            let Some(config) = discv5_config else { return Ok::<_, NetworkError>((None, None)) };
            let (discv5, discv5_updates, _local_enr_discv5) = Discv5::start(&sk, config).await?;
            Ok((Some(discv5), Some(discv5_updates.into())))
        };
        // Start discv4 and discv5 concurrently; either failure aborts setup.
        let ((discv4, discv4_updates, _discv4_service), (discv5, discv5_updates)) =
            tokio::try_join!(discv4_future, discv5_future)?;
        // setup DNS discovery
        let (_dns_discovery, dns_discovery_updates, _dns_disc_service) =
            if let Some(dns_config) = dns_discovery_config {
                let (mut service, dns_disc) = DnsDiscoveryService::new_pair(
                    Arc::new(DnsResolver::from_system_conf()?),
                    dns_config,
                );
                let dns_discovery_updates = service.node_record_stream();
                let dns_disc_service = service.spawn();
                (Some(dns_disc), Some(dns_discovery_updates), Some(dns_disc_service))
            } else {
                (None, None, None)
            };
        Ok(Self {
            discovery_listeners: Default::default(),
            local_enr,
            discv4,
            discv4_updates,
            _discv4_service,
            discv5,
            discv5_updates,
            discovered_nodes: LruMap::new(DEFAULT_MAX_CAPACITY_DISCOVERED_PEERS_CACHE),
            queued_events: Default::default(),
            _dns_disc_service,
            _dns_discovery,
            dns_discovery_updates,
        })
    }

    /// Registers a listener for receiving [`DiscoveryEvent`] updates.
    pub(crate) fn add_listener(&mut self, tx: mpsc::UnboundedSender<DiscoveryEvent>) {
        self.discovery_listeners.push(tx);
    }

    /// Notifies all registered listeners with the provided `event`.
    ///
    /// Listeners whose channel is closed are dropped from the list.
    #[inline]
    fn notify_listeners(&mut self, event: &DiscoveryEvent) {
        self.discovery_listeners.retain_mut(|listener| listener.send(event.clone()).is_ok());
    }

    /// Updates the `eth:ForkId` field in discv4.
    pub(crate) fn update_fork_id(&self, fork_id: ForkId) {
        if let Some(discv4) = &self.discv4 {
            // use forward-compatible forkid entry
            discv4.set_eip868_rlp(b"eth".to_vec(), EnrForkIdEntry::from(fork_id))
        }
        // todo: update discv5 enr
    }

    /// Bans the [`IpAddr`] in the discovery service.
    pub(crate) fn ban_ip(&self, ip: IpAddr) {
        if let Some(discv4) = &self.discv4 {
            discv4.ban_ip(ip)
        }
        if let Some(discv5) = &self.discv5 {
            discv5.ban_ip(ip)
        }
    }

    /// Bans the [`PeerId`] and [`IpAddr`] in the discovery service.
    pub(crate) fn ban(&self, peer_id: PeerId, ip: IpAddr) {
        if let Some(discv4) = &self.discv4 {
            discv4.ban(peer_id, ip)
        }
        if let Some(discv5) = &self.discv5 {
            discv5.ban(peer_id, ip)
        }
    }

    /// Returns a shared reference to the discv4.
    pub fn discv4(&self) -> Option<Discv4> {
        self.discv4.clone()
    }

    /// Returns the id with which the local node identifies itself in the network
    pub(crate) const fn local_id(&self) -> PeerId {
        self.local_enr.id // local discv4 and discv5 have same id, since signed with same secret key
    }

    /// Add a node to the discv4 table.
    pub(crate) fn add_discv4_node(&self, node: NodeRecord) {
        if let Some(discv4) = &self.discv4 {
            discv4.add_node(node);
        }
    }

    /// Returns discv5 handle.
    pub fn discv5(&self) -> Option<Discv5> {
        self.discv5.clone()
    }

    /// Add a node to the discv5 table.
    #[expect(clippy::result_large_err)]
    pub(crate) fn add_discv5_node(&self, enr: Enr<SecretKey>) -> Result<(), NetworkError> {
        if let Some(discv5) = &self.discv5 {
            discv5.add_node(enr).map_err(NetworkError::Discv5Error)?;
        }
        Ok(())
    }

    /// Processes an incoming [`NodeRecord`] update from a discovery service
    ///
    /// Queues a `NewNode` event only for peers not already in the cache.
    fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option<ForkId>) {
        let peer_id = record.id;
        let tcp_addr = record.tcp_addr();
        if tcp_addr.port() == 0 {
            // useless peer for p2p
            return
        }
        let udp_addr = record.udp_addr();
        let addr = PeerAddr::new(tcp_addr, Some(udp_addr));
        // The closure only runs (and queues the event) if the peer is newly discovered.
        _ =
            self.discovered_nodes.get_or_insert(peer_id, || {
                self.queued_events.push_back(DiscoveryEvent::NewNode(
                    DiscoveredEvent::EventQueued { peer_id, addr, fork_id },
                ));
                addr
            })
    }

    /// Dispatches a single discv4 update, recursing into batches.
    fn on_discv4_update(&mut self, update: DiscoveryUpdate) {
        match update {
            DiscoveryUpdate::Added(record) | DiscoveryUpdate::DiscoveredAtCapacity(record) => {
                self.on_node_record_update(record, None);
            }
            DiscoveryUpdate::EnrForkId(node, fork_id) => {
                self.queued_events.push_back(DiscoveryEvent::EnrForkId(node.id, fork_id))
            }
            DiscoveryUpdate::Removed(peer_id) => {
                self.discovered_nodes.remove(&peer_id);
            }
            DiscoveryUpdate::Batch(updates) => {
                for update in updates {
                    self.on_discv4_update(update);
                }
            }
        }
    }

    /// Advances all discovery streams and returns the next ready [`DiscoveryEvent`].
    ///
    /// Returns `Poll::Pending` once all underlying streams are drained and the queue is empty.
    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<DiscoveryEvent> {
        loop {
            // Drain all buffered events first
            if let Some(event) = self.queued_events.pop_front() {
                self.notify_listeners(&event);
                return Poll::Ready(event)
            }
            // drain the discv4 update stream
            while let Some(Poll::Ready(Some(update))) =
                self.discv4_updates.as_mut().map(|updates| updates.poll_next_unpin(cx))
            {
                self.on_discv4_update(update)
            }
            // drain the discv5 update stream
            while let Some(Poll::Ready(Some(update))) =
                self.discv5_updates.as_mut().map(|updates| updates.poll_next_unpin(cx))
            {
                if let Some(discv5) = self.discv5.as_mut() {
                    if let Some(DiscoveredPeer { node_record, fork_id }) =
                        discv5.on_discv5_update(update)
                    {
                        self.on_node_record_update(node_record, fork_id);
                    }
                }
            }
            // drain the dns update stream
            while let Some(Poll::Ready(Some(update))) =
                self.dns_discovery_updates.as_mut().map(|updates| updates.poll_next_unpin(cx))
            {
                // DNS-discovered nodes are forwarded to both KAD tables; a discv5 failure is
                // non-fatal and only traced.
                self.add_discv4_node(update.node_record);
                if let Err(err) = self.add_discv5_node(update.enr) {
                    trace!(target: "net::discovery",
                        %err,
                        "failed adding node discovered by dns to discv5"
                    );
                }
                self.on_node_record_update(update.node_record, update.fork_id);
            }
            if self.queued_events.is_empty() {
                return Poll::Pending
            }
        }
    }
}
impl Stream for Discovery {
    type Item = DiscoveryEvent;

    /// Delegates to [`Discovery::poll`]; the stream never yields `None` on its own.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        match this.poll(cx) {
            Poll::Ready(event) => Poll::Ready(Some(event)),
            Poll::Pending => Poll::Pending,
        }
    }
}
#[cfg(test)]
impl Discovery {
    /// Returns a Discovery instance that does nothing and is intended for testing purposes.
    ///
    /// NOTE: This instance does nothing
    pub(crate) fn noop() -> Self {
        let (_discovery_listeners, _): (mpsc::UnboundedSender<DiscoveryEvent>, _) =
            mpsc::unbounded_channel();
        Self {
            // zero capacity: this noop instance never retains discovered peers
            discovered_nodes: LruMap::new(0),
            local_enr: NodeRecord {
                address: IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
                tcp_port: 0,
                udp_port: 0,
                id: PeerId::random(),
            },
            discv4: Default::default(),
            discv4_updates: Default::default(),
            discv5: None,
            discv5_updates: None,
            queued_events: Default::default(),
            _discv4_service: Default::default(),
            _dns_discovery: None,
            dns_discovery_updates: None,
            _dns_disc_service: None,
            discovery_listeners: Default::default(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use secp256k1::SECP256K1;
    use std::net::{Ipv4Addr, SocketAddrV4};

    /// Sanity check that the combined discovery service can be constructed on ephemeral ports.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_discovery_setup() {
        let (secret_key, _) = SECP256K1.generate_keypair(&mut rand_08::thread_rng());
        let discovery_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0));
        let _discovery = Discovery::new(
            discovery_addr,
            discovery_addr,
            secret_key,
            Default::default(),
            None,
            Default::default(),
        )
        .await
        .unwrap();
    }

    use reth_discv4::Discv4ConfigBuilder;
    use reth_discv5::{enr::EnrCombinedKeyWrapper, enr_to_discv4_id};
    use tracing::trace;

    /// Boots a `Discovery` instance running both discv4 and discv5 on localhost with the given
    /// UDP ports.
    async fn start_discovery_node(udp_port_discv4: u16, udp_port_discv5: u16) -> Discovery {
        let secret_key = SecretKey::new(&mut rand_08::thread_rng());
        let discv4_addr = format!("127.0.0.1:{udp_port_discv4}").parse().unwrap();
        let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap();
        // disable `NatResolver`
        let discv4_config = Discv4ConfigBuilder::default().external_ip_resolver(None).build();
        let discv5_listen_config = discv5::ListenConfig::from(discv5_addr);
        let discv5_config = reth_discv5::Config::builder(discv5_addr)
            .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build())
            .build();
        Discovery::new(
            discv4_addr,
            discv4_addr,
            secret_key,
            Some(discv4_config),
            Some(discv5_config),
            None,
        )
        .await
        .expect("should build discv5 with discv4 downgrade")
    }

    /// Verifies that discv4 and discv5 identities derived from the same secret key agree, and
    /// that cross-protocol discovery does not double-count peers.
    #[tokio::test(flavor = "multi_thread")]
    async fn discv5_and_discv4_same_pk() {
        reth_tracing::init_test_tracing();
        // set up test
        let mut node_1 = start_discovery_node(40014, 40015).await;
        let discv4_enr_1 = node_1.discv4.as_ref().unwrap().node_record();
        let discv5_enr_node_1 =
            node_1.discv5.as_ref().unwrap().with_discv5(|discv5| discv5.local_enr());
        let discv4_id_1 = discv4_enr_1.id;
        let discv5_id_1 = discv5_enr_node_1.node_id();
        let mut node_2 = start_discovery_node(40024, 40025).await;
        let discv4_enr_2 = node_2.discv4.as_ref().unwrap().node_record();
        let discv5_enr_node_2 =
            node_2.discv5.as_ref().unwrap().with_discv5(|discv5| discv5.local_enr());
        let discv4_id_2 = discv4_enr_2.id;
        let discv5_id_2 = discv5_enr_node_2.node_id();
        trace!(target: "net::discovery::tests",
            node_1_node_id=format!("{:#}", discv5_id_1),
            node_2_node_id=format!("{:#}", discv5_id_2),
            "started nodes"
        );
        // test
        // assert discovery version 4 and version 5 nodes have same id
        assert_eq!(discv4_id_1, enr_to_discv4_id(&discv5_enr_node_1).unwrap());
        assert_eq!(discv4_id_2, enr_to_discv4_id(&discv5_enr_node_2).unwrap());
        // add node_2:discv4 manually to node_1:discv4
        node_1.add_discv4_node(discv4_enr_2);
        // verify node_2:discv4 discovered node_1:discv4 and vv
        let event_node_1 = node_1.next().await.unwrap();
        let event_node_2 = node_2.next().await.unwrap();
        assert_eq!(
            DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued {
                peer_id: discv4_id_2,
                addr: PeerAddr::new(discv4_enr_2.tcp_addr(), Some(discv4_enr_2.udp_addr())),
                fork_id: None
            }),
            event_node_1
        );
        assert_eq!(
            DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued {
                peer_id: discv4_id_1,
                addr: PeerAddr::new(discv4_enr_1.tcp_addr(), Some(discv4_enr_1.udp_addr())),
                fork_id: None
            }),
            event_node_2
        );
        assert_eq!(1, node_1.discovered_nodes.len());
        assert_eq!(1, node_2.discovered_nodes.len());
        // add node_2:discv5 to node_1:discv5, manual insertion won't emit an event
        node_1.add_discv5_node(EnrCombinedKeyWrapper(discv5_enr_node_2.clone()).into()).unwrap();
        // verify node_2 is in KBuckets of node_1:discv5
        assert!(node_1
            .discv5
            .as_ref()
            .unwrap()
            .with_discv5(|discv5| discv5.table_entries_id().contains(&discv5_id_2)));
        // manually trigger connection from node_1:discv5 to node_2:discv5
        node_1
            .discv5
            .as_ref()
            .unwrap()
            .with_discv5(|discv5| discv5.send_ping(discv5_enr_node_2.clone()))
            .await
            .unwrap();
        // this won't emit an event, since the nodes already discovered each other on discv4, the
        // number of nodes stored for each node on this level remains 1.
        assert_eq!(1, node_1.discovered_nodes.len());
        assert_eq!(1, node_2.discovered_nodes.len());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/builder.rs | crates/net/network/src/builder.rs | //! Builder support for configuring the entire setup.
use std::fmt::Debug;
use crate::{
eth_requests::EthRequestHandler,
transactions::{
config::{StrictEthAnnouncementFilter, TransactionPropagationKind},
policy::NetworkPolicies,
TransactionPropagationPolicy, TransactionsManager, TransactionsManagerConfig,
},
NetworkHandle, NetworkManager,
};
use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives};
use reth_network_api::test_utils::PeersHandleProvider;
use reth_transaction_pool::TransactionPool;
use tokio::sync::mpsc;
/// Maximum channel capacity of the [`EthRequestHandler`] request channel.
///
/// We set the max channel capacity of the `EthRequestHandler` to 256: with 256 buffered
/// requests of a malicious 10MB body each, this is ~2.6GB, which can be absorbed by the node.
pub(crate) const ETH_REQUEST_CHANNEL_CAPACITY: usize = 256;
/// A builder that can configure all components of the network.
#[expect(missing_debug_implementations)]
pub struct NetworkBuilder<Tx, Eth, N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The network manager driving the p2p network.
    pub(crate) network: NetworkManager<N>,
    /// The transactions task component.
    pub(crate) transactions: Tx,
    /// The eth request handler component.
    pub(crate) request_handler: Eth,
}
// === impl NetworkBuilder ===
impl<Tx, Eth, N: NetworkPrimitives> NetworkBuilder<Tx, Eth, N> {
    /// Consumes the type and returns all fields.
    pub fn split(self) -> (NetworkManager<N>, Tx, Eth) {
        let Self { network, transactions, request_handler } = self;
        (network, transactions, request_handler)
    }

    /// Returns the network manager.
    pub const fn network(&self) -> &NetworkManager<N> {
        &self.network
    }

    /// Returns the mutable network manager.
    pub const fn network_mut(&mut self) -> &mut NetworkManager<N> {
        &mut self.network
    }

    /// Returns the handle to the network.
    pub fn handle(&self) -> NetworkHandle<N> {
        self.network.handle().clone()
    }

    /// Consumes the type and returns all fields and also return a [`NetworkHandle`].
    pub fn split_with_handle(self) -> (NetworkHandle<N>, NetworkManager<N>, Tx, Eth) {
        let Self { network, transactions, request_handler } = self;
        let handle = network.handle().clone();
        (handle, network, transactions, request_handler)
    }

    /// Creates a new [`EthRequestHandler`] and wires it to the network.
    pub fn request_handler<Client>(
        self,
        client: Client,
    ) -> NetworkBuilder<Tx, EthRequestHandler<Client, N>, N> {
        let Self { mut network, transactions, .. } = self;
        // Bounded channel: the network forwards incoming eth requests to the handler.
        let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY);
        network.set_eth_request_handler(tx);
        let peers = network.handle().peers_handle().clone();
        let request_handler = EthRequestHandler::new(client, peers, rx);
        NetworkBuilder { network, request_handler, transactions }
    }

    /// Creates a new [`TransactionsManager`] and wires it to the network.
    ///
    /// Uses the default propagation policy together with the strict announcement filter.
    pub fn transactions<Pool: TransactionPool>(
        self,
        pool: Pool,
        transactions_manager_config: TransactionsManagerConfig,
    ) -> NetworkBuilder<
        TransactionsManager<
            Pool,
            N,
            NetworkPolicies<TransactionPropagationKind, StrictEthAnnouncementFilter>,
        >,
        Eth,
        N,
    > {
        self.transactions_with_policy(
            pool,
            transactions_manager_config,
            TransactionPropagationKind::default(),
        )
    }

    /// Creates a new [`TransactionsManager`] and wires it to the network.
    ///
    /// Like [`Self::transactions`], but with a caller-provided propagation policy.
    pub fn transactions_with_policy<
        Pool: TransactionPool,
        P: TransactionPropagationPolicy + Debug,
    >(
        self,
        pool: Pool,
        transactions_manager_config: TransactionsManagerConfig,
        propagation_policy: P,
    ) -> NetworkBuilder<
        TransactionsManager<Pool, N, NetworkPolicies<P, StrictEthAnnouncementFilter>>,
        Eth,
        N,
    > {
        let Self { mut network, request_handler, .. } = self;
        // Unbounded channel: the network forwards transaction events to the manager.
        let (tx, rx) = mpsc::unbounded_channel();
        network.set_transactions(tx);
        let handle = network.handle().clone();
        let announcement_policy = StrictEthAnnouncementFilter::default();
        let policies = NetworkPolicies::new(propagation_policy, announcement_policy);
        let transactions = TransactionsManager::with_policy(
            handle,
            pool,
            rx,
            transactions_manager_config,
            policies,
        );
        NetworkBuilder { network, request_handler, transactions }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/lib.rs | crates/net/network/src/lib.rs | //! reth P2P networking.
//!
//! Ethereum's networking protocol is specified in [devp2p](https://github.com/ethereum/devp2p).
//!
//! In order for a node to join the ethereum p2p network it needs to know what nodes are already
//! part of that network. This includes public identities (public key) and addresses (where to reach
//! them).
//!
//! ## Bird's Eye View
//!
//! See also diagram in [`NetworkManager`]
//!
//! The `Network` is made up of several, separate tasks:
//!
//! - `Transactions Task`: is a spawned
//! [`TransactionsManager`](crate::transactions::TransactionsManager) future that:
//!
//! * Responds to incoming transaction related requests
//! * Requests missing transactions from the `Network`
//! * Broadcasts new transactions received from the
//! [`TransactionPool`](reth_transaction_pool::TransactionPool) over the `Network`
//!
//! - `ETH request Task`: is a spawned
//! [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) future that:
//!
//! * Responds to incoming ETH related requests: `Headers`, `Bodies`
//!
//! - `Discovery Task`: is a spawned [`Discv4`](reth_discv4::Discv4) future that handles peer
//! discovery and emits new peers to the `Network`
//!
//! - [`NetworkManager`] task advances the state of the `Network`, which includes:
//!
//! * Initiating new _outgoing_ connections to discovered peers
//! * Handling _incoming_ TCP connections from peers
//! * Peer management
//! * Route requests:
//! - from remote peers to corresponding tasks
//! - from local to remote peers
//!
//! ## Usage
//!
//! ### Configure and launch a standalone network
//!
//! The [`NetworkConfig`] is used to configure the network.
//! It requires an instance of [`BlockReader`](reth_storage_api::BlockReader).
//!
//! ```
//! # async fn launch() {
//! use reth_network::{
//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager,
//! };
//! use reth_network_peers::mainnet_nodes;
//! use reth_storage_api::noop::NoopProvider;
//!
//! // This block provider implementation is used for testing purposes.
//! let client = NoopProvider::default();
//!
//! // The key that's used for encrypting sessions and to identify our node.
//! let local_key = rng_secret_key();
//!
//! let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key)
//! .boot_nodes(mainnet_nodes())
//! .build(client);
//!
//! // create the network instance
//! let network = NetworkManager::new(config).await.unwrap();
//!
//! // keep a handle to the network and spawn it
//! let handle = network.handle().clone();
//! tokio::task::spawn(network);
//!
//! # }
//! ```
//!
//! ### Configure all components of the Network with the [`NetworkBuilder`]
//!
//! ```
//! use reth_network::{
//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager,
//! };
//! use reth_network_peers::mainnet_nodes;
//! use reth_storage_api::noop::NoopProvider;
//! use reth_transaction_pool::TransactionPool;
//! async fn launch<Pool: TransactionPool>(pool: Pool) {
//! // This block provider implementation is used for testing purposes.
//! let client = NoopProvider::default();
//!
//! // The key that's used for encrypting sessions and to identify our node.
//! let local_key = rng_secret_key();
//!
//! let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key)
//! .boot_nodes(mainnet_nodes())
//! .build(client.clone());
//! let transactions_manager_config = config.transactions_manager_config.clone();
//!
//! // create the network instance
//! let (handle, network, transactions, request_handler) = NetworkManager::builder(config)
//! .await
//! .unwrap()
//! .transactions(pool, transactions_manager_config)
//! .request_handler(client)
//! .split_with_handle();
//! }
//! ```
//!
//! # Feature Flags
//!
//! - `serde` (default): Enable serde support for configuration types.
//! - `test-utils`: Various utilities helpful for writing tests
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![allow(unreachable_pub)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#[cfg(any(test, feature = "test-utils"))]
/// Common helpers for network testing.
pub mod test_utils;
// Public modules that make up the crate's API surface.
pub mod cache;
pub mod config;
pub mod error;
pub mod eth_requests;
pub mod import;
pub mod message;
pub mod peers;
pub mod protocol;
pub mod transactions;
// Crate-internal implementation modules; selected items are re-exported below.
mod budget;
mod builder;
mod discovery;
mod fetch;
mod flattened_response;
mod listener;
mod manager;
mod metrics;
mod network;
mod session;
mod state;
mod swarm;
mod trusted_peers_resolver;
// Re-exports of commonly used types from dependency crates.
pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols};
pub use reth_eth_wire_types::{primitives, EthNetworkPrimitives, NetworkPrimitives};
pub use reth_network_api::{
    events, BlockDownloaderProvider, DiscoveredEvent, DiscoveryEvent, NetworkEvent,
    NetworkEventListenerProvider, NetworkInfo, PeerRequest, PeerRequestSender, Peers, PeersInfo,
};
pub use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState};
pub use reth_network_types::{PeersConfig, SessionsConfig};
// Re-exports of the crate's own primary types.
pub use session::{
    ActiveSessionHandle, ActiveSessionMessage, Direction, EthRlpxConnection, PeerInfo,
    PendingSessionEvent, PendingSessionHandle, PendingSessionHandshakeError, SessionCommand,
    SessionEvent, SessionId, SessionManager,
};
pub use builder::NetworkBuilder;
pub use config::{NetworkConfig, NetworkConfigBuilder};
pub use discovery::Discovery;
pub use fetch::FetchClient;
pub use flattened_response::FlattenedResponse;
pub use manager::NetworkManager;
pub use metrics::TxTypesCounter;
pub use network::{NetworkHandle, NetworkProtocols};
pub use swarm::NetworkConnectionState;
/// re-export p2p interfaces
pub use reth_network_p2p as p2p;
/// re-export types crates
pub mod types {
    pub use reth_eth_wire_types::*;
    pub use reth_network_types::*;
}
// Referenced only to silence the `unused_crate_dependencies` lint for crates
// used solely in docs or cfg-gated code.
use aquamarine as _;
use smallvec as _;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/trusted_peers_resolver.rs | crates/net/network/src/trusted_peers_resolver.rs | //! Periodically resolves DNS records for a set of trusted peers and emits updates as they complete
use futures::{future::BoxFuture, ready, stream::FuturesUnordered, FutureExt, StreamExt};
use reth_network_peers::{NodeRecord, PeerId, TrustedPeer};
use std::{
io,
task::{Context, Poll},
};
use tokio::time::Interval;
use tracing::warn;
/// `TrustedPeersResolver` periodically spawns DNS resolution tasks for trusted peers.
/// It returns a resolved (`PeerId`, `NodeRecord`) update when one of its in-flight tasks completes.
#[derive(Debug)]
pub struct TrustedPeersResolver {
    /// The list of trusted peers to resolve.
    pub trusted_peers: Vec<TrustedPeer>,
    /// The timer that triggers a new resolution cycle.
    pub interval: Interval,
    /// Futures for currently in-flight resolution tasks.
    ///
    /// Cleared and repopulated on every tick of `interval`, so results always reflect the
    /// most recent resolution cycle.
    pub pending: FuturesUnordered<BoxFuture<'static, (PeerId, Result<NodeRecord, io::Error>)>>,
}
impl TrustedPeersResolver {
    /// Create a new resolver with the given trusted peers and resolution interval.
    pub fn new(trusted_peers: Vec<TrustedPeer>, resolve_interval: Interval) -> Self {
        Self { trusted_peers, interval: resolve_interval, pending: FuturesUnordered::new() }
    }

    /// Update the resolution interval (useful for testing purposes)
    #[allow(dead_code)]
    pub fn set_interval(&mut self, interval: Interval) {
        self.interval = interval;
    }

    /// Poll the resolver.
    ///
    /// When the interval ticks, any still in-flight tasks are discarded and new resolution
    /// futures for each trusted peer are spawned. If a future completes successfully, this
    /// returns the resolved (`PeerId`, `NodeRecord`).
    ///
    /// Failed resolutions are logged and skipped, so a single failing DNS entry does not delay
    /// updates for the remaining trusted peers.
    pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll<(PeerId, NodeRecord)> {
        if self.trusted_peers.is_empty() {
            return Poll::Pending;
        }

        if self.interval.poll_tick(cx).is_ready() {
            // Start a fresh resolution cycle: drop stale tasks and spawn one per trusted peer.
            self.pending.clear();
            for trusted in self.trusted_peers.iter().cloned() {
                let peer_id = trusted.id;
                let task = async move {
                    let result = trusted.resolve().await;
                    (peer_id, result)
                }
                .boxed();
                self.pending.push(task);
            }
        }

        // Keep draining completed futures instead of returning after the first result.
        //
        // Previously an `Err` outcome returned `Poll::Pending` immediately without having
        // registered a waker for the remaining in-flight futures, so already-completed
        // successful resolutions could sit undelivered until the next interval tick.
        loop {
            match ready!(self.pending.poll_next_unpin(cx)) {
                Some((peer_id, Ok(record))) => return Poll::Ready((peer_id, record)),
                Some((peer_id, Err(e))) => {
                    warn!(target: "net::peers", "Failed to resolve trusted peer {:?}: {:?}", peer_id, e);
                }
                None => return Poll::Pending,
            }
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/network.rs | crates/net/network/src/network.rs | use crate::{
config::NetworkMode, message::PeerMessage, protocol::RlpxSubProtocol,
swarm::NetworkConnectionState, transactions::TransactionsHandle, FetchClient,
};
use alloy_primitives::B256;
use enr::Enr;
use futures::StreamExt;
use parking_lot::Mutex;
use reth_discv4::{Discv4, NatResolver};
use reth_discv5::Discv5;
use reth_eth_wire::{
BlockRangeUpdate, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives,
NewPooledTransactionHashes, SharedTransactions,
};
use reth_ethereum_forks::Head;
use reth_network_api::{
events::{NetworkPeersEvents, PeerEvent, PeerEventStream},
test_utils::{PeersHandle, PeersHandleProvider},
BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent,
NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerInfo, PeerRequest, Peers,
PeersInfo,
};
use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider};
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind};
use reth_tokio_util::{EventSender, EventStream};
use secp256k1::SecretKey;
use std::{
net::SocketAddr,
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc,
},
};
use tokio::sync::{
mpsc::{self, UnboundedSender},
oneshot,
};
use tokio_stream::wrappers::UnboundedReceiverStream;
/// A _shareable_ network frontend. Used to interact with the network.
///
/// Cloning is cheap: every clone shares the same [`NetworkInner`] state via an `Arc`.
///
/// See also [`NetworkManager`](crate::NetworkManager).
#[derive(Clone, Debug)]
pub struct NetworkHandle<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The Arc'ed delegate that contains the state.
    inner: Arc<NetworkInner<N>>,
}
// === impl NetworkHandle ===
impl<N: NetworkPrimitives> NetworkHandle<N> {
    /// Creates a single new instance.
    #[expect(clippy::too_many_arguments)]
    pub(crate) fn new(
        num_active_peers: Arc<AtomicUsize>,
        listener_address: Arc<Mutex<SocketAddr>>,
        to_manager_tx: UnboundedSender<NetworkHandleMessage<N>>,
        secret_key: SecretKey,
        local_peer_id: PeerId,
        peers: PeersHandle,
        network_mode: NetworkMode,
        chain_id: Arc<AtomicU64>,
        tx_gossip_disabled: bool,
        discv4: Option<Discv4>,
        discv5: Option<Discv5>,
        event_sender: EventSender<NetworkEvent<PeerRequest<N>>>,
        nat: Option<NatResolver>,
    ) -> Self {
        // Both sync flags start out unset; they are driven by `NetworkSyncUpdater`.
        Self {
            inner: Arc::new(NetworkInner {
                num_active_peers,
                to_manager_tx,
                listener_address,
                secret_key,
                local_peer_id,
                peers,
                network_mode,
                is_syncing: Arc::new(AtomicBool::new(false)),
                initial_sync_done: Arc::new(AtomicBool::new(false)),
                chain_id,
                tx_gossip_disabled,
                discv4,
                discv5,
                event_sender,
                nat,
            }),
        }
    }

    /// Returns the [`PeerId`] used in the network.
    pub fn peer_id(&self) -> &PeerId {
        &self.inner.local_peer_id
    }

    /// Returns the sender half of the channel to the [`NetworkManager`](crate::NetworkManager).
    fn manager(&self) -> &UnboundedSender<NetworkHandleMessage<N>> {
        &self.inner.to_manager_tx
    }

    /// Returns the mode of the network, either pow, or pos
    pub fn mode(&self) -> &NetworkMode {
        &self.inner.network_mode
    }

    /// Sends a [`NetworkHandleMessage`] to the manager
    pub(crate) fn send_message(&self, msg: NetworkHandleMessage<N>) {
        // Sending only fails if the manager task has shut down; there is nothing to do then.
        let _ = self.inner.to_manager_tx.send(msg);
    }

    /// Update the status of the node.
    pub fn update_status(&self, head: Head) {
        self.send_message(NetworkHandleMessage::StatusUpdate { head });
    }

    /// Announce a block over devp2p
    ///
    /// Caution: in `PoS` this is a noop because new blocks are no longer announced over devp2p.
    /// Instead they are sent to the node by CL and can be requested over devp2p.
    /// Broadcasting new blocks is considered a protocol violation.
    pub fn announce_block(&self, block: N::NewBlockPayload, hash: B256) {
        self.send_message(NetworkHandleMessage::AnnounceBlock(block, hash))
    }

    /// Sends a [`PeerRequest`] to the given peer's session.
    pub fn send_request(&self, peer_id: PeerId, request: PeerRequest<N>) {
        self.send_message(NetworkHandleMessage::EthRequest { peer_id, request })
    }

    /// Send transactions hashes to the peer.
    pub fn send_transactions_hashes(&self, peer_id: PeerId, msg: NewPooledTransactionHashes) {
        self.send_message(NetworkHandleMessage::SendPooledTransactionHashes { peer_id, msg })
    }

    /// Send full transactions to the peer
    pub fn send_transactions(&self, peer_id: PeerId, msg: Vec<Arc<N::BroadcastedTransaction>>) {
        let msg = SharedTransactions(msg);
        self.send_message(NetworkHandleMessage::SendTransaction { peer_id, msg })
    }

    /// Send eth message to the peer.
    pub fn send_eth_message(&self, peer_id: PeerId, message: PeerMessage<N>) {
        self.send_message(NetworkHandleMessage::EthMessage { peer_id, message })
    }

    /// Send message to get the [`TransactionsHandle`].
    ///
    /// Returns `None` if no transaction task is installed.
    pub async fn transactions_handle(&self) -> Option<TransactionsHandle<N>> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetTransactionsHandle(tx));
        rx.await.unwrap()
    }

    /// Send message to gracefully shutdown node.
    ///
    /// This will disconnect all active and pending sessions and prevent
    /// new connections to be established.
    pub async fn shutdown(&self) -> Result<(), oneshot::error::RecvError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::Shutdown(tx));
        rx.await
    }

    /// Set network connection state to Active.
    ///
    /// New outbound connections will be established if there's capacity.
    pub fn set_network_active(&self) {
        self.set_network_conn(NetworkConnectionState::Active);
    }

    /// Set network connection state to Hibernate.
    ///
    /// No new outbound connections will be established.
    pub fn set_network_hibernate(&self) {
        self.set_network_conn(NetworkConnectionState::Hibernate);
    }

    /// Set network connection state.
    fn set_network_conn(&self, network_conn: NetworkConnectionState) {
        self.send_message(NetworkHandleMessage::SetNetworkState(network_conn));
    }

    /// Whether tx gossip is disabled
    pub fn tx_gossip_disabled(&self) -> bool {
        self.inner.tx_gossip_disabled
    }

    /// Returns the secret key used for authenticating sessions.
    pub fn secret_key(&self) -> &SecretKey {
        &self.inner.secret_key
    }
}
// === API Implementations ===
impl<N: NetworkPrimitives> NetworkPeersEvents for NetworkHandle<N> {
    /// Returns a stream of peer-level events, mapping session establishment into [`PeerEvent`]s.
    fn peer_events(&self) -> PeerEventStream {
        let mapped = self.inner.event_sender.new_listener().map(|event| match event {
            NetworkEvent::ActivePeerSession { info, .. } => PeerEvent::SessionEstablished(info),
            NetworkEvent::Peer(peer_event) => peer_event,
        });
        PeerEventStream::new(mapped)
    }
}
impl<N: NetworkPrimitives> NetworkEventListenerProvider for NetworkHandle<N> {
    type Primitives = N;

    fn event_listener(&self) -> EventStream<NetworkEvent<PeerRequest<Self::Primitives>>> {
        self.inner.event_sender.new_listener()
    }

    fn discovery_listener(&self) -> UnboundedReceiverStream<DiscoveryEvent> {
        // Register a fresh channel with the manager, which forwards discovery events into it.
        let (events_tx, events_rx) = mpsc::unbounded_channel();
        self.send_message(NetworkHandleMessage::DiscoveryListener(events_tx));
        UnboundedReceiverStream::new(events_rx)
    }
}
impl<N: NetworkPrimitives> NetworkProtocols for NetworkHandle<N> {
    /// Forwards the additional `RLPx` sub-protocol to the manager task.
    fn add_rlpx_sub_protocol(&self, protocol: RlpxSubProtocol) {
        self.send_message(NetworkHandleMessage::AddRlpxSubProtocol(protocol));
    }
}
impl<N: NetworkPrimitives> PeersInfo for NetworkHandle<N> {
    fn num_connected_peers(&self) -> usize {
        self.inner.num_active_peers.load(Ordering::Relaxed)
    }

    fn local_node_record(&self) -> NodeRecord {
        // Prefer the records maintained by the discovery services, when running.
        if let Some(discv4) = &self.inner.discv4 {
            return discv4.node_record()
        }
        if let Some(record) = self.inner.discv5.as_ref().and_then(|d| d.node_record()) {
            return record
        }

        // No discovery record available: derive one from the RLPx listener address.
        let mut socket_addr = *self.inner.listener_address.lock();
        if let Some(ip) = self.inner.nat.and_then(|nat| nat.as_external_ip()) {
            // If able to resolve the external ip, advertise it instead of the local address.
            socket_addr.set_ip(ip);
        } else if socket_addr.ip().is_unspecified() {
            // The zero address is invalid; fall back to loopback of the matching family.
            let loopback = if socket_addr.ip().is_ipv4() {
                std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST)
            } else {
                std::net::IpAddr::V6(std::net::Ipv6Addr::LOCALHOST)
            };
            socket_addr.set_ip(loopback);
        }
        NodeRecord::new(socket_addr, *self.peer_id())
    }

    fn local_enr(&self) -> Enr<SecretKey> {
        let record = self.local_node_record();
        let mut builder = Enr::builder();
        builder.ip(record.address);
        if record.address.is_ipv4() {
            builder.udp4(record.udp_port);
            builder.tcp4(record.tcp_port);
        } else {
            builder.udp6(record.udp_port);
            builder.tcp6(record.tcp_port);
        }
        builder.build(&self.inner.secret_key).expect("valid enr")
    }
}
impl<N: NetworkPrimitives> Peers for NetworkHandle<N> {
    fn add_trusted_peer_id(&self, peer: PeerId) {
        self.send_message(NetworkHandleMessage::AddTrustedPeerId(peer));
    }

    /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to add a peer to the known
    /// set, with the given kind.
    fn add_peer_kind(
        &self,
        peer: PeerId,
        kind: PeerKind,
        tcp_addr: SocketAddr,
        udp_addr: Option<SocketAddr>,
    ) {
        self.send_message(NetworkHandleMessage::AddPeerAddress(
            peer,
            kind,
            PeerAddr::new(tcp_addr, udp_addr),
        ));
    }

    async fn get_peers_by_kind(&self, kind: PeerKind) -> Result<Vec<PeerInfo>, NetworkError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetPeerInfosByPeerKind(kind, tx));
        rx.await.map_err(Into::into)
    }

    async fn get_all_peers(&self) -> Result<Vec<PeerInfo>, NetworkError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetPeerInfos(tx));
        rx.await.map_err(Into::into)
    }

    async fn get_peer_by_id(&self, peer_id: PeerId) -> Result<Option<PeerInfo>, NetworkError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetPeerInfoById(peer_id, tx));
        rx.await.map_err(Into::into)
    }

    async fn get_peers_by_id(&self, peer_ids: Vec<PeerId>) -> Result<Vec<PeerInfo>, NetworkError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetPeerInfosByIds(peer_ids, tx));
        rx.await.map_err(Into::into)
    }

    /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to remove a peer from the
    /// set corresponding to given kind.
    fn remove_peer(&self, peer: PeerId, kind: PeerKind) {
        self.send_message(NetworkHandleMessage::RemovePeer(peer, kind));
    }

    /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to disconnect an existing
    /// connection to the given peer.
    fn disconnect_peer(&self, peer: PeerId) {
        self.send_message(NetworkHandleMessage::DisconnectPeer(peer, None));
    }

    /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to disconnect an existing
    /// connection to the given peer using the provided reason
    fn disconnect_peer_with_reason(&self, peer: PeerId, reason: DisconnectReason) {
        self.send_message(NetworkHandleMessage::DisconnectPeer(peer, Some(reason)));
    }

    /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to connect to the given
    /// peer.
    fn connect_peer_kind(
        &self,
        peer_id: PeerId,
        kind: PeerKind,
        tcp_addr: SocketAddr,
        udp_addr: Option<SocketAddr>,
    ) {
        self.send_message(NetworkHandleMessage::ConnectPeer(
            peer_id,
            kind,
            PeerAddr::new(tcp_addr, udp_addr),
        ));
    }

    /// Send a reputation change for the given peer.
    fn reputation_change(&self, peer_id: PeerId, kind: ReputationChangeKind) {
        self.send_message(NetworkHandleMessage::ReputationChange(peer_id, kind));
    }

    async fn reputation_by_id(&self, peer_id: PeerId) -> Result<Option<Reputation>, NetworkError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetReputationById(peer_id, tx));
        rx.await.map_err(Into::into)
    }
}
impl<N: NetworkPrimitives> PeersHandleProvider for NetworkHandle<N> {
    /// Returns the peer-set handle shared with the manager.
    fn peers_handle(&self) -> &PeersHandle {
        &self.inner.peers
    }
}
impl<N: NetworkPrimitives> NetworkInfo for NetworkHandle<N> {
    fn local_addr(&self) -> SocketAddr {
        *self.inner.listener_address.lock()
    }

    async fn network_status(&self) -> Result<NetworkStatus, NetworkError> {
        // Ask the manager task for the current status.
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::GetStatus(tx));
        Ok(rx.await?)
    }

    fn chain_id(&self) -> u64 {
        self.inner.chain_id.load(Ordering::Relaxed)
    }

    fn is_syncing(&self) -> bool {
        SyncStateProvider::is_syncing(self)
    }

    fn is_initially_syncing(&self) -> bool {
        SyncStateProvider::is_initially_syncing(self)
    }
}
impl<N: NetworkPrimitives> SyncStateProvider for NetworkHandle<N> {
    fn is_syncing(&self) -> bool {
        self.inner.is_syncing.load(Ordering::Relaxed)
    }

    // Used to guard the txpool: once the initial sync is marked done, this is always `false`.
    fn is_initially_syncing(&self) -> bool {
        !self.inner.initial_sync_done.load(Ordering::Relaxed) &&
            self.inner.is_syncing.load(Ordering::Relaxed)
    }
}
impl<N: NetworkPrimitives> NetworkSyncUpdater for NetworkHandle<N> {
    fn update_sync_state(&self, state: SyncState) {
        let now_syncing = state.is_syncing();
        let was_syncing = self.inner.is_syncing.swap(now_syncing, Ordering::Relaxed);
        // A syncing -> idle transition marks the end of the initial sync.
        if was_syncing && !now_syncing {
            self.inner.initial_sync_done.store(true, Ordering::Relaxed);
        }
    }

    /// Update the status of the node.
    fn update_status(&self, head: Head) {
        self.send_message(NetworkHandleMessage::StatusUpdate { head });
    }

    /// Updates the advertised block range.
    fn update_block_range(&self, update: reth_eth_wire::BlockRangeUpdate) {
        self.send_message(NetworkHandleMessage::InternalBlockRangeUpdate(update));
    }
}
impl<N: NetworkPrimitives> BlockDownloaderProvider for NetworkHandle<N> {
    type Client = FetchClient<N>;

    /// Requests a [`FetchClient`] from the manager task.
    ///
    /// Errors only if the manager task dropped the request before answering.
    async fn fetch_client(&self) -> Result<Self::Client, oneshot::error::RecvError> {
        let (tx, rx) = oneshot::channel();
        self.send_message(NetworkHandleMessage::FetchClient(tx));
        rx.await
    }
}
/// Shared state behind a [`NetworkHandle`].
///
/// Wrapped in an `Arc`, so all clones of a handle observe the same state.
#[derive(Debug)]
struct NetworkInner<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Number of active peer sessions the node's currently handling.
    num_active_peers: Arc<AtomicUsize>,
    /// Sender half of the message channel to the [`crate::NetworkManager`].
    to_manager_tx: UnboundedSender<NetworkHandleMessage<N>>,
    /// The local address that accepts incoming connections.
    listener_address: Arc<Mutex<SocketAddr>>,
    /// The secret key used for authenticating sessions.
    secret_key: SecretKey,
    /// The identifier used by this node.
    local_peer_id: PeerId,
    /// Access to all the nodes.
    peers: PeersHandle,
    /// The mode of the network
    network_mode: NetworkMode,
    /// Represents if the network is currently syncing.
    is_syncing: Arc<AtomicBool>,
    /// Used to differentiate between an initial pipeline sync or a live sync
    initial_sync_done: Arc<AtomicBool>,
    /// The chain id
    chain_id: Arc<AtomicU64>,
    /// Whether to disable transaction gossip
    tx_gossip_disabled: bool,
    /// The instance of the discv4 service
    discv4: Option<Discv4>,
    /// The instance of the discv5 service
    discv5: Option<Discv5>,
    /// Sender for high level network events.
    event_sender: EventSender<NetworkEvent<PeerRequest<N>>>,
    /// The NAT resolver
    nat: Option<NatResolver>,
}
/// Provides access to modify the network's additional protocol handlers.
///
/// Implemented by [`NetworkHandle`].
pub trait NetworkProtocols: Send + Sync {
    /// Adds an additional protocol handler to the `RLPx` sub-protocol list.
    fn add_rlpx_sub_protocol(&self, protocol: RlpxSubProtocol);
}
/// Internal messages that can be passed to the [`NetworkManager`](crate::NetworkManager).
///
/// These are sent by a [`NetworkHandle`] over an unbounded channel and processed by the
/// manager's event loop.
#[derive(Debug)]
pub(crate) enum NetworkHandleMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Marks a peer as trusted.
    AddTrustedPeerId(PeerId),
    /// Adds an address for a peer, including its ID, kind, and socket address.
    AddPeerAddress(PeerId, PeerKind, PeerAddr),
    /// Removes a peer from the peerset corresponding to the given kind.
    RemovePeer(PeerId, PeerKind),
    /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason.
    DisconnectPeer(PeerId, Option<DisconnectReason>),
    /// Broadcasts an event to announce a new block to all nodes.
    AnnounceBlock(N::NewBlockPayload, B256),
    /// Sends a list of transactions to the given peer.
    SendTransaction {
        /// The ID of the peer to which the transactions are sent.
        peer_id: PeerId,
        /// The shared transactions to send.
        msg: SharedTransactions<N::BroadcastedTransaction>,
    },
    /// Sends a list of transaction hashes to the given peer.
    SendPooledTransactionHashes {
        /// The ID of the peer to which the transaction hashes are sent.
        peer_id: PeerId,
        /// The new pooled transaction hashes to send.
        msg: NewPooledTransactionHashes,
    },
    /// Sends an `eth` protocol request to the peer.
    EthRequest {
        /// The peer to send the request to.
        peer_id: PeerId,
        /// The request to send to the peer's sessions.
        request: PeerRequest<N>,
    },
    /// Sends an `eth` protocol message to the peer.
    EthMessage {
        /// The peer to send the message to.
        peer_id: PeerId,
        /// The `eth` protocol message to send to the peer's session.
        message: PeerMessage<N>,
    },
    /// Applies a reputation change to the given peer.
    ReputationChange(PeerId, ReputationChangeKind),
    /// Returns the client that can be used to interact with the network.
    FetchClient(oneshot::Sender<FetchClient<N>>),
    /// Applies a status update.
    StatusUpdate {
        /// The head status to apply.
        head: Head,
    },
    /// Retrieves the current status via a oneshot sender.
    GetStatus(oneshot::Sender<NetworkStatus>),
    /// Gets `PeerInfo` for the specified peer IDs.
    GetPeerInfosByIds(Vec<PeerId>, oneshot::Sender<Vec<PeerInfo>>),
    /// Gets `PeerInfo` from all the peers via a oneshot sender.
    GetPeerInfos(oneshot::Sender<Vec<PeerInfo>>),
    /// Gets `PeerInfo` for a specific peer via a oneshot sender.
    GetPeerInfoById(PeerId, oneshot::Sender<Option<PeerInfo>>),
    /// Gets `PeerInfo` for a specific peer kind via a oneshot sender.
    GetPeerInfosByPeerKind(PeerKind, oneshot::Sender<Vec<PeerInfo>>),
    /// Gets the reputation for a specific peer via a oneshot sender.
    GetReputationById(PeerId, oneshot::Sender<Option<Reputation>>),
    /// Retrieves the `TransactionsHandle` via a oneshot sender.
    GetTransactionsHandle(oneshot::Sender<Option<TransactionsHandle<N>>>),
    /// Initiates a graceful shutdown of the network via a oneshot sender.
    Shutdown(oneshot::Sender<()>),
    /// Sets the network state between hibernation and active.
    SetNetworkState(NetworkConnectionState),
    /// Adds a new listener for `DiscoveryEvent`.
    DiscoveryListener(UnboundedSender<DiscoveryEvent>),
    /// Adds an additional `RlpxSubProtocol`.
    AddRlpxSubProtocol(RlpxSubProtocol),
    /// Connect to the given peer.
    ConnectPeer(PeerId, PeerKind, PeerAddr),
    /// Message to update the node's advertised block range information.
    InternalBlockRangeUpdate(BlockRangeUpdate),
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/manager.rs | crates/net/network/src/manager.rs | //! High level network management.
//!
//! The [`NetworkManager`] contains the state of the network as a whole. It controls how connections
//! are handled and keeps track of connections to peers.
//!
//! ## Capabilities
//!
//! The network manages peers depending on their announced capabilities via their `RLPx` sessions. The most important of these is the [Ethereum Wire Protocol](https://github.com/ethereum/devp2p/blob/master/caps/eth.md) (`eth`).
//!
//! ## Overview
//!
//! The [`NetworkManager`] is responsible for advancing the state of the `network`. The `network` is
//! made up of peer-to-peer connections between nodes that are available on the same network.
//! Ethereum's discovery protocol (discv4, discv5) is responsible for peer discovery. If the address
//! (IP+port) of our node is published via discovery, remote peers can initiate inbound connections
//! to the local node. Once a (TCP) connection is established, both peers start to authenticate an [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. If the handshake was successful, both peers announce their capabilities and are then ready to exchange sub-protocol messages via the `RLPx` session.
use crate::{
budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM},
config::NetworkConfig,
discovery::Discovery,
error::{NetworkError, ServiceKind},
eth_requests::IncomingEthRequest,
import::{BlockImport, BlockImportEvent, BlockImportOutcome, BlockValidation, NewBlockEvent},
listener::ConnectionListener,
message::{NewBlockMessage, PeerMessage},
metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE},
network::{NetworkHandle, NetworkHandleMessage},
peers::PeersManager,
poll_nested_stream_with_budget,
protocol::IntoRlpxSubProtocol,
session::SessionManager,
state::NetworkState,
swarm::{Swarm, SwarmEvent},
transactions::NetworkTransactionEvent,
FetchClient, NetworkBuilder,
};
use futures::{Future, StreamExt};
use parking_lot::Mutex;
use reth_chainspec::EnrForkIdEntry;
use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, NetworkPrimitives};
use reth_fs_util::{self as fs, FsPathError};
use reth_metrics::common::mpsc::UnboundedMeteredSender;
use reth_network_api::{
events::{PeerEvent, SessionInfo},
test_utils::PeersHandle,
EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest,
};
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::ReputationChangeKind;
use reth_storage_api::BlockNumReader;
use reth_tasks::shutdown::GracefulShutdown;
use reth_tokio_util::EventSender;
use secp256k1::SecretKey;
use std::{
net::SocketAddr,
path::Path,
pin::Pin,
sync::{
atomic::{AtomicU64, AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
time::{Duration, Instant},
};
use tokio::sync::mpsc::{self, error::TrySendError};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{debug, error, trace, warn};
#[cfg_attr(doc, aquamarine::aquamarine)]
// TODO: Inlined diagram due to a bug in aquamarine library, should become an include when it's
// fixed. See https://github.com/mersinvald/aquamarine/issues/50
// include_mmd!("docs/mermaid/network-manager.mmd")
/// Manages the _entire_ state of the network.
///
/// This is an endless [`Future`] that consistently drives the state of the entire network forward.
///
/// The [`NetworkManager`] is the container type for all parts involved with advancing the network.
///
/// ```mermaid
/// graph TB
/// handle(NetworkHandle)
/// events(NetworkEvents)
/// transactions(Transactions Task)
/// ethrequest(ETH Request Task)
/// discovery(Discovery Task)
/// subgraph NetworkManager
/// direction LR
/// subgraph Swarm
/// direction TB
/// B1[(Session Manager)]
/// B2[(Connection Listener)]
/// B3[(Network State)]
/// end
/// end
/// handle <--> |request response channel| NetworkManager
/// NetworkManager --> |Network events| events
/// transactions <--> |transactions| NetworkManager
/// ethrequest <--> |ETH request handing| NetworkManager
/// discovery --> |Discovered peers| NetworkManager
/// ```
#[derive(Debug)]
#[must_use = "The NetworkManager does nothing unless polled"]
pub struct NetworkManager<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The type that manages the actual network part, which includes connections.
    swarm: Swarm<N>,
    /// Underlying network handle that can be shared.
    handle: NetworkHandle<N>,
    /// Receiver half of the command channel set up between this type and the [`NetworkHandle`]
    from_handle_rx: UnboundedReceiverStream<NetworkHandleMessage<N>>,
    /// Handles block imports according to the `eth` protocol.
    block_import: Box<dyn BlockImport<N::NewBlockPayload>>,
    /// Sender for high level network events.
    event_sender: EventSender<NetworkEvent<PeerRequest<N>>>,
    /// Sender half to send events to the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured.
    to_transactions_manager: Option<UnboundedMeteredSender<NetworkTransactionEvent<N>>>,
    /// Sender half to send events to the
    /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured.
    ///
    /// The channel that originally receives and bundles all requests from all sessions is already
    /// bounded. However, since handling an eth request is more I/O intensive than delegating
    /// them from the bounded channel to the eth-request channel, it is possible that this
    /// builds up if the node is flooded with requests.
    ///
    /// Even though nonmalicious requests are relatively cheap, it's possible to craft
    /// body requests with bogus data up until the allowed max message size limit.
    /// Thus, we use a bounded channel here to avoid unbounded build up if the node is flooded with
    /// requests. This channel size is set at
    /// [`ETH_REQUEST_CHANNEL_CAPACITY`](crate::builder::ETH_REQUEST_CHANNEL_CAPACITY)
    to_eth_request_handler: Option<mpsc::Sender<IncomingEthRequest<N>>>,
    /// Tracks the number of active sessions (connected peers).
    ///
    /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`].
    num_active_peers: Arc<AtomicUsize>,
    /// Metrics for the Network
    metrics: NetworkMetrics,
    /// Disconnect metrics for the Network
    disconnect_metrics: DisconnectMetrics,
}
impl NetworkManager {
    /// Creates the manager of a new network with [`EthNetworkPrimitives`] types.
    ///
    /// Convenience wrapper around [`NetworkManager::new`] pinned to [`EthNetworkPrimitives`].
    ///
    /// ```no_run
    /// # async fn f() {
    /// use reth_chainspec::MAINNET;
    /// use reth_network::{NetworkConfig, NetworkManager};
    /// let config =
    ///     NetworkConfig::builder_with_rng_secret_key().build_with_noop_provider(MAINNET.clone());
    /// let manager = NetworkManager::eth(config).await;
    /// # }
    /// ```
    pub async fn eth<C: BlockNumReader + 'static>(
        config: NetworkConfig<C, EthNetworkPrimitives>,
    ) -> Result<Self, NetworkError> {
        Self::new(config).await
    }
}
impl<N: NetworkPrimitives> NetworkManager<N> {
/// Sets the dedicated channel for events intended for the
/// [`TransactionsManager`](crate::transactions::TransactionsManager).
    pub fn with_transactions(
        mut self,
        tx: mpsc::UnboundedSender<NetworkTransactionEvent<N>>,
    ) -> Self {
        // Builder-style variant of `set_transactions`: consumes and returns `self` for chaining.
        self.set_transactions(tx);
        self
    }
/// Sets the dedicated channel for events intended for the
/// [`TransactionsManager`](crate::transactions::TransactionsManager).
pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender<NetworkTransactionEvent<N>>) {
    // Wrap the raw sender so every forwarded event is counted under the
    // pool-transactions metrics scope.
    let metered = UnboundedMeteredSender::new(tx, NETWORK_POOL_TRANSACTIONS_SCOPE);
    self.to_transactions_manager = Some(metered);
}
/// Builder-style variant of [`Self::set_eth_request_handler`]: installs the channel used
/// to deliver incoming eth requests to the
/// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) and returns `self`.
pub fn with_eth_request_handler(mut self, tx: mpsc::Sender<IncomingEthRequest<N>>) -> Self {
    // No wrapping required here — store the bounded sender directly.
    self.to_eth_request_handler = Some(tx);
    self
}
/// Sets the dedicated channel for events intended for the
/// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler).
///
/// The channel is bounded; see [`Self::delegate_eth_request`] for the drop behavior
/// when it is at capacity.
pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender<IncomingEthRequest<N>>) {
    self.to_eth_request_handler = Some(tx);
}
/// Adds an additional protocol handler to the `RLPx` sub-protocol list.
///
/// Delegates to the swarm, which owns the session-level protocol configuration.
pub fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
    self.swarm.add_rlpx_sub_protocol(protocol)
}
/// Returns the [`NetworkHandle`] that can be cloned and shared.
///
/// The [`NetworkHandle`] can be used to interact with this [`NetworkManager`];
/// messages sent through it arrive via the internal handle channel and are processed
/// by [`Self::on_handle_message`].
pub const fn handle(&self) -> &NetworkHandle<N> {
    &self.handle
}
/// Returns the secret key used for authenticating sessions.
///
/// This is the same key handed to the session manager and discovery services at
/// construction time.
pub const fn secret_key(&self) -> SecretKey {
    self.swarm.sessions().secret_key()
}
/// Records the durations measured during one `poll` pass into the network metrics.
#[inline]
fn update_poll_metrics(&self, start: Instant, poll_durations: NetworkManagerPollDurations) {
    let NetworkManagerPollDurations { acc_network_handle, acc_swarm } = poll_durations;
    // total wall time of the whole poll call
    self.metrics.duration_poll_network_manager.set(start.elapsed().as_secs_f64());
    // time accumulated inside each of the nested poll loops
    self.metrics.acc_duration_poll_network_handle.set(acc_network_handle.as_secs_f64());
    self.metrics.acc_duration_poll_swarm.set(acc_swarm.as_secs_f64());
}
/// Creates the manager of a new network.
///
/// The [`NetworkManager`] is an endless future that needs to be polled in order to advance the
/// state of the entire network.
///
/// # Errors
///
/// Returns a [`NetworkError`] if the TCP listener cannot be bound, if a boot node fails
/// to resolve, or if the discovery services fail to start.
pub async fn new<C: BlockNumReader + 'static>(
    config: NetworkConfig<C, N>,
) -> Result<Self, NetworkError> {
    // Destructure the config so every component is moved exactly once.
    let NetworkConfig {
        client,
        secret_key,
        discovery_v4_addr,
        mut discovery_v4_config,
        mut discovery_v5_config,
        listener_addr,
        peers_config,
        sessions_config,
        chain_id,
        block_import,
        network_mode,
        boot_nodes,
        executor,
        hello_message,
        status,
        fork_filter,
        dns_discovery_config,
        extra_protocols,
        tx_gossip_disabled,
        // handled by the transactions manager, not by this type
        transactions_manager_config: _,
        nat,
        handshake,
    } = config;
    let peers_manager = PeersManager::new(peers_config);
    let peers_handle = peers_manager.handle();
    let incoming = ConnectionListener::bind(listener_addr).await.map_err(|err| {
        NetworkError::from_io_error(err, ServiceKind::Listener(listener_addr))
    })?;
    // retrieve the tcp address of the socket
    let listener_addr = incoming.local_address();
    // resolve boot nodes
    let resolved_boot_nodes =
        futures::future::try_join_all(boot_nodes.iter().map(|record| record.resolve())).await?;
    if let Some(disc_config) = discovery_v4_config.as_mut() {
        // merge configured boot nodes
        disc_config.bootstrap_nodes.extend(resolved_boot_nodes.clone());
        // add the forkid entry for EIP-868, but wrap it in an `EnrForkIdEntry` for proper
        // encoding
        disc_config.add_eip868_pair("eth", EnrForkIdEntry::from(status.forkid));
    }
    if let Some(discv5) = discovery_v5_config.as_mut() {
        // merge configured boot nodes
        discv5.extend_unsigned_boot_nodes(resolved_boot_nodes)
    }
    let discovery = Discovery::new(
        listener_addr,
        discovery_v4_addr,
        secret_key,
        discovery_v4_config,
        discovery_v5_config,
        dns_discovery_config,
    )
    .await?;
    // need to retrieve the addr here since provided port could be `0`
    let local_peer_id = discovery.local_id();
    let discv4 = discovery.discv4();
    let discv5 = discovery.discv5();
    // shared counter, updated on session events and read through the handle
    let num_active_peers = Arc::new(AtomicUsize::new(0));
    let sessions = SessionManager::new(
        secret_key,
        sessions_config,
        executor,
        status,
        hello_message,
        fork_filter,
        extra_protocols,
        handshake,
    );
    let state = NetworkState::new(
        crate::state::BlockNumReader::new(client),
        discovery,
        peers_manager,
        Arc::clone(&num_active_peers),
    );
    let swarm = Swarm::new(incoming, sessions, state);
    // channel through which `NetworkHandle`s deliver `NetworkHandleMessage`s
    let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel();
    let event_sender: EventSender<NetworkEvent<PeerRequest<N>>> = Default::default();
    let handle = NetworkHandle::new(
        Arc::clone(&num_active_peers),
        Arc::new(Mutex::new(listener_addr)),
        to_manager_tx,
        secret_key,
        local_peer_id,
        peers_handle,
        network_mode,
        Arc::new(AtomicU64::new(chain_id)),
        tx_gossip_disabled,
        discv4,
        discv5,
        event_sender.clone(),
        nat,
    );
    Ok(Self {
        swarm,
        handle,
        from_handle_rx: UnboundedReceiverStream::new(from_handle_rx),
        block_import,
        event_sender,
        // the dedicated channels are attached later via the `with_*`/`set_*` methods
        to_transactions_manager: None,
        to_eth_request_handler: None,
        num_active_peers,
        metrics: Default::default(),
        disconnect_metrics: Default::default(),
    })
}
/// Create a new [`NetworkManager`] instance and start a [`NetworkBuilder`] to configure all
/// components of the network
///
/// ```
/// use reth_network::{
///     config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager,
/// };
/// use reth_network_peers::mainnet_nodes;
/// use reth_storage_api::noop::NoopProvider;
/// use reth_transaction_pool::TransactionPool;
/// async fn launch<Pool: TransactionPool>(pool: Pool) {
///     // This block provider implementation is used for testing purposes.
///     let client = NoopProvider::default();
///
///     // The key that's used for encrypting sessions and to identify our node.
///     let local_key = rng_secret_key();
///
///     let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key)
///         .boot_nodes(mainnet_nodes())
///         .build(client.clone());
///     let transactions_manager_config = config.transactions_manager_config.clone();
///
///     // create the network instance
///     let (handle, network, transactions, request_handler) = NetworkManager::builder(config)
///         .await
///         .unwrap()
///         .transactions(pool, transactions_manager_config)
///         .request_handler(client)
///         .split_with_handle();
/// }
/// ```
pub async fn builder<C: BlockNumReader + 'static>(
    config: NetworkConfig<C, N>,
) -> Result<NetworkBuilder<(), (), N>, NetworkError> {
    // Construct the manager, then wrap it into the builder in a single expression.
    Ok(Self::new(config).await?.into_builder())
}
/// Wraps this manager into a [`NetworkBuilder`] so the remaining network components
/// (transactions manager, request handler) can be attached.
pub const fn into_builder(self) -> NetworkBuilder<(), (), N> {
    // Both component slots start out unconfigured (unit type).
    NetworkBuilder { transactions: (), request_handler: (), network: self }
}
/// Returns the [`SocketAddr`] that listens for incoming tcp connections.
///
/// This is the resolved address of the bound listener (relevant when port `0` was
/// requested and the OS picked one).
pub const fn local_addr(&self) -> SocketAddr {
    self.swarm.listener().local_address()
}
/// How many peers we're currently connected to.
///
/// Counts peers with a fully established (active) session.
pub fn num_connected_peers(&self) -> usize {
    self.swarm.state().num_active_peers()
}
/// Returns the [`PeerId`] used in the network.
///
/// This is the local node's identity, derived from the secret key.
pub fn peer_id(&self) -> &PeerId {
    self.handle.peer_id()
}
/// Returns an iterator over all peers in the peer set.
///
/// This iterates the known peer set, not just the currently connected sessions.
pub fn all_peers(&self) -> impl Iterator<Item = NodeRecord> + '_ {
    self.swarm.state().peers().iter_peers()
}
/// Returns the number of peers in the peer set.
///
/// Like [`Self::all_peers`], this counts known peers, not active sessions.
pub fn num_known_peers(&self) -> usize {
    self.swarm.state().peers().num_known_peers()
}
/// Returns a new [`PeersHandle`] that can be cloned and shared.
///
/// The [`PeersHandle`] can be used to interact with the network's peer set.
pub fn peers_handle(&self) -> PeersHandle {
    self.swarm.state().peers().handle()
}
/// Collect the peers from the [`NetworkManager`] and write them to the given
/// `persistent_peers_file` as JSON.
pub fn write_peers_to_file(&self, persistent_peers_file: &Path) -> Result<(), FsPathError> {
    // Make sure the parent directory exists before writing the file.
    if let Some(parent) = persistent_peers_file.parent() {
        fs::create_dir_all(parent)?;
    }
    let known_peers: Vec<_> = self.all_peers().collect();
    reth_fs_util::write_json_file(persistent_peers_file, &known_peers)?;
    Ok(())
}
/// Returns a new [`FetchClient`] that can be cloned and shared.
///
/// The [`FetchClient`] is the entrypoint for sending requests to the network.
pub fn fetch_client(&self) -> FetchClient<N> {
    self.swarm.state().fetch_client()
}
/// Returns the current [`NetworkStatus`] for the local node.
///
/// The status is assembled from the session manager's handshake state: the `Hello`
/// message (client version, protocol version, capabilities) and the eth status
/// (head hash, chain id, genesis).
pub fn status(&self) -> NetworkStatus {
    let sessions = self.swarm.sessions();
    let status = sessions.status();
    let hello_message = sessions.hello_message();
    // The advertised capability list comes from the protocols announced in `Hello`.
    let capabilities = hello_message.protocols.into_iter().map(|p| p.cap).collect();
    #[expect(deprecated)]
    NetworkStatus {
        client_version: hello_message.client_version,
        protocol_version: hello_message.protocol_version as u64,
        eth_protocol_info: EthProtocolInfo {
            // total difficulty is no longer tracked post-merge
            difficulty: None,
            head: status.blockhash,
            network: status.chain.id(),
            genesis: status.genesis,
            config: Default::default(),
        },
        capabilities,
    }
}
/// Sends an event to the [`TransactionsManager`](crate::transactions::TransactionsManager) if
/// configured.
fn notify_tx_manager(&self, event: NetworkTransactionEvent<N>) {
if let Some(ref tx) = self.to_transactions_manager {
let _ = tx.send(event);
}
}
/// Sends an event to the [`EthRequestManager`](crate::eth_requests::EthRequestHandler) if
/// configured.
fn delegate_eth_request(&self, event: IncomingEthRequest<N>) {
if let Some(ref reqs) = self.to_eth_request_handler {
let _ = reqs.try_send(event).map_err(|e| {
if let TrySendError::Full(_) = e {
debug!(target:"net", "EthRequestHandler channel is full!");
self.metrics.total_dropped_eth_requests_at_full_capacity.increment(1);
}
});
}
}
/// Handle an incoming request from the peer.
///
/// Pooled-transaction queries are served by the transactions manager; every other
/// request variant is translated into an [`IncomingEthRequest`] and delegated to the
/// eth request handler.
fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest<N>) {
    let event = match req {
        PeerRequest::GetBlockHeaders { request, response } => {
            IncomingEthRequest::GetBlockHeaders { peer_id, request, response }
        }
        PeerRequest::GetBlockBodies { request, response } => {
            IncomingEthRequest::GetBlockBodies { peer_id, request, response }
        }
        PeerRequest::GetNodeData { request, response } => {
            IncomingEthRequest::GetNodeData { peer_id, request, response }
        }
        PeerRequest::GetReceipts { request, response } => {
            IncomingEthRequest::GetReceipts { peer_id, request, response }
        }
        PeerRequest::GetReceipts69 { request, response } => {
            IncomingEthRequest::GetReceipts69 { peer_id, request, response }
        }
        PeerRequest::GetPooledTransactions { request, response } => {
            // handled by the transactions manager, not the eth request handler
            self.notify_tx_manager(NetworkTransactionEvent::GetPooledTransactions {
                peer_id,
                request,
                response,
            });
            return
        }
    };
    self.delegate_eth_request(event)
}
/// Invoked after a `NewBlock` message from the peer was validated
///
/// Announcements simply propagate the (pre-)validated block; import outcomes
/// additionally update the sending peer's tracked head on success or slash its
/// reputation on failure.
fn on_block_import_result(&mut self, event: BlockImportEvent<N::NewBlockPayload>) {
    match event {
        BlockImportEvent::Announcement(validation) => match validation {
            BlockValidation::ValidHeader { block } => {
                // header validated: broadcast the full `NewBlock` message
                self.swarm.state_mut().announce_new_block(block);
            }
            BlockValidation::ValidBlock { block } => {
                // fully validated: broadcast only the hash announcement
                self.swarm.state_mut().announce_new_block_hash(block);
            }
        },
        BlockImportEvent::Outcome(outcome) => {
            let BlockImportOutcome { peer, result } = outcome;
            match result {
                Ok(validated_block) => match validated_block {
                    BlockValidation::ValidHeader { block } => {
                        // record the sending peer's new head before re-announcing
                        self.swarm.state_mut().update_peer_block(
                            &peer,
                            block.hash,
                            block.number(),
                        );
                        self.swarm.state_mut().announce_new_block(block);
                    }
                    BlockValidation::ValidBlock { block } => {
                        self.swarm.state_mut().announce_new_block_hash(block);
                    }
                },
                Err(_err) => {
                    // invalid block: penalize the peer that sent it
                    self.swarm
                        .state_mut()
                        .peers_mut()
                        .apply_reputation_change(&peer, ReputationChangeKind::BadBlock);
                }
            }
        }
    }
}
/// Enforces [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p) consensus rules for the network protocol
///
/// Depending on the mode of the network:
/// - disconnect peer if in POS
/// - execute the closure if in POW
fn within_pow_or_disconnect<F>(&mut self, peer_id: PeerId, only_pow: F)
where
    F: FnOnce(&mut Self),
{
    if !self.handle.mode().is_stake() {
        // proof-of-work: the message is legal, run the handler
        only_pow(self);
        return
    }
    // proof-of-stake: block gossip is forbidden post-merge, so terminate the
    // connection of any peer that sends it
    self.swarm.sessions_mut().disconnect(peer_id, Some(DisconnectReason::SubprotocolSpecific));
}
/// Handles a received Message from the peer's session.
///
/// Block gossip messages are gated through [`Self::within_pow_or_disconnect`];
/// transaction messages are forwarded to the transactions manager; requests go to the
/// eth request handler.
fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage<N>) {
    match msg {
        PeerMessage::NewBlockHashes(hashes) => {
            self.within_pow_or_disconnect(peer_id, |this| {
                // update peer's state, to track what blocks this peer has seen
                this.swarm.state_mut().on_new_block_hashes(peer_id, hashes.0.clone());
                // start block import process for the hashes
                this.block_import.on_new_block(peer_id, NewBlockEvent::Hashes(hashes));
            })
        }
        PeerMessage::NewBlock(block) => {
            self.within_pow_or_disconnect(peer_id, move |this| {
                // mark the block as seen by this peer
                this.swarm.state_mut().on_new_block(peer_id, block.hash);
                // start block import process
                this.block_import.on_new_block(peer_id, NewBlockEvent::Block(block));
            });
        }
        PeerMessage::PooledTransactions(msg) => {
            self.notify_tx_manager(NetworkTransactionEvent::IncomingPooledTransactionHashes {
                peer_id,
                msg,
            });
        }
        PeerMessage::EthRequest(req) => {
            self.on_eth_request(peer_id, req);
        }
        PeerMessage::ReceivedTransaction(msg) => {
            self.notify_tx_manager(NetworkTransactionEvent::IncomingTransactions {
                peer_id,
                msg,
            });
        }
        PeerMessage::SendTransactions(_) => {
            // outbound-only message kind; sessions never surface it as incoming
            unreachable!("Not emitted by session")
        }
        // currently a no-op at this level
        PeerMessage::BlockRangeUpdated(_) => {}
        PeerMessage::Other(other) => {
            debug!(target: "net", message_id=%other.id, "Ignoring unsupported message");
        }
    }
}
/// Handler for received messages from a handle
///
/// This is the dispatch point for all [`NetworkHandleMessage`]s sent by cloned
/// [`NetworkHandle`]s; each variant is routed to the swarm, the peer set, or answered
/// directly via the embedded oneshot sender.
fn on_handle_message(&mut self, msg: NetworkHandleMessage<N>) {
    match msg {
        NetworkHandleMessage::DiscoveryListener(tx) => {
            self.swarm.state_mut().discovery_mut().add_listener(tx);
        }
        NetworkHandleMessage::AnnounceBlock(block, hash) => {
            if self.handle.mode().is_stake() {
                // See [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p)
                warn!(target: "net", "Peer performed block propagation, but it is not supported in proof of stake (EIP-3675)");
                return
            }
            let msg = NewBlockMessage { hash, block: Arc::new(block) };
            self.swarm.state_mut().announce_new_block(msg);
        }
        NetworkHandleMessage::EthRequest { peer_id, request } => {
            self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::EthRequest(request))
        }
        NetworkHandleMessage::SendTransaction { peer_id, msg } => {
            self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::SendTransactions(msg))
        }
        NetworkHandleMessage::SendPooledTransactionHashes { peer_id, msg } => self
            .swarm
            .sessions_mut()
            .send_message(&peer_id, PeerMessage::PooledTransactions(msg)),
        NetworkHandleMessage::AddTrustedPeerId(peer_id) => {
            self.swarm.state_mut().add_trusted_peer_id(peer_id);
        }
        NetworkHandleMessage::AddPeerAddress(peer, kind, addr) => {
            // only add peer if we are not shutting down
            if !self.swarm.is_shutting_down() {
                self.swarm.state_mut().add_peer_kind(peer, kind, addr);
            }
        }
        NetworkHandleMessage::RemovePeer(peer_id, kind) => {
            self.swarm.state_mut().remove_peer_kind(peer_id, kind);
        }
        NetworkHandleMessage::DisconnectPeer(peer_id, reason) => {
            self.swarm.sessions_mut().disconnect(peer_id, reason);
        }
        NetworkHandleMessage::ConnectPeer(peer_id, kind, addr) => {
            self.swarm.state_mut().add_and_connect(peer_id, kind, addr);
        }
        NetworkHandleMessage::SetNetworkState(net_state) => {
            // Sets network connection state between Active and Hibernate.
            // If hibernate stops the node to fill new outbound
            // connections, this is beneficial for sync stages that do not require a network
            // connection.
            self.swarm.on_network_state_change(net_state);
        }
        NetworkHandleMessage::Shutdown(tx) => {
            self.perform_network_shutdown();
            // ack the shutdown request; the receiver may already be gone
            let _ = tx.send(());
        }
        NetworkHandleMessage::ReputationChange(peer_id, kind) => {
            self.swarm.state_mut().peers_mut().apply_reputation_change(&peer_id, kind);
        }
        NetworkHandleMessage::GetReputationById(peer_id, tx) => {
            let _ = tx.send(self.swarm.state_mut().peers().get_reputation(&peer_id));
        }
        NetworkHandleMessage::FetchClient(tx) => {
            let _ = tx.send(self.fetch_client());
        }
        NetworkHandleMessage::GetStatus(tx) => {
            let _ = tx.send(self.status());
        }
        NetworkHandleMessage::StatusUpdate { head } => {
            // a new head may cross a fork boundary, in which case the fork id changes
            if let Some(transition) = self.swarm.sessions_mut().on_status_update(head) {
                self.swarm.state_mut().update_fork_id(transition.current);
            }
        }
        NetworkHandleMessage::GetPeerInfos(tx) => {
            let _ = tx.send(self.get_peer_infos());
        }
        NetworkHandleMessage::GetPeerInfoById(peer_id, tx) => {
            let _ = tx.send(self.get_peer_info_by_id(peer_id));
        }
        NetworkHandleMessage::GetPeerInfosByIds(peer_ids, tx) => {
            let _ = tx.send(self.get_peer_infos_by_ids(peer_ids));
        }
        NetworkHandleMessage::GetPeerInfosByPeerKind(kind, tx) => {
            let peer_ids = self.swarm.state().peers().peers_by_kind(kind);
            let _ = tx.send(self.get_peer_infos_by_ids(peer_ids));
        }
        NetworkHandleMessage::AddRlpxSubProtocol(proto) => self.add_rlpx_sub_protocol(proto),
        NetworkHandleMessage::GetTransactionsHandle(tx) => {
            if let Some(ref tx_inner) = self.to_transactions_manager {
                let _ = tx_inner.send(NetworkTransactionEvent::GetTransactionsHandle(tx));
            } else {
                // no transactions manager configured, answer with `None` directly
                let _ = tx.send(None);
            }
        }
        NetworkHandleMessage::InternalBlockRangeUpdate(block_range_update) => {
            self.swarm.sessions_mut().update_advertised_block_range(block_range_update);
        }
        NetworkHandleMessage::EthMessage { peer_id, message } => {
            self.swarm.sessions_mut().send_message(&peer_id, message)
        }
    }
}
fn on_swarm_event(&mut self, event: SwarmEvent<N>) {
// handle event
match event {
SwarmEvent::ValidMessage { peer_id, message } => self.on_peer_message(peer_id, message),
SwarmEvent::TcpListenerClosed { remote_addr } => {
trace!(target: "net", ?remote_addr, "TCP listener closed.");
}
SwarmEvent::TcpListenerError(err) => {
trace!(target: "net", %err, "TCP connection error.");
}
SwarmEvent::IncomingTcpConnection { remote_addr, session_id } => {
trace!(target: "net", ?session_id, ?remote_addr, "Incoming connection");
self.metrics.total_incoming_connections.increment(1);
self.metrics
.incoming_connections
.set(self.swarm.state().peers().num_inbound_connections() as f64);
}
SwarmEvent::OutgoingTcpConnection { remote_addr, peer_id } => {
trace!(target: "net", ?remote_addr, ?peer_id, "Starting outbound connection.");
self.metrics.total_outgoing_connections.increment(1);
self.update_pending_connection_metrics()
}
SwarmEvent::SessionEstablished {
peer_id,
remote_addr,
client_version,
capabilities,
version,
messages,
status,
direction,
} => {
let total_active = self.num_active_peers.fetch_add(1, Ordering::Relaxed) + 1;
self.metrics.connected_peers.set(total_active as f64);
debug!(
target: "net",
?remote_addr,
%client_version,
?peer_id,
?total_active,
kind=%direction,
peer_enode=%NodeRecord::new(remote_addr, peer_id),
"Session established"
);
if direction.is_incoming() {
//! Keeps track of the state of the network.
use crate::{
cache::LruCache,
discovery::Discovery,
fetch::{BlockResponseOutcome, FetchAction, StateFetcher},
message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult},
peers::{PeerAction, PeersManager},
session::BlockRangeInfo,
FetchClient,
};
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use rand::seq::SliceRandom;
use reth_eth_wire::{
BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives,
NewBlockHashes, NewBlockPayload, UnifiedStatus,
};
use reth_ethereum_forks::ForkId;
use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender};
use reth_network_peers::PeerId;
use reth_network_types::{PeerAddr, PeerKind};
use reth_primitives_traits::Block;
use std::{
collections::{HashMap, VecDeque},
fmt,
net::{IpAddr, SocketAddr},
ops::Deref,
sync::{
atomic::{AtomicU64, AtomicUsize},
Arc,
},
task::{Context, Poll},
};
use tokio::sync::oneshot;
use tracing::{debug, trace};
/// Cache limit of blocks to keep track of for a single peer.
///
/// Bounds the per-peer LRU set of block hashes the peer is known to have seen
/// (the `blocks` cache created in `on_session_activated`).
const PEER_BLOCK_CACHE_LIMIT: u32 = 512;
/// Wrapper type for the [`BlockNumReader`] trait.
///
/// Boxes the trait object so the concrete provider type does not leak into
/// [`NetworkState`]'s type parameters, and provides a `Debug` impl for it.
pub(crate) struct BlockNumReader(Box<dyn reth_storage_api::BlockNumReader>);
impl BlockNumReader {
    /// Create a new instance with the given reader.
    pub fn new(reader: impl reth_storage_api::BlockNumReader + 'static) -> Self {
        Self(Box::new(reader))
    }
}
impl fmt::Debug for BlockNumReader {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The trait object itself is not `Debug`, so print a placeholder instead.
        f.debug_struct("BlockNumReader").field("inner", &"<dyn BlockNumReader>").finish()
    }
}
impl Deref for BlockNumReader {
    type Target = Box<dyn reth_storage_api::BlockNumReader>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// The [`NetworkState`] keeps track of the state of all peers in the network.
///
/// This includes:
///   - [`Discovery`]: manages the discovery protocol, essentially a stream of discovery updates
///   - [`PeersManager`]: keeps track of connected peers and issues new outgoing connections
///     depending on the configured capacity.
///   - [`StateFetcher`]: streams download request (received from outside via channel) which are
///     then sent to the session of the peer.
///
/// This type is also responsible for responding to received requests.
#[derive(Debug)]
pub struct NetworkState<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// All active peers and their state.
    active_peers: HashMap<PeerId, ActivePeer<N>>,
    /// Manages connections to peers.
    peers_manager: PeersManager,
    /// Buffered messages until polled.
    queued_messages: VecDeque<StateAction<N>>,
    /// The client type that can interact with the chain.
    ///
    /// This type is used to fetch the block number after we established a session and received the
    /// [`UnifiedStatus`] block hash.
    client: BlockNumReader,
    /// Network discovery.
    discovery: Discovery,
    /// The type that handles requests.
    ///
    /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type
    /// will then queue in the request and notify the fetcher once the result has been
    /// received.
    state_fetcher: StateFetcher<N>,
}
impl<N: NetworkPrimitives> NetworkState<N> {
/// Create a new state instance with the given params
pub(crate) fn new(
client: BlockNumReader,
discovery: Discovery,
peers_manager: PeersManager,
num_active_peers: Arc<AtomicUsize>,
) -> Self {
let state_fetcher = StateFetcher::new(peers_manager.handle(), num_active_peers);
Self {
active_peers: Default::default(),
peers_manager,
queued_messages: Default::default(),
client,
discovery,
state_fetcher,
}
}
/// Returns mutable access to the [`PeersManager`]
pub(crate) const fn peers_mut(&mut self) -> &mut PeersManager {
    &mut self.peers_manager
}
/// Returns mutable access to the [`Discovery`]
pub(crate) const fn discovery_mut(&mut self) -> &mut Discovery {
    &mut self.discovery
}
/// Returns shared (read-only) access to the [`PeersManager`]
pub(crate) const fn peers(&self) -> &PeersManager {
    &self.peers_manager
}
/// Returns a new [`FetchClient`]
///
/// The client is backed by the state fetcher and can be cloned and shared.
pub(crate) fn fetch_client(&self) -> FetchClient<N> {
    self.state_fetcher.client()
}
/// How many peers we're currently connected to.
///
/// Derived from the number of entries in `active_peers`.
pub fn num_active_peers(&self) -> usize {
    self.active_peers.len()
}
/// Event hook for an activated session for the peer.
///
/// Registers the peer with the state fetcher and inserts its tracking entry into the
/// active peer set.
///
/// NOTE(review): the doc mentions an `Ok`/`Err` return, but the function returns `()`;
/// the only guard is the `debug_assert!` below — confirm against callers.
pub(crate) fn on_session_activated(
    &mut self,
    peer: PeerId,
    capabilities: Arc<Capabilities>,
    status: Arc<UnifiedStatus>,
    request_tx: PeerRequestSender<PeerRequest<N>>,
    timeout: Arc<AtomicU64>,
    range_info: Option<BlockRangeInfo>,
) {
    debug_assert!(!self.active_peers.contains_key(&peer), "Already connected; not possible");
    // find the corresponding block number; falls back to 0 if the hash is unknown
    // or the lookup fails
    let block_number =
        self.client.block_number(status.blockhash).ok().flatten().unwrap_or_default();
    self.state_fetcher.new_active_peer(
        peer,
        status.blockhash,
        block_number,
        timeout,
        range_info,
    );
    self.active_peers.insert(
        peer,
        ActivePeer {
            best_hash: status.blockhash,
            capabilities,
            request_tx,
            pending_response: None,
            // bounded LRU of block hashes this peer is known to have seen
            blocks: LruCache::new(PEER_BLOCK_CACHE_LIMIT),
        },
    );
}
/// Event hook for a disconnected session for the given peer.
///
/// This will remove the peer from the available set of peers and close all inflight
/// requests tracked by the state fetcher.
pub(crate) fn on_session_closed(&mut self, peer: PeerId) {
    self.active_peers.remove(&peer);
    self.state_fetcher.on_session_closed(&peer);
}
/// Starts propagating the new block to peers that haven't reported the block yet.
///
/// This is supposed to be invoked after the block was validated.
///
/// > It then sends the block to a small fraction of connected peers (usually the square root of
/// > the total number of peers) using the `NewBlock` message.
///
/// See also <https://github.com/ethereum/devp2p/blob/master/caps/eth.md>
pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage<N::NewBlockPayload>) {
    // send a `NewBlock` message to a fraction of the connected peers (square root of the total
    // number of peers)
    let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1;
    let number = msg.block.block().header().number();
    // number of peers the full block has been queued for so far
    let mut count = 0;
    // Shuffle to propagate to a random sample of peers on every block announcement
    let mut peers: Vec<_> = self.active_peers.iter_mut().collect();
    peers.shuffle(&mut rand::rng());
    for (peer_id, peer) in peers {
        if peer.blocks.contains(&msg.hash) {
            // skip peers which already reported the block
            continue
        }
        // Queue a `NewBlock` message for the peer
        if count < num_propagate {
            self.queued_messages
                .push_back(StateAction::NewBlock { peer_id: *peer_id, block: msg.clone() });
            // update peer block info
            if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) {
                peer.best_hash = msg.hash;
            }
            // mark the block as seen by the peer
            peer.blocks.insert(msg.hash);
            count += 1;
        }
        // stop once enough peers have been selected
        if count >= num_propagate {
            break
        }
    }
}
/// Completes the block propagation process started in [`NetworkState::announce_new_block()`]
/// by sending a `NewBlockHashes` broadcast to all peers that haven't seen it yet.
pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage<N::NewBlockPayload>) {
    let number = msg.block.block().header().number();
    let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]);
    for (peer_id, peer) in &mut self.active_peers {
        if peer.blocks.contains(&msg.hash) {
            // skip peers which already reported the block
            continue
        }
        // keep the fetcher's view of the peer's head in sync
        if self.state_fetcher.update_peer_block(peer_id, msg.hash, number) {
            peer.best_hash = msg.hash;
        }
        self.queued_messages.push_back(StateAction::NewBlockHashes {
            peer_id: *peer_id,
            hashes: hashes.clone(),
        });
    }
}
/// Updates the block information for the peer.
pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: B256, number: u64) {
    // record the new best hash for an active session, if one exists
    if let Some(session) = self.active_peers.get_mut(peer_id) {
        session.best_hash = hash;
    }
    // the fetcher keeps its own view of peer heads for request routing
    self.state_fetcher.update_peer_block(peer_id, hash, number);
}
/// Invoked when a new [`ForkId`] is activated.
///
/// Forwards the new fork id to the discovery services (e.g. for the EIP-868 ENR entry).
pub(crate) fn update_fork_id(&self, fork_id: ForkId) {
    self.discovery.update_fork_id(fork_id)
}
/// Invoked after a `NewBlock` message was received by the peer.
///
/// This will keep track of blocks we know a peer has.
pub(crate) fn on_new_block(&mut self, peer_id: PeerId, hash: B256) {
    // ignore peers without an active session
    let Some(peer) = self.active_peers.get_mut(&peer_id) else { return };
    // remember that this peer has seen the block
    peer.blocks.insert(hash);
}
/// Invoked for a `NewBlockHashes` broadcast message.
///
/// Marks every announced hash as seen by the sending peer.
pub(crate) fn on_new_block_hashes(&mut self, peer_id: PeerId, hashes: Vec<BlockHashNumber>) {
    // ignore peers without an active session
    let Some(peer) = self.active_peers.get_mut(&peer_id) else { return };
    for announced in hashes {
        peer.blocks.insert(announced.hash);
    }
}
/// Bans the [`IpAddr`] in the discovery service.
pub(crate) fn ban_ip_discovery(&self, ip: IpAddr) {
    trace!(target: "net", ?ip, "Banning discovery");
    self.discovery.ban_ip(ip)
}
/// Bans the [`PeerId`] and [`IpAddr`] in the discovery service.
pub(crate) fn ban_discovery(&self, peer_id: PeerId, ip: IpAddr) {
    trace!(target: "net", ?peer_id, ?ip, "Banning discovery");
    self.discovery.ban(peer_id, ip)
}
/// Marks the given peer as trusted.
pub(crate) fn add_trusted_peer_id(&mut self, peer_id: PeerId) {
    self.peers_manager.add_trusted_peer_id(peer_id)
}
/// Adds a peer and its address with the given kind to the peerset.
///
/// No fork id is supplied at this point (hence the trailing `None`).
pub(crate) fn add_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind, addr: PeerAddr) {
    self.peers_manager.add_peer_kind(peer_id, kind, addr, None)
}
/// Adds a peer with the given kind to the peerset and attempts to connect to it.
pub(crate) fn add_and_connect(&mut self, peer_id: PeerId, kind: PeerKind, addr: PeerAddr) {
    self.peers_manager.add_and_connect_kind(peer_id, kind, addr, None)
}
/// Removes a peer and its address with the given kind from the peerset.
pub(crate) fn remove_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind) {
    match kind {
        // trusted peers live in a dedicated set
        PeerKind::Trusted => self.peers_manager.remove_peer_from_trusted_set(peer_id),
        PeerKind::Basic | PeerKind::Static => self.peers_manager.remove_peer(peer_id),
    }
}
/// Event hook for events received from the discovery service.
///
/// Each event is translated into a [`StateAction`] and buffered until the next poll.
fn on_discovery_event(&mut self, event: DiscoveryEvent) {
    let action = match event {
        DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id, addr, fork_id }) => {
            StateAction::DiscoveredNode { peer_id, addr, fork_id }
        }
        DiscoveryEvent::EnrForkId(peer_id, fork_id) => {
            StateAction::DiscoveredEnrForkId { peer_id, fork_id }
        }
    };
    self.queued_messages.push_back(action);
}
/// Event hook for new actions derived from the peer management set.
///
/// Connection-related actions are buffered as [`StateAction`]s; ban actions are
/// forwarded straight to the discovery services.
fn on_peer_action(&mut self, action: PeerAction) {
    match action {
        PeerAction::Connect { peer_id, remote_addr } => {
            self.queued_messages.push_back(StateAction::Connect { peer_id, remote_addr });
        }
        PeerAction::Disconnect { peer_id, reason } => {
            // stop routing requests to this peer before the disconnect is issued
            self.state_fetcher.on_pending_disconnect(&peer_id);
            self.queued_messages.push_back(StateAction::Disconnect { peer_id, reason });
        }
        PeerAction::DisconnectBannedIncoming { peer_id } |
        PeerAction::DisconnectUntrustedIncoming { peer_id } => {
            self.state_fetcher.on_pending_disconnect(&peer_id);
            // no explicit reason sent for these disconnects
            self.queued_messages.push_back(StateAction::Disconnect { peer_id, reason: None });
        }
        PeerAction::DiscoveryBanPeerId { peer_id, ip_addr } => {
            self.ban_discovery(peer_id, ip_addr)
        }
        PeerAction::DiscoveryBanIp { ip_addr } => self.ban_ip_discovery(ip_addr),
        PeerAction::PeerAdded(peer_id) => {
            self.queued_messages.push_back(StateAction::PeerAdded(peer_id))
        }
        PeerAction::PeerRemoved(peer_id) => {
            self.queued_messages.push_back(StateAction::PeerRemoved(peer_id))
        }
        // handled entirely inside the peers manager; nothing to do here
        PeerAction::BanPeer { .. } | PeerAction::UnBanPeer { .. } => {}
    }
}
/// Sends the message to the peer's session and queues in a response.
///
/// Caution: this will replace an already pending response. It's the responsibility of the
/// caller to select the peer.
fn handle_block_request(&mut self, peer: PeerId, request: BlockRequest) {
    // note: `peer` is shadowed here with the peer's session state
    if let Some(ref mut peer) = self.active_peers.get_mut(&peer) {
        let (request, response) = match request {
            BlockRequest::GetBlockHeaders(request) => {
                // oneshot pair: the session resolves `response`, we poll `rx`
                let (response, rx) = oneshot::channel();
                let request = PeerRequest::GetBlockHeaders { request, response };
                let response = PeerResponse::BlockHeaders { response: rx };
                (request, response)
            }
            BlockRequest::GetBlockBodies(request) => {
                let (response, rx) = oneshot::channel();
                let request = PeerRequest::GetBlockBodies { request, response };
                let response = PeerResponse::BlockBodies { response: rx };
                (request, response)
            }
        };
        // best-effort: if the session's buffer is full the request is dropped
        let _ = peer.request_tx.to_session_tx.try_send(request);
        peer.pending_response = Some(response);
    }
}
/// Handle the outcome of a processed response, for example directly queue another request.
fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) {
    match outcome {
        // the fetcher wants a follow-up request dispatched
        BlockResponseOutcome::Request(peer, request) => {
            self.handle_block_request(peer, request)
        }
        // the response was bad; penalize the peer's reputation
        BlockResponseOutcome::BadResponse(peer, reputation_change) => {
            self.peers_manager.apply_reputation_change(&peer, reputation_change)
        }
    }
}
/// Invoked when received a response from a connected peer.
///
/// Delegates the response result to the fetcher which may return an outcome specific
/// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be
/// a follow-up request or an instruction to slash the peer's reputation.
fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult<N>) {
    let outcome = match resp {
        PeerResponseResult::BlockHeaders(res) => {
            self.state_fetcher.on_block_headers_response(peer, res)
        }
        PeerResponseResult::BlockBodies(res) => {
            self.state_fetcher.on_block_bodies_response(peer, res)
        }
        // other response kinds are not tracked by the fetcher
        _ => return,
    };
    if let Some(outcome) = outcome {
        self.on_block_response_outcome(outcome);
    }
}
/// Advances the state
///
/// Drains buffered messages first, then drives discovery, the state fetcher, all active
/// peer responses, and the peer manager. Returns `Pending` only once no queued message is
/// ready after a full pass.
pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<StateAction<N>> {
    loop {
        // drain buffered messages
        if let Some(message) = self.queued_messages.pop_front() {
            return Poll::Ready(message)
        }
        // drive discovery; events may queue new messages
        while let Poll::Ready(discovery) = self.discovery.poll(cx) {
            self.on_discovery_event(discovery);
        }
        // drive the fetcher; it can ask us to dispatch new block requests
        while let Poll::Ready(action) = self.state_fetcher.poll(cx) {
            match action {
                FetchAction::BlockRequest { peer_id, request } => {
                    self.handle_block_request(peer_id, request)
                }
            }
        }
        loop {
            // need to buffer results here to make borrow checker happy
            // (we iterate `active_peers` mutably and can't call &mut self methods inside)
            let mut closed_sessions = Vec::new();
            let mut received_responses = Vec::new();
            // poll all connected peers for responses
            for (id, peer) in &mut self.active_peers {
                let Some(mut response) = peer.pending_response.take() else { continue };
                match response.poll(cx) {
                    Poll::Ready(res) => {
                        // check if the error is due to a closed channel to the session
                        if res.err().is_some_and(|err| err.is_channel_closed()) {
                            debug!(
                                target: "net",
                                ?id,
                                "Request canceled, response channel from session closed."
                            );
                            // if the channel is closed, this means the peer session is also
                            // closed, in which case we can invoke the
                            // [Self::on_closed_session]
                            // immediately, preventing followup requests and propagate the
                            // connection dropped error
                            closed_sessions.push(*id);
                        } else {
                            received_responses.push((*id, res));
                        }
                    }
                    Poll::Pending => {
                        // not ready yet, store again.
                        peer.pending_response = Some(response);
                    }
                };
            }
            for peer in closed_sessions {
                self.on_session_closed(peer)
            }
            if received_responses.is_empty() {
                break;
            }
            for (peer_id, resp) in received_responses {
                self.on_eth_response(peer_id, resp);
            }
            // loop again: handling responses may have dispatched new requests whose
            // channels could already be ready
        }
        // poll peer manager
        while let Poll::Ready(action) = self.peers_manager.poll(cx) {
            self.on_peer_action(action);
        }
        // We need to poll again in case we have received any responses because they may have
        // triggered follow-up requests.
        if self.queued_messages.is_empty() {
            return Poll::Pending
        }
    }
}
/// Tracks the state of a Peer with an active Session.
///
/// For example known blocks, so we can decide what to announce.
#[derive(Debug)]
pub(crate) struct ActivePeer<N: NetworkPrimitives> {
    /// Best block of the peer.
    pub(crate) best_hash: B256,
    /// The capabilities of the remote peer.
    #[expect(dead_code)]
    pub(crate) capabilities: Arc<Capabilities>,
    /// A communication channel directly to the session task.
    pub(crate) request_tx: PeerRequestSender<PeerRequest<N>>,
    /// The response receiver for a currently active request to that peer.
    ///
    /// At most one request per peer is in flight at any time.
    pub(crate) pending_response: Option<PeerResponse<N>>,
    /// Blocks we know the peer has (used to avoid re-announcing).
    pub(crate) blocks: LruCache<B256>,
}
/// Message variants triggered by the [`NetworkState`]
#[derive(Debug)]
pub(crate) enum StateAction<N: NetworkPrimitives> {
    /// Dispatch a `NewBlock` message to the peer
    NewBlock {
        /// Target of the message
        peer_id: PeerId,
        /// The `NewBlock` message
        block: NewBlockMessage<N::NewBlockPayload>,
    },
    /// Dispatch a `NewBlockHashes` message to the peer
    NewBlockHashes {
        /// Target of the message
        peer_id: PeerId,
        /// `NewBlockHashes` message to send to the peer.
        hashes: NewBlockHashes,
    },
    /// Create a new connection to the given node.
    Connect {
        /// Address to dial.
        remote_addr: SocketAddr,
        /// Identifier of the remote peer.
        peer_id: PeerId,
    },
    /// Disconnect an existing connection
    Disconnect {
        /// Peer to disconnect.
        peer_id: PeerId,
        /// Why the disconnect was initiated
        reason: Option<DisconnectReason>,
    },
    /// Retrieved a [`ForkId`] from the peer via ENR request, See <https://eips.ethereum.org/EIPS/eip-868>
    DiscoveredEnrForkId {
        /// Peer that reported the fork id.
        peer_id: PeerId,
        /// The reported [`ForkId`] by this peer.
        fork_id: ForkId,
    },
    /// A new node was found through the discovery, possibly with a `ForkId`
    DiscoveredNode {
        /// Identifier of the discovered peer.
        peer_id: PeerId,
        /// Advertised address of the discovered peer.
        addr: PeerAddr,
        /// Fork id advertised via ENR, if any.
        fork_id: Option<ForkId>,
    },
    /// A peer was added
    PeerAdded(PeerId),
    /// A peer was dropped
    PeerRemoved(PeerId),
}
#[cfg(test)]
mod tests {
    use crate::{
        discovery::Discovery,
        fetch::StateFetcher,
        peers::PeersManager,
        state::{BlockNumReader, NetworkState},
        PeerRequest,
    };
    use alloy_consensus::Header;
    use alloy_primitives::B256;
    use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthNetworkPrimitives, EthVersion};
    use reth_ethereum_primitives::BlockBody;
    use reth_network_api::PeerRequestSender;
    use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError};
    use reth_network_peers::PeerId;
    use reth_storage_api::noop::NoopProvider;
    use std::{
        future::poll_fn,
        sync::{atomic::AtomicU64, Arc},
    };
    use tokio::sync::mpsc;
    use tokio_stream::{wrappers::ReceiverStream, StreamExt};

    /// Returns a testing instance of the [`NetworkState`].
    fn state() -> NetworkState<EthNetworkPrimitives> {
        let peers = PeersManager::default();
        let handle = peers.handle();
        // NOTE(review): `peers` is dropped here and the state gets a fresh default
        // manager; only the fetcher keeps the handle — presumably intentional for
        // this test, confirm.
        NetworkState {
            active_peers: Default::default(),
            peers_manager: Default::default(),
            queued_messages: Default::default(),
            client: BlockNumReader(Box::new(NoopProvider::default())),
            discovery: Discovery::noop(),
            state_fetcher: StateFetcher::new(handle, Default::default()),
        }
    }

    /// A minimal capability set (eth/67) for session activation in tests.
    fn capabilities() -> Arc<Capabilities> {
        Arc::new(vec![Capability::from(EthVersion::Eth67)].into())
    }

    // tests that ongoing requests are answered with connection dropped if the session that received
    // that request is drops the request object.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_dropped_active_session() {
        let mut state = state();
        let client = state.fetch_client();
        let peer_id = PeerId::random();
        let (tx, session_rx) = mpsc::channel(1);
        let peer_tx = PeerRequestSender::new(peer_id, tx);

        state.on_session_activated(
            peer_id,
            capabilities(),
            Arc::default(),
            peer_tx,
            Arc::new(AtomicU64::new(1)),
            None,
        );

        assert!(state.active_peers.contains_key(&peer_id));

        let body = BlockBody { ommers: vec![Header::default()], ..Default::default() };
        let body_response = body.clone();

        // this mimics an active session that receives the requests from the state:
        // answers the first request, then drops the second one unanswered.
        tokio::task::spawn(async move {
            let mut stream = ReceiverStream::new(session_rx);
            let resp = stream.next().await.unwrap();
            match resp {
                PeerRequest::GetBlockBodies { response, .. } => {
                    response.send(Ok(BlockBodies(vec![body_response]))).unwrap();
                }
                _ => unreachable!(),
            }
            // wait for the next request, then drop
            let _resp = stream.next().await.unwrap();
        });

        // spawn the state as future
        tokio::task::spawn(async move {
            loop {
                poll_fn(|cx| state.poll(cx)).await;
            }
        });

        // send requests to the state via the client
        let (peer, bodies) = client.get_block_bodies(vec![B256::random()]).await.unwrap().split();
        assert_eq!(peer, peer_id);
        assert_eq!(bodies, vec![body]);

        // the session dropped the second request object, so its oneshot sender is gone
        // and the state must surface this as `ConnectionDropped`.
        let resp = client.get_block_bodies(vec![B256::random()]).await;
        assert!(resp.is_err());
        assert_eq!(resp.unwrap_err(), RequestError::ConnectionDropped);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/listener.rs | crates/net/network/src/listener.rs | //! Contains connection-oriented interfaces.
use futures::{ready, Stream, StreamExt};
use std::{
io,
net::SocketAddr,
pin::Pin,
task::{Context, Poll},
};
use tokio::net::{TcpListener, TcpStream};
/// A tcp connection listener.
///
/// Listens for incoming connections.
#[must_use = "Transport does nothing unless polled."]
#[derive(Debug)]
pub struct ConnectionListener {
    /// Local address of the listener stream.
    ///
    /// Cached at bind time so it remains available after the listener closes.
    local_address: SocketAddr,
    /// The active tcp listener for incoming connections.
    incoming: TcpListenerStream,
}
impl ConnectionListener {
    /// Creates a new [`TcpListener`] that listens for incoming connections.
    pub async fn bind(addr: SocketAddr) -> io::Result<Self> {
        let listener = TcpListener::bind(addr).await?;
        // Re-read the bound address: if `addr` used port 0 the OS assigned a real port.
        let local_addr = listener.local_addr()?;
        Ok(Self::new(listener, local_addr))
    }

    /// Creates a new connection listener stream.
    pub(crate) const fn new(listener: TcpListener, local_address: SocketAddr) -> Self {
        Self { local_address, incoming: TcpListenerStream { inner: listener } }
    }

    /// Polls the type to make progress.
    ///
    /// Yields an event per accepted connection, accept error, or listener shutdown.
    pub fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<ListenerEvent> {
        let this = self.get_mut();
        match ready!(this.incoming.poll_next_unpin(cx)) {
            Some(Ok((stream, remote_addr))) => {
                // Disable Nagle's algorithm for lower latency on small messages; failure
                // is non-fatal, so only log it.
                if let Err(err) = stream.set_nodelay(true) {
                    tracing::warn!(target: "net", "set nodelay failed: {:?}", err);
                }
                Poll::Ready(ListenerEvent::Incoming { stream, remote_addr })
            }
            // Accept errors are surfaced but do not terminate the listener.
            Some(Err(err)) => Poll::Ready(ListenerEvent::Error(err)),
            None => {
                Poll::Ready(ListenerEvent::ListenerClosed { local_address: this.local_address })
            }
        }
    }

    /// Returns the socket address this listener listens on.
    pub const fn local_address(&self) -> SocketAddr {
        self.local_address
    }
}
/// Event type produced by the [`TcpListenerStream`].
// `Debug` added: this is a public type, all payloads (`TcpStream`, `SocketAddr`,
// `io::Error`) implement `Debug`, and every other public type in this module derives it.
#[derive(Debug)]
pub enum ListenerEvent {
    /// Received a new incoming.
    Incoming {
        /// Accepted connection
        stream: TcpStream,
        /// Address of the remote peer.
        remote_addr: SocketAddr,
    },
    /// Returned when the underlying connection listener has been closed.
    ///
    /// This is the case if the [`TcpListenerStream`] should ever return `None`
    ListenerClosed {
        /// Address of the closed listener.
        local_address: SocketAddr,
    },
    /// Encountered an error when accepting a connection.
    ///
    /// This is a non-fatal error as the listener continues to listen for new connections to
    /// accept.
    Error(io::Error),
}
/// A stream of incoming [`TcpStream`]s.
///
/// Thin wrapper turning [`TcpListener::poll_accept`] into a [`Stream`].
#[derive(Debug)]
struct TcpListenerStream {
    /// listener for incoming connections.
    inner: TcpListener,
}
impl Stream for TcpListenerStream {
    type Item = io::Result<(TcpStream, SocketAddr)>;

    /// Polls the underlying listener for the next inbound connection.
    ///
    /// Never yields `None`: accept errors are surfaced as `Some(Err(_))` so callers can
    /// keep listening.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // `poll_accept` already yields `io::Result<(TcpStream, SocketAddr)>`; the previous
        // three-arm match merely re-implemented `Poll::map`.
        self.inner.poll_accept(cx).map(Some)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::{
        net::{Ipv4Addr, SocketAddrV4},
        pin::pin,
    };
    use tokio::macros::support::poll_fn;

    #[tokio::test(flavor = "multi_thread")]
    async fn test_incoming_listener() {
        // Bind to port 0 so the OS picks a free port; the real address is read back below.
        let listener =
            ConnectionListener::bind(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)))
                .await
                .unwrap();
        let local_addr = listener.local_address();

        // The listener task must observe exactly one `Incoming` event from the dial below.
        tokio::task::spawn(async move {
            let mut listener = pin!(listener);
            match poll_fn(|cx| listener.as_mut().poll(cx)).await {
                ListenerEvent::Incoming { .. } => {}
                _ => {
                    panic!("unexpected event")
                }
            }
        });

        let _ = TcpStream::connect(local_addr).await.unwrap();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/error.rs | crates/net/network/src/error.rs | //! Possible errors when interacting with the network.
use crate::session::PendingSessionHandshakeError;
use reth_dns_discovery::resolver::ResolveError;
use reth_ecies::ECIESErrorImpl;
use reth_eth_wire::{
errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError},
DisconnectReason,
};
use reth_network_types::BackoffKind;
use std::{fmt, io, io::ErrorKind, net::SocketAddr};
/// Identifies which network service a socket address belongs to.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ServiceKind {
    /// Listener service.
    Listener(SocketAddr),
    /// Discovery service.
    Discovery(SocketAddr),
}

impl ServiceKind {
    /// Returns the CLI flag used to configure the port of this service.
    pub const fn flags(&self) -> &'static str {
        if let Self::Listener(_) = self {
            "--port"
        } else {
            "--discovery.port"
        }
    }
}

impl fmt::Display for ServiceKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Destructure once, format once: both variants share the "<addr> (<label>)" shape.
        let (addr, label) = match self {
            Self::Listener(addr) => (addr, "listener service"),
            Self::Discovery(addr) => (addr, "discovery service"),
        };
        write!(f, "{addr} ({label})")
    }
}
/// All error variants for the network
#[derive(Debug, thiserror::Error)]
pub enum NetworkError {
    /// General IO error.
    #[error(transparent)]
    Io(#[from] io::Error),
    /// Error when an address is already in use.
    ///
    /// The message embeds the matching CLI flag (via [`ServiceKind::flags`]) so users
    /// know which option to change.
    #[error("address {kind} is already in use (os error 98). Choose a different port using {}", kind.flags())]
    AddressAlreadyInUse {
        /// Service kind.
        kind: ServiceKind,
        /// IO error.
        error: io::Error,
    },
    /// IO error when creating the discovery service
    #[error("failed to launch discovery service on {0}: {1}")]
    Discovery(SocketAddr, io::Error),
    /// An error occurred with discovery v5 node.
    #[error("discv5 error, {0}")]
    Discv5Error(#[from] reth_discv5::Error),
    /// Error when setting up the DNS resolver failed
    ///
    /// See also [`DnsResolver`](reth_dns_discovery::DnsResolver::from_system_conf)
    #[error("failed to configure DNS resolver: {0}")]
    DnsResolver(#[from] ResolveError),
}
impl NetworkError {
    /// Converts a `std::io::Error` to a more descriptive `NetworkError`.
    ///
    /// `AddrInUse` is always mapped to [`NetworkError::AddressAlreadyInUse`]; any other
    /// error on the discovery service becomes [`NetworkError::Discovery`], and everything
    /// else is wrapped as plain [`NetworkError::Io`].
    pub fn from_io_error(err: io::Error, kind: ServiceKind) -> Self {
        if err.kind() == ErrorKind::AddrInUse {
            return Self::AddressAlreadyInUse { kind, error: err }
        }
        match kind {
            ServiceKind::Discovery(address) => Self::Discovery(address, err),
            ServiceKind::Listener(_) => Self::Io(err),
        }
    }
}
/// Abstraction over errors that can lead to a failed session
#[auto_impl::auto_impl(&)]
pub(crate) trait SessionError: fmt::Debug + fmt::Display {
    /// Returns true if the error indicates that the corresponding peer should be removed from peer
    /// discovery, for example if it's using a different genesis hash.
    fn merits_discovery_ban(&self) -> bool;

    /// Returns true if the error indicates that we'll never be able to establish a connection to
    /// that peer. For example, not matching capabilities or a mismatch in protocols.
    ///
    /// Note: This does not necessarily mean that either of the peers are in violation of the
    /// protocol but rather that they'll never be able to connect with each other. This check is
    /// a superset of [`Self::merits_discovery_ban`] which checks if the peer should not be part
    /// of the gossip network.
    fn is_fatal_protocol_error(&self) -> bool;

    /// Whether we should backoff.
    ///
    /// Returns the severity of the backoff that should be applied, or `None`, if no backoff should
    /// be applied.
    ///
    /// In case of `Some(BackoffKind)` will temporarily prevent additional
    /// connection attempts.
    fn should_backoff(&self) -> Option<BackoffKind>;
}
impl SessionError for EthStreamError {
    fn merits_discovery_ban(&self) -> bool {
        match self {
            // A malformed p2p handshake means the peer doesn't speak the protocol at all.
            Self::P2PStreamError(P2PStreamError::HandshakeError(
                P2PHandshakeError::HelloNotInHandshake |
                P2PHandshakeError::NonHelloMessageInHandshake,
            )) => true,
            // Any eth handshake failure other than a plain timeout is a protocol mismatch.
            Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse),
            _ => false,
        }
    }

    fn is_fatal_protocol_error(&self) -> bool {
        match self {
            Self::P2PStreamError(err) => {
                // Errors that can never resolve themselves on retry: incompatible
                // capabilities/versions, protocol breaches, or explicit "useless peer".
                matches!(
                    err,
                    P2PStreamError::HandshakeError(
                        P2PHandshakeError::NoSharedCapabilities |
                            P2PHandshakeError::HelloNotInHandshake |
                            P2PHandshakeError::NonHelloMessageInHandshake |
                            P2PHandshakeError::Disconnected(
                                DisconnectReason::UselessPeer |
                                    DisconnectReason::IncompatibleP2PProtocolVersion |
                                    DisconnectReason::ProtocolBreach
                            )
                    ) | P2PStreamError::UnknownReservedMessageId(_) |
                        P2PStreamError::EmptyProtocolMessage |
                        P2PStreamError::ParseSharedCapability(_) |
                        P2PStreamError::CapabilityNotShared |
                        P2PStreamError::Disconnected(
                            DisconnectReason::UselessPeer |
                                DisconnectReason::IncompatibleP2PProtocolVersion |
                                DisconnectReason::ProtocolBreach
                        ) |
                        P2PStreamError::MismatchedProtocolVersion { .. }
                )
            }
            Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse),
            _ => false,
        }
    }

    fn should_backoff(&self) -> Option<BackoffKind> {
        // Underlying IO errors get their own backoff classification.
        if let Some(err) = self.as_io() {
            return err.should_backoff()
        }

        if let Some(err) = self.as_disconnected() {
            return match err {
                // Transient conditions: the peer may accept us shortly.
                DisconnectReason::TooManyPeers |
                DisconnectReason::AlreadyConnected |
                DisconnectReason::PingTimeout |
                DisconnectReason::DisconnectRequested |
                DisconnectReason::TcpSubsystemError => Some(BackoffKind::Low),

                DisconnectReason::ProtocolBreach |
                DisconnectReason::UselessPeer |
                DisconnectReason::IncompatibleP2PProtocolVersion |
                DisconnectReason::NullNodeIdentity |
                DisconnectReason::ClientQuitting |
                DisconnectReason::UnexpectedHandshakeIdentity |
                DisconnectReason::ConnectedToSelf |
                DisconnectReason::SubprotocolSpecific => {
                    // These are considered fatal, and are handled by the
                    // [`SessionError::is_fatal_protocol_error`]
                    Some(BackoffKind::High)
                }
            }
        }

        // This only checks for a subset of error variants, the counterpart of
        // [`SessionError::is_fatal_protocol_error`]
        match self {
            // timeouts
            Self::EthHandshakeError(EthHandshakeError::NoResponse) |
            Self::P2PStreamError(
                P2PStreamError::HandshakeError(P2PHandshakeError::NoResponse) |
                P2PStreamError::PingTimeout,
            ) => Some(BackoffKind::Low),
            // malformed messages
            Self::P2PStreamError(
                P2PStreamError::Rlp(_) |
                P2PStreamError::UnknownReservedMessageId(_) |
                P2PStreamError::UnknownDisconnectReason(_) |
                P2PStreamError::MessageTooBig { .. } |
                P2PStreamError::EmptyProtocolMessage |
                P2PStreamError::PingerError(_) |
                P2PStreamError::Snap(_),
            ) => Some(BackoffKind::Medium),
            _ => None,
        }
    }
}
impl SessionError for PendingSessionHandshakeError {
fn merits_discovery_ban(&self) -> bool {
match self {
Self::Eth(eth) => eth.merits_discovery_ban(),
Self::Ecies(err) => matches!(
err.inner(),
ECIESErrorImpl::TagCheckDecryptFailed |
ECIESErrorImpl::TagCheckHeaderFailed |
ECIESErrorImpl::TagCheckBodyFailed |
ECIESErrorImpl::InvalidAuthData |
ECIESErrorImpl::InvalidAckData |
ECIESErrorImpl::InvalidHeader |
ECIESErrorImpl::Secp256k1(_) |
ECIESErrorImpl::InvalidHandshake { .. }
),
Self::Timeout | Self::UnsupportedExtraCapability => false,
}
}
fn is_fatal_protocol_error(&self) -> bool {
match self {
Self::Eth(eth) => eth.is_fatal_protocol_error(),
Self::Ecies(err) => matches!(
err.inner(),
ECIESErrorImpl::TagCheckDecryptFailed |
ECIESErrorImpl::TagCheckHeaderFailed |
ECIESErrorImpl::TagCheckBodyFailed |
ECIESErrorImpl::InvalidAuthData |
ECIESErrorImpl::InvalidAckData |
ECIESErrorImpl::InvalidHeader |
ECIESErrorImpl::Secp256k1(_) |
ECIESErrorImpl::InvalidHandshake { .. }
),
Self::Timeout => false,
Self::UnsupportedExtraCapability => true,
}
}
fn should_backoff(&self) -> Option<BackoffKind> {
match self {
Self::Eth(eth) => eth.should_backoff(),
Self::Ecies(_) => Some(BackoffKind::Low),
Self::Timeout => Some(BackoffKind::Medium),
Self::UnsupportedExtraCapability => Some(BackoffKind::High),
}
}
}
impl SessionError for io::Error {
    /// IO errors never identify a peer worth removing from discovery.
    fn merits_discovery_ban(&self) -> bool {
        false
    }

    /// IO errors are never permanent protocol incompatibilities.
    fn is_fatal_protocol_error(&self) -> bool {
        false
    }

    /// Classifies the IO error into a backoff severity; IO failures always back off.
    fn should_backoff(&self) -> Option<BackoffKind> {
        let severity = match self.kind() {
            // these usually happen when the remote instantly drops the connection, for
            // example if the previous connection isn't properly cleaned up yet and the
            // peer is temporarily banned.
            ErrorKind::ConnectionReset | ErrorKind::BrokenPipe => BackoffKind::Low,
            // peer is unreachable, e.g. port not open or host down
            ErrorKind::ConnectionRefused => BackoffKind::High,
            _ => BackoffKind::Medium,
        };
        Some(severity)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{Ipv4Addr, SocketAddrV4};

    #[test]
    fn test_is_fatal_disconnect() {
        // `UselessPeer` during the p2p handshake must be classified as fatal.
        let err = PendingSessionHandshakeError::Eth(EthStreamError::P2PStreamError(
            P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(
                DisconnectReason::UselessPeer,
            )),
        ));

        assert!(err.is_fatal_protocol_error());
    }

    #[test]
    fn test_should_backoff() {
        // `TooManyPeers` is transient: low backoff.
        let err = EthStreamError::P2PStreamError(P2PStreamError::HandshakeError(
            P2PHandshakeError::Disconnected(DisconnectReason::TooManyPeers),
        ));
        assert_eq!(err.as_disconnected(), Some(DisconnectReason::TooManyPeers));
        assert_eq!(err.should_backoff(), Some(BackoffKind::Low));

        // A handshake timeout is also a low-backoff condition.
        let err = EthStreamError::P2PStreamError(P2PStreamError::HandshakeError(
            P2PHandshakeError::NoResponse,
        ));
        assert_eq!(err.should_backoff(), Some(BackoffKind::Low));
    }

    #[test]
    fn test_address_in_use_message() {
        // The error message must mention the CLI flag for the affected service.
        let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1234));
        let kinds = [ServiceKind::Discovery(addr), ServiceKind::Listener(addr)];

        for kind in &kinds {
            let err = NetworkError::AddressAlreadyInUse {
                kind: *kind,
                error: io::Error::from(ErrorKind::AddrInUse),
            };

            assert!(err.to_string().contains(kind.flags()));
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/flattened_response.rs | crates/net/network/src/flattened_response.rs | use futures::Future;
use pin_project::pin_project;
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio::sync::oneshot::{error::RecvError, Receiver};
/// Flatten a [`Receiver`] message in order to get rid of the [`RecvError`] result
///
/// Wraps a oneshot receiver whose payload is itself a `Result`, collapsing the channel
/// error into the payload's error type.
#[derive(Debug)]
#[pin_project]
pub struct FlattenedResponse<T> {
    #[pin]
    receiver: Receiver<T>,
}
impl<T, E> Future for FlattenedResponse<Result<T, E>>
where
    E: From<RecvError>,
{
    type Output = Result<T, E>;

    /// Polls the inner receiver, converting a dropped-sender [`RecvError`] into `Err(E)`
    /// so callers see a single flat `Result`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.project().receiver.poll(cx) {
            Poll::Ready(Ok(inner)) => Poll::Ready(inner),
            Poll::Ready(Err(recv_err)) => Poll::Ready(Err(recv_err.into())),
            Poll::Pending => Poll::Pending,
        }
    }
}
impl<T> From<Receiver<T>> for FlattenedResponse<T> {
    /// Wraps a raw oneshot receiver without any additional state.
    fn from(value: Receiver<T>) -> Self {
        Self { receiver: value }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/peers.rs | crates/net/network/src/peers.rs | //! Peer related implementations
use crate::{
error::SessionError,
session::{Direction, PendingSessionHandshakeError},
swarm::NetworkConnectionState,
trusted_peers_resolver::TrustedPeersResolver,
};
use futures::StreamExt;
use reth_eth_wire::{errors::EthStreamError, DisconnectReason};
use reth_ethereum_forks::ForkId;
use reth_net_banlist::BanList;
use reth_network_api::test_utils::{PeerCommand, PeersHandle};
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::{
is_connection_failed_reputation,
peers::{
config::PeerBackoffDurations,
reputation::{DEFAULT_REPUTATION, MAX_TRUSTED_PEER_REPUTATION_CHANGE},
},
ConnectionsConfig, Peer, PeerAddr, PeerConnectionState, PeerKind, PeersConfig,
ReputationChangeKind, ReputationChangeOutcome, ReputationChangeWeights,
};
use std::{
collections::{hash_map::Entry, HashMap, HashSet, VecDeque},
fmt::Display,
io::{self},
net::{IpAddr, SocketAddr},
task::{Context, Poll},
time::Duration,
};
use thiserror::Error;
use tokio::{
sync::mpsc,
time::{Instant, Interval},
};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{trace, warn};
/// Maintains the state of _all_ the peers known to the network.
///
/// This is supposed to be owned by the network itself, but can be reached via the [`PeersHandle`].
/// From this type, connections to peers are established or disconnected, see [`PeerAction`].
///
/// The [`PeersManager`] will be notified on peer related changes
#[derive(Debug)]
pub struct PeersManager {
    /// All peers known to the network
    peers: HashMap<PeerId, Peer>,
    /// The set of trusted peer ids.
    ///
    /// This tracks peer ids that are considered trusted, but for which we don't necessarily have
    /// an address: [`Self::add_trusted_peer_id`]
    trusted_peer_ids: HashSet<PeerId>,
    /// A resolver used to periodically resolve DNS names for trusted peers. This updates the
    /// peer's address when the DNS records change.
    trusted_peers_resolver: TrustedPeersResolver,
    /// Copy of the sender half, so new [`PeersHandle`] can be created on demand.
    manager_tx: mpsc::UnboundedSender<PeerCommand>,
    /// Receiver half of the command channel.
    handle_rx: UnboundedReceiverStream<PeerCommand>,
    /// Buffered actions until the manager is polled.
    queued_actions: VecDeque<PeerAction>,
    /// Interval for triggering connections if there are free slots.
    refill_slots_interval: Interval,
    /// How to weigh reputation changes
    reputation_weights: ReputationChangeWeights,
    /// Tracks current slot stats.
    connection_info: ConnectionInfo,
    /// Tracks unwanted ips/peer ids.
    ban_list: BanList,
    /// Tracks currently backed off peers.
    ///
    /// Maps peer id to the instant at which the backoff expires.
    backed_off_peers: HashMap<PeerId, std::time::Instant>,
    /// Interval at which to check for peers to unban and release from the backoff map.
    release_interval: Interval,
    /// How long to ban bad peers.
    ban_duration: Duration,
    /// How long peers to which we could not connect for non-fatal reasons, e.g.
    /// [`DisconnectReason::TooManyPeers`], are put in time out.
    backoff_durations: PeerBackoffDurations,
    /// If non-trusted peers should be connected to, or the connection from non-trusted
    /// incoming peers should be accepted.
    trusted_nodes_only: bool,
    /// Timestamp of the last time [`Self::tick`] was called.
    last_tick: Instant,
    /// Maximum number of backoff attempts before we give up on a peer and dropping.
    max_backoff_count: u8,
    /// Tracks the connection state of the node
    net_connection_state: NetworkConnectionState,
    /// How long to temporarily ban ip on an incoming connection attempt.
    incoming_ip_throttle_duration: Duration,
}
impl PeersManager {
    /// Create a new instance with the given config
    ///
    /// Resolves and registers all configured trusted nodes up front (DNS names are
    /// resolved blocking) and seeds the peer table with the configured basic nodes.
    pub fn new(config: PeersConfig) -> Self {
        let PeersConfig {
            refill_slots_interval,
            connection_info,
            reputation_weights,
            ban_list,
            ban_duration,
            backoff_durations,
            trusted_nodes,
            trusted_nodes_only,
            trusted_nodes_resolution_interval,
            basic_nodes,
            max_backoff_count,
            incoming_ip_throttle_duration,
        } = config;
        let (manager_tx, handle_rx) = mpsc::unbounded_channel();
        let now = Instant::now();

        // We use half of the interval to decrease the max duration to `150%` in worst case
        let unban_interval = ban_duration.min(backoff_durations.low) / 2;

        let mut peers = HashMap::with_capacity(trusted_nodes.len() + basic_nodes.len());
        let mut trusted_peer_ids = HashSet::with_capacity(trusted_nodes.len());

        for trusted_peer in &trusted_nodes {
            match trusted_peer.resolve_blocking() {
                Ok(NodeRecord { address, tcp_port, udp_port, id }) => {
                    trusted_peer_ids.insert(id);
                    // `or_insert_with` keeps an existing entry; a node listed twice is
                    // only added once.
                    peers.entry(id).or_insert_with(|| {
                        Peer::trusted(PeerAddr::new_with_ports(address, tcp_port, Some(udp_port)))
                    });
                }
                Err(err) => {
                    // Unresolvable trusted peers are skipped; the periodic resolver may
                    // pick them up later.
                    warn!(target: "net::peers", ?err, "Failed to resolve trusted peer");
                }
            }
        }

        for NodeRecord { address, tcp_port, udp_port, id } in basic_nodes {
            peers.entry(id).or_insert_with(|| {
                Peer::new(PeerAddr::new_with_ports(address, tcp_port, Some(udp_port)))
            });
        }

        Self {
            peers,
            trusted_peer_ids,
            trusted_peers_resolver: TrustedPeersResolver::new(
                trusted_nodes,
                // re-resolve trusted-peer DNS names at the configured interval
                tokio::time::interval(trusted_nodes_resolution_interval),
            ),
            manager_tx,
            handle_rx: UnboundedReceiverStream::new(handle_rx),
            queued_actions: Default::default(),
            reputation_weights,
            refill_slots_interval: tokio::time::interval(refill_slots_interval),
            release_interval: tokio::time::interval_at(now + unban_interval, unban_interval),
            connection_info: ConnectionInfo::new(connection_info),
            ban_list,
            backed_off_peers: Default::default(),
            ban_duration,
            backoff_durations,
            trusted_nodes_only,
            last_tick: Instant::now(),
            max_backoff_count,
            net_connection_state: NetworkConnectionState::default(),
            incoming_ip_throttle_duration,
        }
    }
    /// Returns a new [`PeersHandle`] that can send commands to this type.
    pub(crate) fn handle(&self) -> PeersHandle {
        PeersHandle::new(self.manager_tx.clone())
    }

    /// Returns the number of peers in the peer set
    #[inline]
    pub(crate) fn num_known_peers(&self) -> usize {
        self.peers.len()
    }

    /// Returns an iterator over all peers
    pub(crate) fn iter_peers(&self) -> impl Iterator<Item = NodeRecord> + '_ {
        self.peers.iter().map(|(peer_id, v)| {
            NodeRecord::new_with_ports(
                v.addr.tcp().ip(),
                v.addr.tcp().port(),
                v.addr.udp().map(|addr| addr.port()),
                *peer_id,
            )
        })
    }

    /// Returns the `NodeRecord` and `PeerKind` for the given peer id
    pub(crate) fn peer_by_id(&self, peer_id: PeerId) -> Option<(NodeRecord, PeerKind)> {
        self.peers.get(&peer_id).map(|v| {
            (
                NodeRecord::new_with_ports(
                    v.addr.tcp().ip(),
                    v.addr.tcp().port(),
                    v.addr.udp().map(|addr| addr.port()),
                    peer_id,
                ),
                v.kind,
            )
        })
    }

    /// Returns an iterator over all peer ids for peers with the given kind
    pub(crate) fn peers_by_kind(&self, kind: PeerKind) -> impl Iterator<Item = PeerId> + '_ {
        self.peers.iter().filter_map(move |(peer_id, peer)| (peer.kind == kind).then_some(*peer_id))
    }

    /// Returns the number of currently active inbound connections.
    #[inline]
    pub(crate) const fn num_inbound_connections(&self) -> usize {
        self.connection_info.num_inbound
    }

    /// Returns the number of currently __active__ outbound connections.
    #[inline]
    pub(crate) const fn num_outbound_connections(&self) -> usize {
        self.connection_info.num_outbound
    }

    /// Returns the number of currently pending outbound connections.
    #[inline]
    pub(crate) const fn num_pending_outbound_connections(&self) -> usize {
        self.connection_info.num_pending_out
    }

    /// Returns the number of currently backed off peers.
    #[inline]
    pub(crate) fn num_backed_off_peers(&self) -> usize {
        self.backed_off_peers.len()
    }

    /// Returns the number of idle trusted peers.
    ///
    /// "Idle" means the peer is tracked but has no active or pending connection.
    fn num_idle_trusted_peers(&self) -> usize {
        self.peers.iter().filter(|(_, peer)| peer.kind.is_trusted() && peer.state.is_idle()).count()
    }
    /// Invoked when a new _incoming_ tcp connection is accepted.
    ///
    /// returns an error if the inbound ip address is on the ban list
    ///
    /// Also enforces inbound slot capacity, with a carve-out so trusted peers can always
    /// connect even when `max_inbound` is exhausted (or zero).
    pub(crate) fn on_incoming_pending_session(
        &mut self,
        addr: IpAddr,
    ) -> Result<(), InboundConnectionError> {
        if self.ban_list.is_banned_ip(&addr) {
            return Err(InboundConnectionError::IpBanned)
        }

        // check if we even have slots for a new incoming connection
        if !self.connection_info.has_in_capacity() {
            if self.trusted_peer_ids.is_empty() {
                // if we don't have any incoming slots and no trusted peers, we don't accept any new
                // connections
                return Err(InboundConnectionError::ExceedsCapacity)
            }

            // there's an edge case here where no incoming connections besides from trusted peers
            // are allowed (max_inbound == 0), in which case we still need to allow new pending
            // incoming connections until all trusted peers are connected.
            let num_idle_trusted_peers = self.num_idle_trusted_peers();
            if num_idle_trusted_peers <= self.trusted_peer_ids.len() {
                // we still want to limit concurrent pending connections
                let max_inbound =
                    self.trusted_peer_ids.len().max(self.connection_info.config.max_inbound);
                if self.connection_info.num_pending_in < max_inbound {
                    self.connection_info.inc_pending_in();
                    return Ok(())
                }
            }

            // all trusted peers are either connected or connecting
            return Err(InboundConnectionError::ExceedsCapacity)
        }

        // also cap the incoming connections we can process at once
        if !self.connection_info.has_in_pending_capacity() {
            return Err(InboundConnectionError::ExceedsCapacity)
        }

        // apply the rate limit
        self.throttle_incoming_ip(addr);

        self.connection_info.inc_pending_in();
        Ok(())
    }
    /// Invoked when a previous call to [`Self::on_incoming_pending_session`] succeeded but it was
    /// rejected.
    pub(crate) const fn on_incoming_pending_session_rejected_internally(&mut self) {
        self.connection_info.decr_pending_in();
    }

    /// Invoked when a pending session was closed.
    pub(crate) const fn on_incoming_pending_session_gracefully_closed(&mut self) {
        self.connection_info.decr_pending_in()
    }

    /// Invoked when a pending session was closed.
    ///
    /// Fatal protocol errors additionally ban the remote ip (and, when warranted, ban it
    /// from discovery as well) before the pending-in counter is released.
    pub(crate) fn on_incoming_pending_session_dropped(
        &mut self,
        remote_addr: SocketAddr,
        err: &PendingSessionHandshakeError,
    ) {
        if err.is_fatal_protocol_error() {
            self.ban_ip(remote_addr.ip());

            if err.merits_discovery_ban() {
                self.queued_actions
                    .push_back(PeerAction::DiscoveryBanIp { ip_addr: remote_addr.ip() })
            }
        }
        self.connection_info.decr_pending_in();
    }
    /// Called when a new _incoming_ active session was established to the given peer.
    ///
    /// This will update the state of the peer if not yet tracked.
    ///
    /// If the reputation of the peer is below the `BANNED_REPUTATION` threshold, a disconnect will
    /// be scheduled.
    pub(crate) fn on_incoming_session_established(&mut self, peer_id: PeerId, addr: SocketAddr) {
        self.connection_info.decr_pending_in();

        // we only need to check the peer id here as the ip address will have been checked at
        // on_incoming_pending_session. We also check if the peer is in the backoff list here.
        if self.ban_list.is_banned_peer(&peer_id) {
            self.queued_actions.push_back(PeerAction::DisconnectBannedIncoming { peer_id });
            return
        }

        // check if the peer is trustable or not
        let mut is_trusted = self.trusted_peer_ids.contains(&peer_id);
        if self.trusted_nodes_only && !is_trusted {
            self.queued_actions.push_back(PeerAction::DisconnectUntrustedIncoming { peer_id });
            return
        }

        // start a new tick, so the peer is not immediately rewarded for the time since last tick
        self.tick();

        match self.peers.entry(peer_id) {
            Entry::Occupied(mut entry) => {
                let peer = entry.get_mut();
                if peer.is_banned() {
                    self.queued_actions.push_back(PeerAction::DisconnectBannedIncoming { peer_id });
                    return
                }
                // it might be the case that we're also trying to connect to this peer at the same
                // time, so we need to adjust the state here
                if peer.state.is_pending_out() {
                    self.connection_info.decr_state(peer.state);
                }

                peer.state = PeerConnectionState::In;

                is_trusted = is_trusted || peer.is_trusted();
            }
            Entry::Vacant(entry) => {
                // peer is missing in the table, we add it but mark it as to be removed after
                // disconnect, because we only know the outgoing port
                let mut peer = Peer::with_state(PeerAddr::from_tcp(addr), PeerConnectionState::In);
                peer.remove_after_disconnect = true;
                entry.insert(peer);
                self.queued_actions.push_back(PeerAction::PeerAdded(peer_id));
            }
        }

        // capture capacity *before* counting this connection so trusted peers are exempt
        // from the disconnect below
        let has_in_capacity = self.connection_info.has_in_capacity();
        // increment new incoming connection
        self.connection_info.inc_in();

        // disconnect the peer if we don't have capacity for more inbound connections
        if !is_trusted && !has_in_capacity {
            self.queued_actions.push_back(PeerAction::Disconnect {
                peer_id,
                reason: Some(DisconnectReason::TooManyPeers),
            });
        }
    }
/// Bans the peer temporarily with the configured ban timeout
fn ban_peer(&mut self, peer_id: PeerId) {
let mut ban_duration = self.ban_duration;
if let Some(peer) = self.peers.get(&peer_id) {
if peer.is_trusted() || peer.is_static() {
// For misbehaving trusted or static peers, we provide a bit more leeway when
// penalizing them.
ban_duration = self.backoff_durations.low / 2;
}
}
self.ban_list.ban_peer_until(peer_id, std::time::Instant::now() + ban_duration);
self.queued_actions.push_back(PeerAction::BanPeer { peer_id });
}
    /// Bans the IP temporarily with the configured ban timeout
    ///
    /// Only affects the local ban list; no discovery-layer ban is issued here.
    fn ban_ip(&mut self, ip: IpAddr) {
        self.ban_list.ban_ip_until(ip, std::time::Instant::now() + self.ban_duration);
    }
    /// Bans the IP temporarily to rate limit inbound connection attempts per IP.
    ///
    /// Uses the (short) `incoming_ip_throttle_duration` rather than the full ban timeout.
    fn throttle_incoming_ip(&mut self, ip: IpAddr) {
        self.ban_list
            .ban_ip_until(ip, std::time::Instant::now() + self.incoming_ip_throttle_duration);
    }
/// Temporarily puts the peer in timeout by inserting it into the backedoff peers set
fn backoff_peer_until(&mut self, peer_id: PeerId, until: std::time::Instant) {
trace!(target: "net::peers", ?peer_id, "backing off");
if let Some(peer) = self.peers.get_mut(&peer_id) {
peer.backed_off = true;
self.backed_off_peers.insert(peer_id, until);
}
}
    /// Unbans the peer
    ///
    /// Removes it from the local ban list and queues an `UnBanPeer` action for listeners.
    fn unban_peer(&mut self, peer_id: PeerId) {
        self.ban_list.unban_peer(&peer_id);
        self.queued_actions.push_back(PeerAction::UnBanPeer { peer_id });
    }
/// Tick function to update reputation of all connected peers.
/// Peers are rewarded with reputation increases for the time they are connected since the last
/// tick. This is to prevent peers from being disconnected eventually due to slashed
/// reputation because of some bad messages (most likely transaction related)
fn tick(&mut self) {
let now = Instant::now();
// Determine the number of seconds since the last tick.
// Ensuring that now is always greater than last_tick to account for issues with system
// time.
let secs_since_last_tick =
if self.last_tick > now { 0 } else { (now - self.last_tick).as_secs() as i32 };
self.last_tick = now;
// update reputation via seconds connected
for peer in self.peers.iter_mut().filter(|(_, peer)| peer.state.is_connected()) {
// update reputation via seconds connected, but keep the target _around_ the default
// reputation.
if peer.1.reputation < DEFAULT_REPUTATION {
peer.1.reputation += secs_since_last_tick;
}
}
}
/// Returns the tracked reputation for a peer.
pub(crate) fn get_reputation(&self, peer_id: &PeerId) -> Option<i32> {
self.peers.get(peer_id).map(|peer| peer.reputation)
}
    /// Apply the corresponding reputation change to the given peer.
    ///
    /// If the peer is a trusted peer, it will be exempt from reputation slashing for certain
    /// reputation changes that can be attributed to network conditions. If the peer is a
    /// trusted peer, it will also be less strict with the reputation slashing.
    pub(crate) fn apply_reputation_change(&mut self, peer_id: &PeerId, rep: ReputationChangeKind) {
        let outcome = if let Some(peer) = self.peers.get_mut(peer_id) {
            // First check if we should reset the reputation
            if rep.is_reset() {
                peer.reset_reputation()
            } else {
                let mut reputation_change = self.reputation_weights.change(rep).as_i32();
                if peer.is_trusted() || peer.is_static() {
                    // exempt trusted and static peers from reputation slashing for
                    // network-condition-related changes: no reputation change is applied at all
                    if matches!(
                        rep,
                        ReputationChangeKind::Dropped |
                            ReputationChangeKind::BadAnnouncement |
                            ReputationChangeKind::Timeout |
                            ReputationChangeKind::AlreadySeenTransaction
                    ) {
                        return
                    }
                    // also be less strict with the reputation slashing for trusted peers
                    if reputation_change < MAX_TRUSTED_PEER_REPUTATION_CHANGE {
                        // this caps the reputation change to the maximum allowed for trusted peers
                        reputation_change = MAX_TRUSTED_PEER_REPUTATION_CHANGE;
                    }
                }
                peer.apply_reputation(reputation_change, rep)
            }
        } else {
            // unknown peer: nothing to slash
            return
        };
        // act on whatever threshold the reputation change crossed
        match outcome {
            ReputationChangeOutcome::None => {}
            ReputationChangeOutcome::Ban => {
                self.ban_peer(*peer_id);
            }
            ReputationChangeOutcome::Unban => self.unban_peer(*peer_id),
            ReputationChangeOutcome::DisconnectAndBan => {
                self.queued_actions.push_back(PeerAction::Disconnect {
                    peer_id: *peer_id,
                    reason: Some(DisconnectReason::DisconnectRequested),
                });
                self.ban_peer(*peer_id);
            }
        }
    }
/// Gracefully disconnected a pending _outgoing_ session
pub(crate) fn on_outgoing_pending_session_gracefully_closed(&mut self, peer_id: &PeerId) {
if let Some(peer) = self.peers.get_mut(peer_id) {
self.connection_info.decr_state(peer.state);
peer.state = PeerConnectionState::Idle;
}
}
    /// Invoked when an _outgoing_ pending session was closed during authentication or the
    /// handshake.
    ///
    /// Delegates to the shared failure handler with a `FailedToConnect` reputation change.
    pub(crate) fn on_outgoing_pending_session_dropped(
        &mut self,
        remote_addr: &SocketAddr,
        peer_id: &PeerId,
        err: &PendingSessionHandshakeError,
    ) {
        self.on_connection_failure(remote_addr, peer_id, err, ReputationChangeKind::FailedToConnect)
    }
    /// Gracefully disconnected an active session
    ///
    /// Peers marked `remove_after_disconnect` (and not trusted) are dropped from the set;
    /// otherwise the peer is reset to idle. A freed slot triggers an attempt to fill outbound
    /// slots again.
    pub(crate) fn on_active_session_gracefully_closed(&mut self, peer_id: PeerId) {
        match self.peers.entry(peer_id) {
            Entry::Occupied(mut entry) => {
                self.connection_info.decr_state(entry.get().state);
                if entry.get().remove_after_disconnect && !entry.get().is_trusted() {
                    // this peer should be removed from the set
                    entry.remove();
                    self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id));
                } else {
                    // reset the peer's state
                    // we reset the backoff counter since we're able to establish a successful
                    // session to that peer
                    entry.get_mut().severe_backoff_counter = 0;
                    entry.get_mut().state = PeerConnectionState::Idle;
                    // peer stays tracked; early return skips `fill_outbound_slots` below
                    return
                }
            }
            Entry::Vacant(_) => return,
        }
        self.fill_outbound_slots();
    }
    /// Called when a _pending_ outbound connection is successful.
    ///
    /// Moves the peer's accounting from its pending state to the established outbound state.
    pub(crate) fn on_active_outgoing_established(&mut self, peer_id: PeerId) {
        if let Some(peer) = self.peers.get_mut(&peer_id) {
            self.connection_info.decr_state(peer.state);
            self.connection_info.inc_out();
            peer.state = PeerConnectionState::Out;
        }
    }
    /// Called when an _active_ session to a peer was forcefully dropped due to an error.
    ///
    /// Depending on whether the error is fatal, the peer will be removed from the peer set
    /// otherwise its reputation is slashed.
    pub(crate) fn on_active_session_dropped(
        &mut self,
        remote_addr: &SocketAddr,
        peer_id: &PeerId,
        err: &EthStreamError,
    ) {
        // delegate to the shared failure handler with a `Dropped` reputation change
        self.on_connection_failure(remote_addr, peer_id, err, ReputationChangeKind::Dropped)
    }
    /// Called when an attempt to create an _outgoing_ pending session failed while setting up a tcp
    /// connection.
    pub(crate) fn on_outgoing_connection_failure(
        &mut self,
        remote_addr: &SocketAddr,
        peer_id: &PeerId,
        err: &io::Error,
    ) {
        // there's a race condition where we accepted an incoming connection while we were trying to
        // connect to the same peer at the same time. if the outgoing connection failed
        // after the incoming connection was accepted, we can ignore this error
        if let Some(peer) = self.peers.get(peer_id) {
            if peer.state.is_incoming() {
                // we already have an active connection to the peer, so we can ignore this error
                return
            }
            if peer.is_trusted() && is_connection_failed_reputation(peer.reputation) {
                // trigger resolution task for trusted peer since multiple connection failures
                // occurred
                self.trusted_peers_resolver.interval.reset_immediately();
            }
        }
        // otherwise handle it as a regular failed connection attempt
        self.on_connection_failure(remote_addr, peer_id, err, ReputationChangeKind::FailedToConnect)
    }
    /// Shared handler for failed outgoing/active connections.
    ///
    /// Fatal protocol errors ban the peer and remove it from the set (trusted peers are kept and
    /// reset to idle instead). Non-fatal errors either back the peer off or slash its reputation
    /// by `reputation_change`; peers that exceeded the maximum backoff count are removed unless
    /// trusted or static. Finally, an attempt is made to refill the freed outbound slot.
    fn on_connection_failure(
        &mut self,
        remote_addr: &SocketAddr,
        peer_id: &PeerId,
        err: impl SessionError,
        reputation_change: ReputationChangeKind,
    ) {
        trace!(target: "net::peers", ?remote_addr, ?peer_id, %err, "handling failed connection");
        if err.is_fatal_protocol_error() {
            trace!(target: "net::peers", ?remote_addr, ?peer_id, %err, "fatal connection error");
            // remove the peer to which we can't establish a connection due to protocol related
            // issues.
            if let Entry::Occupied(mut entry) = self.peers.entry(*peer_id) {
                self.connection_info.decr_state(entry.get().state);
                // only remove if the peer is not trusted
                if entry.get().is_trusted() {
                    entry.get_mut().state = PeerConnectionState::Idle;
                } else {
                    entry.remove();
                    self.queued_actions.push_back(PeerAction::PeerRemoved(*peer_id));
                    // If the error is caused by a peer that should be banned from discovery
                    if err.merits_discovery_ban() {
                        self.queued_actions.push_back(PeerAction::DiscoveryBanPeerId {
                            peer_id: *peer_id,
                            ip_addr: remote_addr.ip(),
                        })
                    }
                }
            }
            // ban the peer
            self.ban_peer(*peer_id);
        } else {
            let mut backoff_until = None;
            let mut remove_peer = false;
            if let Some(peer) = self.peers.get_mut(peer_id) {
                if let Some(kind) = err.should_backoff() {
                    if peer.is_trusted() || peer.is_static() {
                        // provide a bit more leeway for trusted peers and use a lower backoff so
                        // that we keep re-trying them after backing off shortly
                        let backoff = self.backoff_durations.low / 2;
                        backoff_until = Some(std::time::Instant::now() + backoff);
                    } else {
                        // Increment peer.backoff_counter
                        if kind.is_severe() {
                            peer.severe_backoff_counter =
                                peer.severe_backoff_counter.saturating_add(1);
                        }
                        let backoff_time =
                            self.backoff_durations.backoff_until(kind, peer.severe_backoff_counter);
                        // The peer has signaled that it is currently unable to process any more
                        // connections, so we will hold off on attempting any new connections for a
                        // while
                        backoff_until = Some(backoff_time);
                    }
                } else {
                    // If the error was not a backoff error, we reduce the peer's reputation
                    let reputation_change = self.reputation_weights.change(reputation_change);
                    peer.reputation = peer.reputation.saturating_add(reputation_change.as_i32());
                };
                // the connection attempt is over: release the slot and reset to idle
                self.connection_info.decr_state(peer.state);
                peer.state = PeerConnectionState::Idle;
                if peer.severe_backoff_counter > self.max_backoff_count &&
                    !peer.is_trusted() &&
                    !peer.is_static()
                {
                    // mark peer for removal if it has been backoff too many times and is _not_
                    // trusted or static
                    remove_peer = true;
                }
            }
            // remove peer if it has been marked for removal
            if remove_peer {
                let (peer_id, _) = self.peers.remove_entry(peer_id).expect("peer must exist");
                self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id));
            } else if let Some(backoff_until) = backoff_until {
                // otherwise, backoff the peer if marked as such
                self.backoff_peer_until(*peer_id, backoff_until);
            }
        }
        self.fill_outbound_slots();
    }
    /// Invoked if a pending session was disconnected because there's already a connection to the
    /// peer.
    ///
    /// If the session was an outgoing connection, this means that the peer initiated a connection
    /// to us at the same time and this connection is already established.
    pub(crate) const fn on_already_connected(&mut self, direction: Direction) {
        match direction {
            Direction::Incoming => {
                // need to decrement the ingoing counter
                self.connection_info.decr_pending_in();
            }
            Direction::Outgoing(_) => {
                // cleanup is handled when the incoming active session is activated in
                // `on_incoming_session_established`
            }
        }
    }
/// Called as follow-up for a discovered peer.
///
/// The [`ForkId`] is retrieved from an ENR record that the peer announces over the discovery
/// protocol
pub(crate) fn set_discovered_fork_id(&mut self, peer_id: PeerId, fork_id: ForkId) {
if let Some(peer) = self.peers.get_mut(&peer_id) {
trace!(target: "net::peers", ?peer_id, ?fork_id, "set discovered fork id");
peer.fork_id = Some(fork_id);
}
}
    /// Called for a newly discovered peer.
    ///
    /// If the peer already exists, then the address, kind and `fork_id` will be updated.
    /// Convenience wrapper around [`Self::add_peer_kind`] using the basic peer kind.
    pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: PeerAddr, fork_id: Option<ForkId>) {
        self.add_peer_kind(peer_id, PeerKind::Basic, addr, fork_id)
    }
    /// Marks the given peer as trusted.
    ///
    /// Only records the id in the trusted set; it does not insert the peer into the peer table.
    pub(crate) fn add_trusted_peer_id(&mut self, peer_id: PeerId) {
        self.trusted_peer_ids.insert(peer_id);
    }
    /// Called for a newly discovered trusted peer.
    ///
    /// If the peer already exists, then the address and kind will be updated.
    #[cfg_attr(not(test), expect(dead_code))]
    pub(crate) fn add_trusted_peer(&mut self, peer_id: PeerId, addr: PeerAddr) {
        self.add_peer_kind(peer_id, PeerKind::Trusted, addr, None)
    }
    /// Called for a newly discovered peer.
    ///
    /// If the peer already exists, then the address, kind and `fork_id` will be updated.
    /// Banned peers/IPs are ignored entirely; trusted kinds are additionally recorded in the
    /// trusted-id set.
    pub(crate) fn add_peer_kind(
        &mut self,
        peer_id: PeerId,
        kind: PeerKind,
        addr: PeerAddr,
        fork_id: Option<ForkId>,
    ) {
        if self.ban_list.is_banned(&peer_id, &addr.tcp().ip()) {
            return
        }
        match self.peers.entry(peer_id) {
            Entry::Occupied(mut entry) => {
                let peer = entry.get_mut();
                peer.kind = kind;
                peer.fork_id = fork_id;
                peer.addr = addr;
                if peer.state.is_incoming() {
                    // now that we have an actual discovered address, for that peer and not just the
                    // ip of the incoming connection, we don't need to remove the peer after
                    // disconnecting, See `on_incoming_session_established`
                    peer.remove_after_disconnect = false;
                }
            }
            Entry::Vacant(entry) => {
                trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "discovered new node");
                let mut peer = Peer::with_kind(addr, kind);
                peer.fork_id = fork_id;
                entry.insert(peer);
                self.queued_actions.push_back(PeerAction::PeerAdded(peer_id));
            }
        }
        if kind.is_trusted() {
            self.trusted_peer_ids.insert(peer_id);
        }
    }
/// Removes the tracked node from the set.
pub(crate) fn remove_peer(&mut self, peer_id: PeerId) {
let Entry::Occupied(entry) = self.peers.entry(peer_id) else { return };
if entry.get().is_trusted() {
return
}
let mut peer = entry.remove();
trace!(target: "net::peers", ?peer_id, "remove discovered node");
self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id));
if peer.state.is_connected() {
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/swarm.rs | crates/net/network/src/swarm.rs | use crate::{
listener::{ConnectionListener, ListenerEvent},
message::PeerMessage,
peers::InboundConnectionError,
protocol::IntoRlpxSubProtocol,
session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager},
state::{NetworkState, StateAction},
};
use futures::Stream;
use reth_eth_wire::{
errors::EthStreamError, Capabilities, DisconnectReason, EthNetworkPrimitives, EthVersion,
NetworkPrimitives, UnifiedStatus,
};
use reth_network_api::{PeerRequest, PeerRequestSender};
use reth_network_peers::PeerId;
use std::{
io,
net::SocketAddr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tracing::{debug, trace};
#[cfg_attr(doc, aquamarine::aquamarine)]
/// Contains the connectivity related state of the network.
///
/// A swarm emits [`SwarmEvent`]s when polled.
///
/// It manages the [`ConnectionListener`] and delegates new incoming connections to the
/// [`SessionManager`]. Outgoing connections are either initiated on demand or triggered by the
/// [`NetworkState`] and also delegated to the [`NetworkState`].
///
/// Following diagram displays the dataflow contained in the [`Swarm`]
///
/// The [`ConnectionListener`] yields incoming [`TcpStream`]s from peers that are spawned as session
/// tasks. After a successful `RLPx` authentication, the task is ready to accept ETH requests or
/// broadcast messages. A task listens for messages from the [`SessionManager`] which include
/// broadcast messages like `Transactions` or internal commands, for example to disconnect the
/// session.
///
/// The [`NetworkState`] keeps track of all connected and discovered peers and can initiate outgoing
/// connections. For each active session, the [`NetworkState`] keeps a sender half of the ETH
/// request channel for the created session and sends requests it receives from the
/// [`StateFetcher`], which receives request objects from the client interfaces responsible for
/// downloading headers and bodies.
///
/// include_mmd!("docs/mermaid/swarm.mmd")
#[derive(Debug)]
#[must_use = "Swarm does nothing unless polled"]
pub(crate) struct Swarm<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Listens for new incoming connections.
    incoming: ConnectionListener,
    /// All sessions.
    sessions: SessionManager<N>,
    /// Tracks the entire state of the network and handles events received from the sessions.
    state: NetworkState<N>,
}
// === impl Swarm ===
impl<N: NetworkPrimitives> Swarm<N> {
    /// Configures a new swarm instance.
    pub(crate) const fn new(
        incoming: ConnectionListener,
        sessions: SessionManager<N>,
        state: NetworkState<N>,
    ) -> Self {
        Self { incoming, sessions, state }
    }
    /// Adds a protocol handler to the `RLPx` sub-protocol list.
    pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
        self.sessions_mut().add_rlpx_sub_protocol(protocol);
    }
    /// Access to the state.
    pub(crate) const fn state(&self) -> &NetworkState<N> {
        &self.state
    }
    /// Mutable access to the state.
    pub(crate) const fn state_mut(&mut self) -> &mut NetworkState<N> {
        &mut self.state
    }
    /// Access to the [`ConnectionListener`].
    pub(crate) const fn listener(&self) -> &ConnectionListener {
        &self.incoming
    }
    /// Access to the [`SessionManager`].
    pub(crate) const fn sessions(&self) -> &SessionManager<N> {
        &self.sessions
    }
    /// Mutable access to the [`SessionManager`].
    pub(crate) const fn sessions_mut(&mut self) -> &mut SessionManager<N> {
        &mut self.sessions
    }
}
impl<N: NetworkPrimitives> Swarm<N> {
    /// Triggers a new outgoing connection to the given node
    ///
    /// Delegates the dial to the [`SessionManager`], which spawns the pending session task.
    pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) {
        self.sessions.dial_outbound(remote_addr, remote_id)
    }
    /// Handles a polled [`SessionEvent`]
    ///
    /// This either updates the state or produces a new [`SwarmEvent`] that is bubbled up to the
    /// manager. Returns `None` when the event was fully handled internally.
    fn on_session_event(&mut self, event: SessionEvent<N>) -> Option<SwarmEvent<N>> {
        match event {
            SessionEvent::SessionEstablished {
                peer_id,
                remote_addr,
                client_version,
                capabilities,
                version,
                status,
                messages,
                direction,
                timeout,
                range_info,
            } => {
                // register the session with the network state first, then surface the event
                self.state.on_session_activated(
                    peer_id,
                    capabilities.clone(),
                    status.clone(),
                    messages.clone(),
                    timeout,
                    range_info,
                );
                Some(SwarmEvent::SessionEstablished {
                    peer_id,
                    remote_addr,
                    client_version,
                    capabilities,
                    version,
                    messages,
                    status,
                    direction,
                })
            }
            SessionEvent::AlreadyConnected { peer_id, remote_addr, direction } => {
                trace!(target: "net", ?peer_id, ?remote_addr, ?direction, "already connected");
                self.state.peers_mut().on_already_connected(direction);
                None
            }
            SessionEvent::ValidMessage { peer_id, message } => {
                Some(SwarmEvent::ValidMessage { peer_id, message })
            }
            SessionEvent::IncomingPendingSessionClosed { remote_addr, error } => {
                Some(SwarmEvent::IncomingPendingSessionClosed { remote_addr, error })
            }
            SessionEvent::OutgoingPendingSessionClosed { remote_addr, peer_id, error } => {
                Some(SwarmEvent::OutgoingPendingSessionClosed { remote_addr, peer_id, error })
            }
            SessionEvent::Disconnected { peer_id, remote_addr } => {
                self.state.on_session_closed(peer_id);
                Some(SwarmEvent::SessionClosed { peer_id, remote_addr, error: None })
            }
            SessionEvent::SessionClosedOnConnectionError { peer_id, remote_addr, error } => {
                self.state.on_session_closed(peer_id);
                Some(SwarmEvent::SessionClosed { peer_id, remote_addr, error: Some(error) })
            }
            SessionEvent::OutgoingConnectionError { remote_addr, peer_id, error } => {
                Some(SwarmEvent::OutgoingConnectionError { peer_id, remote_addr, error })
            }
            SessionEvent::BadMessage { peer_id } => Some(SwarmEvent::BadMessage { peer_id }),
            SessionEvent::ProtocolBreach { peer_id } => {
                Some(SwarmEvent::ProtocolBreach { peer_id })
            }
        }
    }
    /// Callback for events produced by [`ConnectionListener`].
    ///
    /// Depending on the event, this will produce a new [`SwarmEvent`]. Incoming streams are
    /// admission-checked (shutdown state, bans, capacity) before being handed to the
    /// [`SessionManager`].
    fn on_connection(&mut self, event: ListenerEvent) -> Option<SwarmEvent<N>> {
        match event {
            ListenerEvent::Error(err) => return Some(SwarmEvent::TcpListenerError(err)),
            ListenerEvent::ListenerClosed { local_address: address } => {
                return Some(SwarmEvent::TcpListenerClosed { remote_addr: address })
            }
            ListenerEvent::Incoming { stream, remote_addr } => {
                // Reject incoming connection if node is shutting down.
                if self.is_shutting_down() {
                    return None
                }
                // ensure we can handle an incoming connection from this address
                if let Err(err) =
                    self.state_mut().peers_mut().on_incoming_pending_session(remote_addr.ip())
                {
                    match err {
                        InboundConnectionError::IpBanned => {
                            trace!(target: "net", ?remote_addr, "The incoming ip address is in the ban list");
                        }
                        InboundConnectionError::ExceedsCapacity => {
                            trace!(target: "net", ?remote_addr, "No capacity for incoming connection");
                            // politely tell the remote why we're closing the stream
                            self.sessions.try_disconnect_incoming_connection(
                                stream,
                                DisconnectReason::TooManyPeers,
                            );
                        }
                    }
                    return None
                }
                match self.sessions.on_incoming(stream, remote_addr) {
                    Ok(session_id) => {
                        trace!(target: "net", ?remote_addr, "Incoming connection");
                        return Some(SwarmEvent::IncomingTcpConnection { session_id, remote_addr })
                    }
                    Err(err) => {
                        trace!(target: "net", %err, "Incoming connection rejected, capacity already reached.");
                        // release the pending-in slot reserved above
                        self.state_mut()
                            .peers_mut()
                            .on_incoming_pending_session_rejected_internally();
                    }
                }
            }
        }
        None
    }
/// Hook for actions pulled from the state
fn on_state_action(&mut self, event: StateAction<N>) -> Option<SwarmEvent<N>> {
match event {
StateAction::Connect { remote_addr, peer_id } => {
self.dial_outbound(remote_addr, peer_id);
return Some(SwarmEvent::OutgoingTcpConnection { remote_addr, peer_id })
}
StateAction::Disconnect { peer_id, reason } => {
self.sessions.disconnect(peer_id, reason);
}
StateAction::NewBlock { peer_id, block: msg } => {
let msg = PeerMessage::NewBlock(msg);
self.sessions.send_message(&peer_id, msg);
}
StateAction::NewBlockHashes { peer_id, hashes } => {
let msg = PeerMessage::NewBlockHashes(hashes);
self.sessions.send_message(&peer_id, msg);
}
StateAction::PeerAdded(peer_id) => return Some(SwarmEvent::PeerAdded(peer_id)),
StateAction::PeerRemoved(peer_id) => return Some(SwarmEvent::PeerRemoved(peer_id)),
StateAction::DiscoveredNode { peer_id, addr, fork_id } => {
// Don't try to connect to peer if node is shutting down
if self.is_shutting_down() {
return None
}
// Insert peer only if no fork id or a valid fork id
if fork_id.map_or_else(|| true, |f| self.sessions.is_valid_fork_id(f)) {
self.state_mut().peers_mut().add_peer(peer_id, addr, fork_id);
}
}
StateAction::DiscoveredEnrForkId { peer_id, fork_id } => {
if self.sessions.is_valid_fork_id(fork_id) {
self.state_mut().peers_mut().set_discovered_fork_id(peer_id, fork_id);
} else {
debug!(target: "net", ?peer_id, remote_fork_id=?fork_id, our_fork_id=?self.sessions.fork_id(), "fork id mismatch, removing peer");
self.state_mut().peers_mut().remove_peer(peer_id);
}
}
}
None
}
    /// Set network connection state to `ShuttingDown`
    ///
    /// Once set, no new incoming or outgoing connections are accepted/initiated.
    pub(crate) const fn on_shutdown_requested(&mut self) {
        self.state_mut().peers_mut().on_shutdown();
    }
    /// Checks if the node's network connection state is '`ShuttingDown`'
    #[inline]
    pub(crate) const fn is_shutting_down(&self) -> bool {
        self.state().peers().connection_state().is_shutting_down()
    }
    /// Set network connection state to `Hibernate` or `Active`
    ///
    /// Forwards the new state to the peers manager, which gates outbound dialing on it.
    pub(crate) const fn on_network_state_change(&mut self, network_state: NetworkConnectionState) {
        self.state_mut().peers_mut().on_network_state_change(network_state);
    }
}
impl<N: NetworkPrimitives> Stream for Swarm<N> {
    type Item = SwarmEvent<N>;
    /// This advances all components.
    ///
    /// Processes, delegates (internal) commands received from the
    /// [`NetworkManager`](crate::NetworkManager), then polls the [`SessionManager`] which
    /// yields messages produced by individual peer sessions that are then handled. Least
    /// priority are incoming connections that are handled and delegated to
    /// the [`SessionManager`] to turn them into a session.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // This loop advances the network's state prioritizing local work [NetworkState] over work
        // coming in from the network [SessionManager], [ConnectionListener]
        // Existing connections are prioritized over new __incoming__ connections
        loop {
            // drain state actions fully before looking at session/listener work
            while let Poll::Ready(action) = this.state.poll(cx) {
                if let Some(event) = this.on_state_action(action) {
                    return Poll::Ready(Some(event))
                }
            }
            // poll all sessions
            match this.sessions.poll(cx) {
                Poll::Pending => {}
                Poll::Ready(event) => {
                    if let Some(event) = this.on_session_event(event) {
                        return Poll::Ready(Some(event))
                    }
                    // a session event was handled internally; re-run the loop so the state is
                    // polled again first
                    continue
                }
            }
            // poll listener for incoming connections
            match Pin::new(&mut this.incoming).poll(cx) {
                Poll::Pending => {}
                Poll::Ready(event) => {
                    if let Some(event) = this.on_connection(event) {
                        return Poll::Ready(Some(event))
                    }
                    continue
                }
            }
            // all sources pending: park until woken
            return Poll::Pending
        }
    }
}
/// All events created or delegated by the [`Swarm`] that represents changes to the state of the
/// network.
pub(crate) enum SwarmEvent<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Events related to the actual network protocol.
    ValidMessage {
        /// The peer that sent the message
        peer_id: PeerId,
        /// Message received from the peer
        message: PeerMessage<N>,
    },
    /// Received a bad message from the peer.
    BadMessage {
        /// Identifier of the remote peer.
        peer_id: PeerId,
    },
    /// Remote peer is considered in protocol violation
    ProtocolBreach {
        /// Identifier of the remote peer.
        peer_id: PeerId,
    },
    /// The underlying tcp listener closed.
    TcpListenerClosed {
        /// Address of the closed listener.
        remote_addr: SocketAddr,
    },
    /// The underlying tcp listener encountered an error that we bubble up.
    TcpListenerError(io::Error),
    /// Received an incoming tcp connection.
    ///
    /// This represents the first step in the session authentication process. The swarm will
    /// produce subsequent events once the stream has been authenticated, or was rejected.
    IncomingTcpConnection {
        /// The internal session identifier under which this connection is currently tracked.
        session_id: SessionId,
        /// Address of the remote peer.
        remote_addr: SocketAddr,
    },
    /// An outbound connection is initiated.
    OutgoingTcpConnection {
        /// Identifier of the remote peer.
        peer_id: PeerId,
        /// Address of the remote peer.
        remote_addr: SocketAddr,
    },
    /// A new session was established (authentication and handshake completed).
    SessionEstablished {
        /// Identifier of the remote peer.
        peer_id: PeerId,
        /// Address of the remote peer.
        remote_addr: SocketAddr,
        /// Client identifier announced by the remote peer.
        client_version: Arc<str>,
        /// Capabilities shared with the remote peer.
        capabilities: Arc<Capabilities>,
        /// negotiated eth version
        version: EthVersion,
        /// Sender half for issuing requests to the session task.
        messages: PeerRequestSender<PeerRequest<N>>,
        /// Status exchanged during the handshake.
        status: Arc<UnifiedStatus>,
        /// Whether the session was established incoming or outgoing.
        direction: Direction,
    },
    /// An active session was closed.
    SessionClosed {
        /// Identifier of the remote peer.
        peer_id: PeerId,
        /// Address of the remote peer.
        remote_addr: SocketAddr,
        /// Whether the session was closed due to an error
        error: Option<EthStreamError>,
    },
    /// Admin rpc: new peer added
    PeerAdded(PeerId),
    /// Admin rpc: peer removed
    PeerRemoved(PeerId),
    /// Closed an incoming pending session during authentication.
    IncomingPendingSessionClosed {
        /// Address of the remote peer.
        remote_addr: SocketAddr,
        /// The handshake error, if the session was closed due to one.
        error: Option<PendingSessionHandshakeError>,
    },
    /// Closed an outgoing pending session during authentication.
    OutgoingPendingSessionClosed {
        /// Address of the remote peer.
        remote_addr: SocketAddr,
        /// Identifier of the remote peer.
        peer_id: PeerId,
        /// The handshake error, if the session was closed due to one.
        error: Option<PendingSessionHandshakeError>,
    },
    /// Failed to establish a tcp stream to the given address/node
    OutgoingConnectionError {
        /// Address of the remote peer.
        remote_addr: SocketAddr,
        /// Identifier of the remote peer.
        peer_id: PeerId,
        /// The I/O error that occurred while dialing.
        error: io::Error,
    },
}
/// Represents the state of the connection of the node. If shutting down,
/// new connections won't be established.
/// When in hibernation mode, the node will not initiate new outbound connections. This is
/// beneficial for sync stages that do not require a network connection.
#[derive(Debug, Default)]
pub enum NetworkConnectionState {
    /// Node is active, new outbound connections will be established.
    #[default]
    Active,
    /// Node is shutting down, no new outbound connections will be established.
    ShuttingDown,
    /// Hibernate Network connection, no new outbound connections will be established.
    Hibernate,
}
impl NetworkConnectionState {
    /// Returns true if the node is active.
    pub(crate) const fn is_active(&self) -> bool {
        match self {
            Self::Active => true,
            Self::ShuttingDown | Self::Hibernate => false,
        }
    }
    /// Returns true if the node is shutting down.
    pub(crate) const fn is_shutting_down(&self) -> bool {
        match self {
            Self::ShuttingDown => true,
            Self::Active | Self::Hibernate => false,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/protocol.rs | crates/net/network/src/protocol.rs | //! Support for handling additional RLPx-based application-level protocols.
//!
//! See also <https://github.com/ethereum/devp2p/blob/master/README.md>
use alloy_primitives::bytes::BytesMut;
use futures::Stream;
use reth_eth_wire::{
capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol,
};
use reth_network_api::{Direction, PeerId};
use std::{
fmt,
net::SocketAddr,
ops::{Deref, DerefMut},
pin::Pin,
};
/// A trait that allows to offer additional RLPx-based application-level protocols when establishing
/// a peer-to-peer connection.
pub trait ProtocolHandler: fmt::Debug + Send + Sync + 'static {
    /// The type responsible for negotiating the protocol with the remote.
    type ConnectionHandler: ConnectionHandler;
    /// Invoked when a new incoming connection from the remote is requested
    ///
    /// If protocols for this incoming connection should be announced to the remote, return a
    /// connection handler.
    fn on_incoming(&self, socket_addr: SocketAddr) -> Option<Self::ConnectionHandler>;
    /// Invoked when a new outgoing connection to the remote is requested.
    ///
    /// If protocols for this outgoing connection should be announced to the remote, return a
    /// connection handler.
    fn on_outgoing(
        &self,
        socket_addr: SocketAddr,
        peer_id: PeerId,
    ) -> Option<Self::ConnectionHandler>;
}
/// A trait that allows to authenticate a protocol after the `RLPx` connection was established.
pub trait ConnectionHandler: Send + Sync + 'static {
    /// The connection that yields messages to send to the remote.
    ///
    /// The connection will be closed when this stream resolves.
    type Connection: Stream<Item = BytesMut> + Send + 'static;
    /// Returns the protocol to announce when the `RLPx` connection will be established.
    ///
    /// This will be negotiated with the remote peer.
    fn protocol(&self) -> Protocol;
    /// Invoked when the `RLPx` connection has been established but the peer does not share the
    /// protocol.
    fn on_unsupported_by_peer(
        self,
        supported: &SharedCapabilities,
        direction: Direction,
        peer_id: PeerId,
    ) -> OnNotSupported;
    /// Invoked when the `RLPx` connection was established.
    ///
    /// The returned future should resolve when the connection should disconnect.
    fn into_connection(
        self,
        direction: Direction,
        peer_id: PeerId,
        conn: ProtocolConnection,
    ) -> Self::Connection;
}
/// What to do when a protocol is not supported by the remote.
///
/// Returned by [`ConnectionHandler::on_unsupported_by_peer`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum OnNotSupported {
    /// Proceed with the connection and ignore the protocol.
    #[default]
    KeepAlive,
    /// Disconnect the connection.
    Disconnect,
}
/// A wrapper type for a `RLPx` sub-protocol.
///
/// Type-erases a concrete [`ProtocolHandler`] behind [`DynProtocolHandler`].
#[derive(Debug)]
pub struct RlpxSubProtocol(Box<dyn DynProtocolHandler>);
/// A helper trait to convert a [`ProtocolHandler`] into a dynamic type
pub trait IntoRlpxSubProtocol {
    /// Converts the type into a [`RlpxSubProtocol`].
    fn into_rlpx_sub_protocol(self) -> RlpxSubProtocol;
}
impl<T> IntoRlpxSubProtocol for T
where
    T: ProtocolHandler + Send + Sync + 'static,
{
    // Any protocol handler can be type-erased by boxing it as a `DynProtocolHandler`.
    fn into_rlpx_sub_protocol(self) -> RlpxSubProtocol {
        RlpxSubProtocol(Box::new(self))
    }
}
impl IntoRlpxSubProtocol for RlpxSubProtocol {
    // Already wrapped: the conversion is the identity.
    fn into_rlpx_sub_protocol(self) -> RlpxSubProtocol {
        self
    }
}
/// Additional RLPx-based sub-protocols.
#[derive(Debug, Default)]
pub struct RlpxSubProtocols {
    /// All extra protocols
    protocols: Vec<RlpxSubProtocol>,
}
impl RlpxSubProtocols {
    /// Registers an additional protocol, converting it into the dynamic wrapper type.
    pub fn push(&mut self, protocol: impl IntoRlpxSubProtocol) {
        let protocol = protocol.into_rlpx_sub_protocol();
        self.protocols.push(protocol);
    }
    /// Returns all additional protocol handlers that should be announced to the remote during the
    /// Rlpx handshake on an incoming connection.
    ///
    /// Protocols that decline the connection (return `None`) are skipped.
    pub(crate) fn on_incoming(&self, socket_addr: SocketAddr) -> RlpxSubProtocolHandlers {
        let mut handlers = Vec::with_capacity(self.protocols.len());
        for proto in &self.protocols {
            if let Some(handler) = proto.0.on_incoming(socket_addr) {
                handlers.push(handler);
            }
        }
        RlpxSubProtocolHandlers(handlers)
    }
    /// Returns all additional protocol handlers that should be announced to the remote during the
    /// Rlpx handshake on an outgoing connection.
    ///
    /// Protocols that decline the connection (return `None`) are skipped.
    pub(crate) fn on_outgoing(
        &self,
        socket_addr: SocketAddr,
        peer_id: PeerId,
    ) -> RlpxSubProtocolHandlers {
        let mut handlers = Vec::with_capacity(self.protocols.len());
        for proto in &self.protocols {
            if let Some(handler) = proto.0.on_outgoing(socket_addr, peer_id) {
                handlers.push(handler);
            }
        }
        RlpxSubProtocolHandlers(handlers)
    }
}
/// A set of additional RLPx-based sub-protocol connection handlers.
#[derive(Default)]
pub(crate) struct RlpxSubProtocolHandlers(pub(crate) Vec<Box<dyn DynConnectionHandler>>);
// Implementing the `IntoIterator` trait instead of an inherent `into_iter` method avoids
// clippy's `should_implement_trait` lint and additionally enables `for handler in handlers`.
// Existing `handlers.into_iter()` call sites resolve to this trait method unchanged.
impl IntoIterator for RlpxSubProtocolHandlers {
    type Item = Box<dyn DynConnectionHandler>;
    type IntoIter = std::vec::IntoIter<Self::Item>;
    /// Consumes the set and returns an iterator over all handlers.
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
// Deref to the inner `Vec` so callers can use slice/vec methods (len, iter, …) directly.
impl Deref for RlpxSubProtocolHandlers {
    type Target = Vec<Box<dyn DynConnectionHandler>>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for RlpxSubProtocolHandlers {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// Object-safe counterpart of [`ProtocolHandler`], used internally to erase the concrete
/// handler type so heterogeneous protocols can be stored together.
pub(crate) trait DynProtocolHandler: fmt::Debug + Send + Sync + 'static {
    /// See [`ProtocolHandler::on_incoming`]; the returned handler is boxed.
    fn on_incoming(&self, socket_addr: SocketAddr) -> Option<Box<dyn DynConnectionHandler>>;
    /// See [`ProtocolHandler::on_outgoing`]; the returned handler is boxed.
    fn on_outgoing(
        &self,
        socket_addr: SocketAddr,
        peer_id: PeerId,
    ) -> Option<Box<dyn DynConnectionHandler>>;
}
impl<T: ProtocolHandler> DynProtocolHandler for T {
fn on_incoming(&self, socket_addr: SocketAddr) -> Option<Box<dyn DynConnectionHandler>> {
T::on_incoming(self, socket_addr)
.map(|handler| Box::new(handler) as Box<dyn DynConnectionHandler>)
}
fn on_outgoing(
&self,
socket_addr: SocketAddr,
peer_id: PeerId,
) -> Option<Box<dyn DynConnectionHandler>> {
T::on_outgoing(self, socket_addr, peer_id)
.map(|handler| Box::new(handler) as Box<dyn DynConnectionHandler>)
}
}
/// Wrapper trait for internal ease of use.
///
/// Object-safe counterpart of [`ConnectionHandler`]: consuming methods take `Box<Self>` and
/// the connection stream is returned pinned and type-erased.
pub(crate) trait DynConnectionHandler: Send + Sync + 'static {
    /// See [`ConnectionHandler::protocol`].
    fn protocol(&self) -> Protocol;
    /// See [`ConnectionHandler::on_unsupported_by_peer`].
    fn on_unsupported_by_peer(
        self: Box<Self>,
        supported: &SharedCapabilities,
        direction: Direction,
        peer_id: PeerId,
    ) -> OnNotSupported;
    /// See [`ConnectionHandler::into_connection`].
    fn into_connection(
        self: Box<Self>,
        direction: Direction,
        peer_id: PeerId,
        conn: ProtocolConnection,
    ) -> Pin<Box<dyn Stream<Item = BytesMut> + Send + 'static>>;
}
impl<T: ConnectionHandler> DynConnectionHandler for T {
fn protocol(&self) -> Protocol {
T::protocol(self)
}
fn on_unsupported_by_peer(
self: Box<Self>,
supported: &SharedCapabilities,
direction: Direction,
peer_id: PeerId,
) -> OnNotSupported {
T::on_unsupported_by_peer(*self, supported, direction, peer_id)
}
fn into_connection(
self: Box<Self>,
direction: Direction,
peer_id: PeerId,
conn: ProtocolConnection,
) -> Pin<Box<dyn Stream<Item = BytesMut> + Send + 'static>> {
Box::pin(T::into_connection(*self, direction, peer_id, conn))
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/metrics.rs | crates/net/network/src/metrics.rs | use metrics::Histogram;
use reth_eth_wire::DisconnectReason;
use reth_ethereum_primitives::TxType;
use reth_metrics::{
metrics::{Counter, Gauge},
Metrics,
};
/// Metrics scope for monitoring transactions sent from the manager to the tx manager
pub(crate) const NETWORK_POOL_TRANSACTIONS_SCOPE: &str = "network.pool.transactions";
/// Metrics for the entire network, handled by `NetworkManager`
#[derive(Metrics)]
#[metrics(scope = "network")]
pub struct NetworkMetrics {
    /// Number of currently connected peers
    pub(crate) connected_peers: Gauge,
    /// Number of currently backed off peers
    pub(crate) backed_off_peers: Gauge,
    /// Number of peers known to the node
    pub(crate) tracked_peers: Gauge,
    /// Cumulative number of failures of pending sessions
    pub(crate) pending_session_failures: Counter,
    /// Total number of sessions closed
    pub(crate) closed_sessions: Counter,
    /// Number of active incoming connections
    pub(crate) incoming_connections: Gauge,
    /// Number of active outgoing connections
    pub(crate) outgoing_connections: Gauge,
    /// Number of currently pending outgoing connections
    pub(crate) pending_outgoing_connections: Gauge,
    /// Total number of pending connections, incoming and outgoing.
    pub(crate) total_pending_connections: Gauge,
    /// Total Number of incoming connections handled
    pub(crate) total_incoming_connections: Counter,
    /// Total Number of outgoing connections established
    pub(crate) total_outgoing_connections: Counter,
    /// Number of invalid/malformed messages received from peers
    pub(crate) invalid_messages_received: Counter,
    /// Number of Eth Requests dropped due to channel being at full capacity
    pub(crate) total_dropped_eth_requests_at_full_capacity: Counter,
    /* ================ POLL DURATION ================ */
    /* -- Total poll duration of `NetworksManager` future -- */
    /// Duration in seconds of call to
    /// [`NetworkManager`](crate::NetworkManager)'s poll function.
    ///
    /// True duration of this call, should be sum of the accumulated durations of calling nested
    /// items.
    pub(crate) duration_poll_network_manager: Gauge,
    /* -- Poll duration of items nested in `NetworkManager` future -- */
    /// Time spent streaming messages sent over the [`NetworkHandle`](crate::NetworkHandle), which
    /// can be cloned and shared via [`NetworkManager::handle`](crate::NetworkManager::handle), in
    /// one call to poll the [`NetworkManager`](crate::NetworkManager) future. At least
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) holds this handle.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_network_handle: Gauge,
    /// Time spent polling [`Swarm`](crate::swarm::Swarm), in one call to poll the
    /// [`NetworkManager`](crate::NetworkManager) future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_swarm: Gauge,
}
/// Metrics for `SessionManager`.
///
/// Tracks dialing outcomes and the outgoing-message queue of peer sessions.
#[derive(Metrics)]
#[metrics(scope = "network")]
pub struct SessionManagerMetrics {
    /// Number of successful outgoing dial attempts.
    pub(crate) total_dial_successes: Counter,
    /// Number of dropped outgoing peer messages.
    pub(crate) total_outgoing_peer_messages_dropped: Counter,
    /// Number of queued outgoing messages
    pub(crate) queued_outgoing_messages: Gauge,
}
/// Metrics for the [`TransactionsManager`](crate::transactions::TransactionsManager).
///
/// All `acc_duration_*` and `duration_*` gauges are reported in seconds.
#[derive(Metrics)]
#[metrics(scope = "network")]
pub struct TransactionsManagerMetrics {
    /* ================ BROADCAST ================ */
    /// Total number of propagated transactions
    pub(crate) propagated_transactions: Counter,
    /// Total number of reported bad transactions
    pub(crate) reported_bad_transactions: Counter,
    /* -- Freq txns already marked as seen by peer -- */
    /// Total number of messages from a peer, announcing transactions that have already been
    /// marked as seen by that peer.
    pub(crate) messages_with_hashes_already_seen_by_peer: Counter,
    /// Total number of messages from a peer, with transaction that have already been marked as
    /// seen by that peer.
    pub(crate) messages_with_transactions_already_seen_by_peer: Counter,
    /// Total number of occurrences, of a peer announcing a transaction that has already been
    /// marked as seen by that peer.
    pub(crate) occurrences_hash_already_seen_by_peer: Counter,
    /// Total number of times a transaction is seen from a peer, that has already been marked as
    /// seen by that peer.
    pub(crate) occurrences_of_transaction_already_seen_by_peer: Counter,
    /* -- Freq txns already in pool -- */
    /// Total number of times a hash is announced that is already in the local pool.
    pub(crate) occurrences_hashes_already_in_pool: Counter,
    /// Total number of times a transaction is sent that is already in the local pool.
    pub(crate) occurrences_transactions_already_in_pool: Counter,
    /* ================ POOL IMPORTS ================ */
    /// Number of transactions about to be imported into the pool.
    pub(crate) pending_pool_imports: Gauge,
    /// Total number of bad imports, imports that fail because the transaction is badly formed
    /// (i.e. have no chance of passing validation, unlike imports that fail due to e.g. nonce
    /// gaps).
    pub(crate) bad_imports: Counter,
    /// Number of inflight requests at which the
    /// [`TransactionPool`](reth_transaction_pool::TransactionPool) is considered to be at
    /// capacity. Note, this is not a limit to the number of inflight requests, but a health
    /// measure.
    pub(crate) capacity_pending_pool_imports: Counter,
    /* ================ POLL DURATION ================ */
    /* -- Total poll duration of `TransactionsManager` future -- */
    /// Duration in seconds of call to
    /// [`TransactionsManager`](crate::transactions::TransactionsManager)'s poll function.
    ///
    /// Updating metrics could take time, so the true duration of this call could
    /// be longer than the sum of the accumulated durations of polling nested items.
    pub(crate) duration_poll_tx_manager: Gauge,
    /* -- Poll duration of items nested in `TransactionsManager` future -- */
    /// Accumulated time spent streaming session updates and updating peers accordingly, in
    /// one call to poll the [`TransactionsManager`](crate::transactions::TransactionsManager)
    /// future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_network_events: Gauge,
    /// Accumulated time spent flushing the queue of batched pending pool imports into pool, in
    /// one call to poll the [`TransactionsManager`](crate::transactions::TransactionsManager)
    /// future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_pending_pool_imports: Gauge,
    /// Accumulated time spent streaming transaction and announcement broadcast, queueing for
    /// pool import or requesting respectively, in one call to poll the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_transaction_events: Gauge,
    /// Accumulated time spent streaming fetch events, queueing for pool import on successful
    /// fetch, in one call to poll the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_fetch_events: Gauge,
    /// Accumulated time spent streaming and propagating transactions that were successfully
    /// imported into the pool, in one call to poll the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_imported_transactions: Gauge,
    /// Accumulated time spent assembling and sending requests for hashes fetching pending, in
    /// one call to poll the [`TransactionsManager`](crate::transactions::TransactionsManager)
    /// future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_fetch_pending_hashes: Gauge,
    /// Accumulated time spent streaming commands and propagating, fetching and serving
    /// transactions accordingly, in one call to poll the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) future.
    ///
    /// Duration in seconds.
    pub(crate) acc_duration_poll_commands: Gauge,
}
/// Metrics for the [`TransactionFetcher`](crate::transactions::TransactionFetcher).
#[derive(Metrics)]
#[metrics(scope = "network")]
pub struct TransactionFetcherMetrics {
    /// Currently active outgoing [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions)
    /// requests.
    pub(crate) inflight_transaction_requests: Gauge,
    /// Number of inflight requests at which the
    /// [`TransactionFetcher`](crate::transactions::TransactionFetcher) is considered to be at
    /// capacity. Note, this is not a limit to the number of inflight requests, but a health
    /// measure.
    pub(crate) capacity_inflight_requests: Counter,
    /// Hashes in currently active outgoing
    /// [`GetPooledTransactions`](reth_eth_wire::GetPooledTransactions) requests.
    pub(crate) hashes_inflight_transaction_requests: Gauge,
    /// How often we failed to send a request to the peer because the channel was full.
    pub(crate) egress_peer_channel_full: Counter,
    /// Total number of hashes pending fetch.
    pub(crate) hashes_pending_fetch: Gauge,
    /// Total number of fetched transactions.
    pub(crate) fetched_transactions: Counter,
    /// Total number of transactions that were received in
    /// [`PooledTransactions`](reth_eth_wire::PooledTransactions) responses, that weren't
    /// requested.
    pub(crate) unsolicited_transactions: Counter,
    /* ================ SEARCH DURATION ================ */
    /// Time spent searching for an idle peer in call to
    /// [`TransactionFetcher::find_any_idle_fallback_peer_for_any_pending_hash`](crate::transactions::TransactionFetcher::find_any_idle_fallback_peer_for_any_pending_hash).
    ///
    /// Duration in seconds.
    pub(crate) duration_find_idle_fallback_peer_for_any_pending_hash: Gauge,
    /// Time spent searching for hashes pending fetch, announced by a given peer in
    /// [`TransactionFetcher::fill_request_from_hashes_pending_fetch`](crate::transactions::TransactionFetcher::fill_request_from_hashes_pending_fetch).
    ///
    /// Duration in seconds.
    pub(crate) duration_fill_request_from_hashes_pending_fetch: Gauge,
}
/// Measures the duration of executing the given code block. The duration is added to the given
/// accumulator value passed as a mutable reference.
///
/// `$acc` must support `+= std::time::Duration` (e.g. be a `Duration` itself); the value of
/// `$code` is passed through unchanged.
#[macro_export]
macro_rules! duration_metered_exec {
    ($code:expr, $acc:expr) => {{
        let start = std::time::Instant::now();
        let res = $code;
        $acc += start.elapsed();
        res
    }};
}
/// Metrics for Disconnection types
///
/// These are just counters, and ideally we would implement these metrics on a peer-by-peer basis,
/// in that we do not double-count peers for `TooManyPeers` if we make an outgoing connection and
/// get disconnected twice
///
/// The hex codes in the field docs are the devp2p disconnect reason codes.
#[derive(Metrics)]
#[metrics(scope = "network")]
pub struct DisconnectMetrics {
    /// Number of peer disconnects due to `DisconnectRequested` (0x00)
    pub(crate) disconnect_requested: Counter,
    /// Number of peer disconnects due to `TcpSubsystemError` (0x01)
    pub(crate) tcp_subsystem_error: Counter,
    /// Number of peer disconnects due to `ProtocolBreach` (0x02)
    pub(crate) protocol_breach: Counter,
    /// Number of peer disconnects due to `UselessPeer` (0x03)
    pub(crate) useless_peer: Counter,
    /// Number of peer disconnects due to `TooManyPeers` (0x04)
    pub(crate) too_many_peers: Counter,
    /// Number of peer disconnects due to `AlreadyConnected` (0x05)
    pub(crate) already_connected: Counter,
    /// Number of peer disconnects due to `IncompatibleP2PProtocolVersion` (0x06)
    pub(crate) incompatible: Counter,
    /// Number of peer disconnects due to `NullNodeIdentity` (0x07)
    pub(crate) null_node_identity: Counter,
    /// Number of peer disconnects due to `ClientQuitting` (0x08)
    pub(crate) client_quitting: Counter,
    /// Number of peer disconnects due to `UnexpectedHandshakeIdentity` (0x09)
    pub(crate) unexpected_identity: Counter,
    /// Number of peer disconnects due to `ConnectedToSelf` (0x0a)
    pub(crate) connected_to_self: Counter,
    /// Number of peer disconnects due to `PingTimeout` (0x0b)
    pub(crate) ping_timeout: Counter,
    /// Number of peer disconnects due to `SubprotocolSpecific` (0x10)
    pub(crate) subprotocol_specific: Counter,
}
impl DisconnectMetrics {
    /// Increments the counter that corresponds to the given disconnect reason.
    pub(crate) fn increment(&self, reason: DisconnectReason) {
        // Select the matching counter first, then bump it once.
        let counter = match reason {
            DisconnectReason::DisconnectRequested => &self.disconnect_requested,
            DisconnectReason::TcpSubsystemError => &self.tcp_subsystem_error,
            DisconnectReason::ProtocolBreach => &self.protocol_breach,
            DisconnectReason::UselessPeer => &self.useless_peer,
            DisconnectReason::TooManyPeers => &self.too_many_peers,
            DisconnectReason::AlreadyConnected => &self.already_connected,
            DisconnectReason::IncompatibleP2PProtocolVersion => &self.incompatible,
            DisconnectReason::NullNodeIdentity => &self.null_node_identity,
            DisconnectReason::ClientQuitting => &self.client_quitting,
            DisconnectReason::UnexpectedHandshakeIdentity => &self.unexpected_identity,
            DisconnectReason::ConnectedToSelf => &self.connected_to_self,
            DisconnectReason::PingTimeout => &self.ping_timeout,
            DisconnectReason::SubprotocolSpecific => &self.subprotocol_specific,
        };
        counter.increment(1);
    }
}
/// Metrics for the `EthRequestHandler`.
///
/// Counts incoming `eth` protocol data requests served by the node.
#[derive(Metrics)]
#[metrics(scope = "network")]
pub struct EthRequestHandlerMetrics {
    /// Number of `GetBlockHeaders` requests received
    pub(crate) eth_headers_requests_received_total: Counter,
    /// Number of `GetReceipts` requests received
    pub(crate) eth_receipts_requests_received_total: Counter,
    /// Number of `GetBlockBodies` requests received
    pub(crate) eth_bodies_requests_received_total: Counter,
    /// Number of `GetNodeData` requests received
    pub(crate) eth_node_data_requests_received_total: Counter,
    /// Duration in seconds of call to poll
    /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler).
    pub(crate) acc_duration_poll_eth_req_handler: Gauge,
}
/// Eth68 announcement metrics, track entries by `TxType`
///
/// (eth/68 `NewPooledTransactionHashes` announcements carry the transaction types; see
/// [`AnnouncedTxTypesMetrics::update_eth68_announcement_metrics`].)
#[derive(Metrics)]
#[metrics(scope = "network.transaction_fetcher")]
pub struct AnnouncedTxTypesMetrics {
    /// Histogram for tracking frequency of legacy transaction type
    pub(crate) legacy: Histogram,
    /// Histogram for tracking frequency of EIP-2930 transaction type
    pub(crate) eip2930: Histogram,
    /// Histogram for tracking frequency of EIP-1559 transaction type
    pub(crate) eip1559: Histogram,
    /// Histogram for tracking frequency of EIP-4844 transaction type
    pub(crate) eip4844: Histogram,
    /// Histogram for tracking frequency of EIP-7702 transaction type
    pub(crate) eip7702: Histogram,
    /// Histogram for tracking frequency of seismic transaction type
    pub(crate) seismic: Histogram,
}
/// Counts the number of transactions by their type in a block or collection.
///
/// This struct keeps track of the count of different transaction types
/// as defined by various Ethereum Improvement Proposals (EIPs).
#[derive(Debug, Default)]
pub struct TxTypesCounter {
    /// Count of legacy transactions (pre-EIP-2718).
    pub(crate) legacy: usize,
    /// Count of transactions conforming to EIP-2930 (Optional access lists).
    pub(crate) eip2930: usize,
    /// Count of transactions conforming to EIP-1559 (Fee market change).
    pub(crate) eip1559: usize,
    /// Count of transactions conforming to EIP-4844 (Shard Blob Transactions).
    pub(crate) eip4844: usize,
    /// Count of transactions conforming to EIP-7702 (Set EOA account code).
    pub(crate) eip7702: usize,
    /// Count of seismic transactions conforming
    pub(crate) seismic: usize,
}
impl TxTypesCounter {
    /// Bumps the counter that corresponds to the given transaction type.
    ///
    /// NOTE(review): the match is exhaustive over [`TxType`], which has no seismic variant
    /// here, so the `seismic` field is never incremented by this method — presumably it is
    /// updated elsewhere; verify against callers.
    pub(crate) const fn increase_by_tx_type(&mut self, tx_type: TxType) {
        match tx_type {
            TxType::Legacy => self.legacy += 1,
            TxType::Eip2930 => self.eip2930 += 1,
            TxType::Eip1559 => self.eip1559 += 1,
            TxType::Eip4844 => self.eip4844 += 1,
            TxType::Eip7702 => self.eip7702 += 1,
        }
    }
}
impl AnnouncedTxTypesMetrics {
/// Update metrics during announcement validation, by examining each announcement entry based on
/// `TxType`
pub(crate) fn update_eth68_announcement_metrics(&self, tx_types_counter: TxTypesCounter) {
self.legacy.record(tx_types_counter.legacy as f64);
self.eip2930.record(tx_types_counter.eip2930 as f64);
self.eip1559.record(tx_types_counter.eip1559 as f64);
self.eip4844.record(tx_types_counter.eip4844 as f64);
self.eip7702.record(tx_types_counter.eip7702 as f64);
self.seismic.record(tx_types_counter.seismic as f64);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/import.rs | crates/net/network/src/import.rs | //! This module provides an abstraction over block import in the form of the `BlockImport` trait.
use crate::message::NewBlockMessage;
use reth_eth_wire::NewBlock;
use reth_eth_wire_types::broadcast::NewBlockHashes;
use reth_network_peers::PeerId;
use std::{
error::Error,
task::{Context, Poll},
};
/// Abstraction over block import.
pub trait BlockImport<B = NewBlock>: std::fmt::Debug + Send + Sync {
    /// Invoked for a received block announcement from the peer.
    ///
    /// For a `NewBlock` message:
    /// > When a `NewBlock` announcement message is received from a peer, the client first verifies
    /// > the basic header validity of the block, checking whether the proof-of-work value is valid.
    ///
    /// For a `NewBlockHashes` message, hash announcement should be processed accordingly.
    ///
    /// The results are expected to be returned via [`BlockImport::poll`].
    fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockEvent<B>);
    /// Returns the results of a [`BlockImport::on_new_block`]
    ///
    /// Implementations should return `Poll::Pending` while no result is available.
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<BlockImportEvent<B>>;
}
/// Represents different types of block announcement events from the network.
#[derive(Debug, Clone)]
pub enum NewBlockEvent<B = NewBlock> {
    /// A new full block announcement
    Block(NewBlockMessage<B>),
    /// Only the hashes of new blocks
    Hashes(NewBlockHashes),
}
/// Represents different types of block import events
#[derive(Debug)]
pub enum BlockImportEvent<B = reth_ethereum_primitives::Block> {
    /// General block announcement and validation status
    Announcement(BlockValidation<B>),
    /// Result of a peer-specific block import
    Outcome(BlockImportOutcome<B>),
}
/// Outcome of the [`BlockImport`]'s block handling.
#[derive(Debug)]
pub struct BlockImportOutcome<B = reth_ethereum_primitives::Block> {
    /// Sender of the block announcement message.
    pub peer: PeerId,
    /// The result after validating the block
    pub result: Result<BlockValidation<B>, BlockImportError>,
}
/// Represents the successful validation of a received block announcement.
///
/// The variant determines how the block should be re-broadcast to other peers.
#[derive(Debug)]
pub enum BlockValidation<B> {
    /// Basic Header validity check, after which the block should be relayed to peers via a
    /// `NewBlock` message
    ValidHeader {
        /// received block
        block: NewBlockMessage<B>,
    },
    /// Successfully imported: state-root matches after execution. The block should be relayed via
    /// `NewBlockHashes`
    ValidBlock {
        /// validated block.
        block: NewBlockMessage<B>,
    },
}
/// Represents the error case of a failed block import
#[derive(Debug, thiserror::Error)]
pub enum BlockImportError {
    /// Consensus error
    #[error(transparent)]
    Consensus(#[from] reth_consensus::ConsensusError),
    /// Other error
    #[error(transparent)]
    Other(#[from] Box<dyn Error + Send + Sync>),
}
/// An implementation of `BlockImport` used in Proof-of-Stake consensus that does nothing.
///
/// Block propagation over devp2p is invalid in POS: [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p)
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct ProofOfStakeBlockImport;
impl<B> BlockImport<B> for ProofOfStakeBlockImport {
    // Announcements are intentionally ignored: block propagation is invalid post-merge.
    fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockEvent<B>) {}
    // Never yields an event; stays pending forever.
    fn poll(&mut self, _cx: &mut Context<'_>) -> Poll<BlockImportEvent<B>> {
        Poll::Pending
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/cache.rs | crates/net/network/src/cache.rs | //! Network cache support
use alloy_primitives::map::DefaultHashBuilder;
use core::hash::BuildHasher;
use derive_more::{Deref, DerefMut};
use itertools::Itertools;
use schnellru::{ByLength, Limiter, Unlimited};
use std::{fmt, hash::Hash};
/// A minimal LRU cache based on a [`LruMap`](schnellru::LruMap) with limited capacity.
///
/// If the length exceeds the set capacity, the oldest element will be removed
/// In the limit, for each element inserted the oldest existing element will be removed.
pub struct LruCache<T: Hash + Eq + fmt::Debug> {
    /// Logical capacity of the cache (the inner map is allocated with `limit + 1`, see `new`).
    limit: u32,
    /// Backing map; only the keys matter, values are `()`.
    inner: LruMap<T, ()>,
}
impl<T: Hash + Eq + fmt::Debug> LruCache<T> {
    /// Creates a new [`LruCache`] using the given limit
    pub fn new(limit: u32) -> Self {
        // limit of lru map is one element more, so can give eviction feedback, which isn't
        // supported by LruMap
        Self { inner: LruMap::new(limit + 1), limit }
    }
    /// Insert an element into the set.
    ///
    /// This operation uses `get_or_insert` from the underlying `schnellru::LruMap` which:
    /// - Automatically evicts the least recently used item if capacity is exceeded
    ///
    /// This method is more efficient than [`insert_and_get_evicted`](Self::insert_and_get_evicted)
    /// as it performs fewer checks. Use this method when you don't need information about
    /// evicted values.
    ///
    /// If the set did not have this value present, true is returned.
    /// If the set did have this value present, false is returned.
    pub fn insert(&mut self, entry: T) -> bool {
        let mut is_new = false;
        // The closure only runs when the key is absent, so `is_new` doubles as the
        // "was inserted" flag.
        self.inner.get_or_insert(entry, || {
            is_new = true;
        });
        is_new
    }
    /// Same as [`insert`](Self::insert) but returns a tuple, where the second index is the evicted
    /// value, if one was evicted.
    pub fn insert_and_get_evicted(&mut self, entry: T) -> (bool, Option<T>) {
        // `peek` checks presence without promoting the entry to most-recently-used.
        let new = self.inner.peek(&entry).is_none();
        // Evict up front when a new key hits the logical capacity; the inner map holds
        // `limit + 1` slots (see `new`), so it never evicts on its own here.
        let evicted =
            (new && (self.limit as usize) <= self.inner.len()).then(|| self.remove_lru()).flatten();
        _ = self.inner.get_or_insert(entry, || ());
        (new, evicted)
    }
    /// Gets the given element, if exists, and promotes to lru.
    pub fn get(&mut self, entry: &T) -> Option<&T> {
        // `LruMap::get` returns the value (`()`), not the key, so promote first and then
        // read the key back as the head of the MRU-first iterator.
        let _ = self.inner.get(entry)?;
        self.iter().next()
    }
    /// Iterates through entries and returns a reference to the given entry, if exists, without
    /// promoting to lru.
    ///
    /// NOTE: Use this for type that have custom impl of [`PartialEq`] and [`Eq`], that aren't
    /// unique by all fields. If `PartialEq` and `Eq` are derived for a type, it's more efficient to
    /// call [`contains`](Self::contains).
    pub fn find(&self, entry: &T) -> Option<&T> {
        self.iter().find(|key| *key == entry)
    }
    /// Remove the least recently used entry and return it.
    ///
    /// If the `LruCache` is empty or if the eviction feedback is
    /// configured, this will return None.
    #[inline]
    fn remove_lru(&mut self) -> Option<T> {
        self.inner.pop_oldest().map(|(k, ())| k)
    }
    /// Expels the given value. Returns true if the value existed.
    pub fn remove(&mut self, value: &T) -> bool {
        self.inner.remove(value).is_some()
    }
    /// Returns `true` if the set contains a value.
    pub fn contains(&self, value: &T) -> bool {
        // `peek` does not alter the LRU order.
        self.inner.peek(value).is_some()
    }
    /// Returns an iterator over all cached entries in lru order
    pub fn iter(&self) -> impl Iterator<Item = &T> + '_ {
        self.inner.iter().map(|(k, ())| k)
    }
    /// Returns number of elements currently in cache.
    pub fn len(&self) -> usize {
        self.inner.len()
    }
    /// Returns `true` if there are currently no elements in the cache.
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}
impl<T> Extend<T> for LruCache<T>
where
    T: Eq + Hash + fmt::Debug,
{
    /// Inserts every yielded element, applying the usual LRU eviction rules.
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        iter.into_iter().for_each(|entry| {
            let _ = self.insert(entry);
        });
    }
}
impl<T> fmt::Debug for LruCache<T>
where
    T: fmt::Debug + Hash + Eq,
{
    // Renders the limit and the entries in MRU-first order; the exact format is relied on
    // by the unit tests below.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("LruCache")
            .field("limit", &self.limit)
            .field(
                "ret %iter",
                &format_args!("Iter: {{{} }}", self.iter().map(|k| format!(" {k:?}")).format(",")),
            )
            .finish()
    }
}
/// Wrapper of [`schnellru::LruMap`] that implements [`fmt::Debug`] and with the common hash
/// builder.
#[derive(Deref, DerefMut, Default)]
pub struct LruMap<K, V, L = ByLength, S = DefaultHashBuilder>(schnellru::LruMap<K, V, L, S>)
where
    K: Hash + PartialEq,
    L: Limiter<K, V>,
    S: BuildHasher;
impl<K, V, L, S> fmt::Debug for LruMap<K, V, L, S>
where
    K: Hash + PartialEq + fmt::Display,
    V: fmt::Debug,
    L: Limiter<K, V> + fmt::Debug,
    S: BuildHasher,
{
    // Renders the limiter and the entries in MRU-first order; the exact format is relied on
    // by the unit tests below.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("LruMap")
            .field("limiter", self.limiter())
            .field(
                "ret %iter",
                &format_args!(
                    "Iter: {{{} }}",
                    self.iter().map(|(k, v)| format!(" {k}: {v:?}")).format(",")
                ),
            )
            .finish()
    }
}
impl<K, V> LruMap<K, V>
where
K: Hash + PartialEq,
{
/// Returns a new cache with default limiter and hash builder.
pub fn new(max_length: u32) -> Self {
Self(schnellru::LruMap::with_hasher(ByLength::new(max_length), Default::default()))
}
}
impl<K, V> LruMap<K, V, Unlimited>
where
K: Hash + PartialEq,
{
/// Returns a new cache with [`Unlimited`] limiter and default hash builder.
pub fn new_unlimited() -> Self {
Self(schnellru::LruMap::with_hasher(Unlimited, Default::default()))
}
}
#[cfg(test)]
mod test {
    use super::*;
    use derive_more::{Constructor, Display};
    use std::hash::Hasher;
    // Simple key type: equality/hash derived over the full value.
    #[derive(Debug, Hash, PartialEq, Eq, Display, Clone, Copy)]
    struct Key(i8);
    // Key type with custom equality/hash that only consider `id`, so two values can be
    // "equal" while differing in `other` — used to test `find`/`get` lookup semantics.
    #[derive(Debug, Eq, Constructor, Clone, Copy)]
    struct CompoundKey {
        // type unique for id
        id: i8,
        other: i8,
    }
    impl PartialEq for CompoundKey {
        fn eq(&self, other: &Self) -> bool {
            self.id == other.id
        }
    }
    impl Hash for CompoundKey {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.id.hash(state)
        }
    }
    #[test]
    fn test_cache_should_insert_into_empty_set() {
        let mut cache = LruCache::new(5);
        let entry = "entry";
        assert!(cache.insert(entry));
        assert!(cache.contains(&entry));
    }
    #[test]
    fn test_cache_should_not_insert_same_value_twice() {
        let mut cache = LruCache::new(5);
        let entry = "entry";
        assert!(cache.insert(entry));
        assert!(!cache.insert(entry));
    }
    #[test]
    fn test_cache_should_remove_oldest_element_when_exceeding_limit() {
        let mut cache = LruCache::new(1); // LruCache limit will be 2, check LruCache::new
        let old_entry = "old_entry";
        let new_entry = "new_entry";
        cache.insert(old_entry);
        cache.insert("entry");
        cache.insert(new_entry);
        assert!(cache.contains(&new_entry));
        assert!(!cache.contains(&old_entry));
    }
    #[test]
    fn test_cache_should_extend_an_array() {
        let mut cache = LruCache::new(5);
        let entries = ["some_entry", "another_entry"];
        cache.extend(entries);
        for e in entries {
            assert!(cache.contains(&e));
        }
    }
    // Pins the exact `Debug` output of `LruMap`, including MRU-first iteration order.
    #[test]
    #[expect(dead_code)]
    fn test_debug_impl_lru_map() {
        #[derive(Debug)]
        struct Value(i8);
        let mut cache = LruMap::new(2);
        let key_1 = Key(1);
        let value_1 = Value(11);
        cache.insert(key_1, value_1);
        let key_2 = Key(2);
        let value_2 = Value(22);
        cache.insert(key_2, value_2);
        assert_eq!(
            "LruMap { limiter: ByLength { max_length: 2 }, ret %iter: Iter: { 2: Value(22), 1: Value(11) } }",
            format!("{cache:?}")
        )
    }
    // Pins the exact `Debug` output of `LruCache`, including MRU-first iteration order.
    #[test]
    fn test_debug_impl_lru_cache() {
        let mut cache = LruCache::new(2);
        let key_1 = Key(1);
        cache.insert(key_1);
        let key_2 = Key(2);
        cache.insert(key_2);
        assert_eq!(
            "LruCache { limit: 2, ret %iter: Iter: { Key(2), Key(1) } }",
            format!("{cache:?}")
        )
    }
    #[test]
    fn get() {
        let mut cache = LruCache::new(2);
        let key_1 = Key(1);
        cache.insert(key_1);
        let key_2 = Key(2);
        cache.insert(key_2);
        // promotes key 1 to lru
        _ = cache.get(&key_1);
        assert_eq!(
            "LruCache { limit: 2, ret %iter: Iter: { Key(1), Key(2) } }",
            format!("{cache:?}")
        )
    }
    // `get` must return the stored key (with its `other` field), not the probe key.
    #[test]
    fn get_ty_custom_eq_impl() {
        let mut cache = LruCache::new(2);
        let key_1 = CompoundKey::new(1, 11);
        cache.insert(key_1);
        let key_2 = CompoundKey::new(2, 22);
        cache.insert(key_2);
        let key = cache.get(&key_1);
        assert_eq!(key_1.other, key.unwrap().other)
    }
    #[test]
    fn peek() {
        let mut cache = LruCache::new(2);
        let key_1 = Key(1);
        cache.insert(key_1);
        let key_2 = Key(2);
        cache.insert(key_2);
        // doesn't promote key 1 to lru
        _ = cache.find(&key_1);
        assert_eq!(
            "LruCache { limit: 2, ret %iter: Iter: { Key(2), Key(1) } }",
            format!("{cache:?}")
        )
    }
    // `find` must also return the stored key without promoting it.
    #[test]
    fn peek_ty_custom_eq_impl() {
        let mut cache = LruCache::new(2);
        let key_1 = CompoundKey::new(1, 11);
        cache.insert(key_1);
        let key_2 = CompoundKey::new(2, 22);
        cache.insert(key_2);
        let key = cache.find(&key_1);
        assert_eq!(key_1.other, key.unwrap().other)
    }
    #[test]
    fn test_insert_methods() {
        let mut cache = LruCache::new(2);
        // Test basic insert
        assert!(cache.insert("first")); // new entry
        assert!(!cache.insert("first")); // existing entry
        assert!(cache.insert("second")); // new entry
        // Test insert_and_get_evicted
        let (is_new, evicted) = cache.insert_and_get_evicted("third");
        assert!(is_new); // should be new entry
        assert_eq!(evicted, Some("first")); // should evict
        assert!(cache.contains(&"second"));
        assert!(cache.contains(&"third"));
        assert!(!cache.contains(&"first"));
        // Test insert_and_get_evicted with existing entry
        let (is_new, evicted) = cache.insert_and_get_evicted("second");
        assert!(!is_new); // should not be new
        assert_eq!(evicted, None); // should not evict anything
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/message.rs | crates/net/network/src/message.rs | //! Capability messaging
//!
//! An `RLPx` stream is multiplexed via the prepended message-id of a framed message.
//! Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, <https://github.com/ethereum/devp2p/blob/master/rlpx.md#capability-messaging>
use crate::types::Receipts69;
use alloy_consensus::{BlockHeader, ReceiptWithBloom};
use alloy_primitives::{Bytes, B256};
use futures::FutureExt;
use reth_eth_wire::{
message::RequestPair, BlockBodies, BlockHeaders, BlockRangeUpdate, EthMessage,
EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, NewBlock,
NewBlockHashes, NewBlockPayload, NewPooledTransactionHashes, NodeData, PooledTransactions,
Receipts, SharedTransactions, Transactions,
};
use reth_eth_wire_types::RawCapabilityMessage;
use reth_network_api::PeerRequest;
use reth_network_p2p::error::{RequestError, RequestResult};
use reth_primitives_traits::Block;
use std::{
sync::Arc,
task::{ready, Context, Poll},
};
use tokio::sync::oneshot;
/// Internal form of a `NewBlock` message
///
/// The payload is kept behind an [`Arc`] so clones of this message are cheap
/// reference-count bumps rather than copies of the block data.
#[derive(Debug, Clone)]
pub struct NewBlockMessage<P = NewBlock<reth_ethereum_primitives::Block>> {
    /// Hash of the block
    pub hash: B256,
    /// Raw received message
    pub block: Arc<P>,
}
// === impl NewBlockMessage ===
impl<P: NewBlockPayload> NewBlockMessage<P> {
    /// Returns the block number of the block
    pub fn number(&self) -> u64 {
        self.block.block().header().number()
    }
}
/// All Bi-directional eth-message variants that can be sent to a session or received from a
/// session.
///
/// Generic over the [`NetworkPrimitives`] in use, defaulting to the ethereum wire types.
#[derive(Debug)]
pub enum PeerMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Announce new block hashes
    NewBlockHashes(NewBlockHashes),
    /// Broadcast new block.
    NewBlock(NewBlockMessage<N::NewBlockPayload>),
    /// Received transactions _from_ the peer
    ReceivedTransaction(Transactions<N::BroadcastedTransaction>),
    /// Broadcast transactions _from_ local _to_ a peer.
    SendTransactions(SharedTransactions<N::BroadcastedTransaction>),
    /// Send new pooled transactions
    PooledTransactions(NewPooledTransactionHashes),
    /// All `eth` request variants.
    EthRequest(PeerRequest<N>),
    /// Announces when `BlockRange` is updated.
    BlockRangeUpdated(BlockRangeUpdate),
    /// Any other or manually crafted eth message.
    ///
    /// Caution: It is expected that this is a valid `eth_` capability message.
    Other(RawCapabilityMessage),
}
/// Request Variants that only target block related data.
///
/// Unlike [`PeerRequest`], these variants carry only the request payload; no
/// response channel is attached here.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BlockRequest {
    /// Requests block headers from the peer.
    GetBlockHeaders(GetBlockHeaders),
    /// Requests block bodies from the peer.
    GetBlockBodies(GetBlockBodies),
}
/// Corresponding variant for [`PeerRequest`].
///
/// Each variant holds the receiving half of the oneshot channel on which the
/// answer to the matching request arrives.
#[derive(Debug)]
pub enum PeerResponse<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Represents a response to a request for block headers.
    BlockHeaders {
        /// The receiver channel for the response to a block headers request.
        response: oneshot::Receiver<RequestResult<BlockHeaders<N::BlockHeader>>>,
    },
    /// Represents a response to a request for block bodies.
    BlockBodies {
        /// The receiver channel for the response to a block bodies request.
        response: oneshot::Receiver<RequestResult<BlockBodies<N::BlockBody>>>,
    },
    /// Represents a response to a request for pooled transactions.
    PooledTransactions {
        /// The receiver channel for the response to a pooled transactions request.
        response: oneshot::Receiver<RequestResult<PooledTransactions<N::PooledTransaction>>>,
    },
    /// Represents a response to a request for `NodeData`.
    NodeData {
        /// The receiver channel for the response to a `NodeData` request.
        response: oneshot::Receiver<RequestResult<NodeData>>,
    },
    /// Represents a response to a request for receipts.
    Receipts {
        /// The receiver channel for the response to a receipts request.
        response: oneshot::Receiver<RequestResult<Receipts<N::Receipt>>>,
    },
    /// Represents a response to a request for receipts.
    ///
    /// This is a variant of `Receipts` that was introduced in `eth/69`.
    /// The difference is that this variant does not require the inclusion of bloom filters in the
    /// response, making it more lightweight.
    Receipts69 {
        /// The receiver channel for the response to a receipts request.
        response: oneshot::Receiver<RequestResult<Receipts69<N::Receipt>>>,
    },
}
// === impl PeerResponse ===
impl<N: NetworkPrimitives> PeerResponse<N> {
    /// Polls the underlying oneshot channel to completion and maps the
    /// received payload into the matching [`PeerResponseResult`] variant.
    ///
    /// A successful receive unwraps the protocol message to its inner
    /// collection; a dropped sender surfaces as a `RequestError`.
    pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<PeerResponseResult<N>> {
        let result = match self {
            Self::BlockHeaders { response } => {
                let received = ready!(response.poll_unpin(cx));
                PeerResponseResult::BlockHeaders(match received {
                    Ok(res) => res.map(|msg| msg.0),
                    Err(err) => Err(err.into()),
                })
            }
            Self::BlockBodies { response } => {
                let received = ready!(response.poll_unpin(cx));
                PeerResponseResult::BlockBodies(match received {
                    Ok(res) => res.map(|msg| msg.0),
                    Err(err) => Err(err.into()),
                })
            }
            Self::PooledTransactions { response } => {
                let received = ready!(response.poll_unpin(cx));
                PeerResponseResult::PooledTransactions(match received {
                    Ok(res) => res.map(|msg| msg.0),
                    Err(err) => Err(err.into()),
                })
            }
            Self::NodeData { response } => {
                let received = ready!(response.poll_unpin(cx));
                PeerResponseResult::NodeData(match received {
                    Ok(res) => res.map(|msg| msg.0),
                    Err(err) => Err(err.into()),
                })
            }
            Self::Receipts { response } => {
                let received = ready!(response.poll_unpin(cx));
                PeerResponseResult::Receipts(match received {
                    Ok(res) => res.map(|msg| msg.0),
                    Err(err) => Err(err.into()),
                })
            }
            Self::Receipts69 { response } => {
                let received = ready!(response.poll_unpin(cx));
                PeerResponseResult::Receipts69(match received {
                    Ok(res) => res.map(|msg| msg.0),
                    Err(err) => Err(err.into()),
                })
            }
        };
        Poll::Ready(result)
    }
}
/// All response variants for [`PeerResponse`]
///
/// Produced by polling a [`PeerResponse`] to completion; each variant carries
/// the unwrapped payload collection or the request error.
#[derive(Debug)]
pub enum PeerResponseResult<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Represents a result containing block headers or an error.
    BlockHeaders(RequestResult<Vec<N::BlockHeader>>),
    /// Represents a result containing block bodies or an error.
    BlockBodies(RequestResult<Vec<N::BlockBody>>),
    /// Represents a result containing pooled transactions or an error.
    PooledTransactions(RequestResult<Vec<N::PooledTransaction>>),
    /// Represents a result containing node data or an error.
    NodeData(RequestResult<Vec<Bytes>>),
    /// Represents a result containing receipts or an error.
    Receipts(RequestResult<Vec<Vec<ReceiptWithBloom<N::Receipt>>>>),
    /// Represents a result containing receipts or an error for eth/69.
    Receipts69(RequestResult<Vec<Vec<N::Receipt>>>),
}
// === impl PeerResponseResult ===
impl<N: NetworkPrimitives> PeerResponseResult<N> {
/// Converts this response into an [`EthMessage`]
pub fn try_into_message(self, id: u64) -> RequestResult<EthMessage<N>> {
macro_rules! to_message {
($response:ident, $item:ident, $request_id:ident) => {
match $response {
Ok(res) => {
let request = RequestPair { request_id: $request_id, message: $item(res) };
Ok(EthMessage::$item(request))
}
Err(err) => Err(err),
}
};
}
match self {
Self::BlockHeaders(resp) => {
to_message!(resp, BlockHeaders, id)
}
Self::BlockBodies(resp) => {
to_message!(resp, BlockBodies, id)
}
Self::PooledTransactions(resp) => {
to_message!(resp, PooledTransactions, id)
}
Self::NodeData(resp) => {
to_message!(resp, NodeData, id)
}
Self::Receipts(resp) => {
to_message!(resp, Receipts, id)
}
Self::Receipts69(resp) => {
to_message!(resp, Receipts69, id)
}
}
}
/// Returns the `Err` value if the result is an error.
pub fn err(&self) -> Option<&RequestError> {
match self {
Self::BlockHeaders(res) => res.as_ref().err(),
Self::BlockBodies(res) => res.as_ref().err(),
Self::PooledTransactions(res) => res.as_ref().err(),
Self::NodeData(res) => res.as_ref().err(),
Self::Receipts(res) => res.as_ref().err(),
Self::Receipts69(res) => res.as_ref().err(),
}
}
/// Returns whether this result is an error.
pub fn is_err(&self) -> bool {
self.err().is_some()
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/eth_requests.rs | crates/net/network/src/eth_requests.rs | //! Blocks/Headers management for the p2p network.
use crate::{
budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget,
metrics::EthRequestHandlerMetrics,
};
use alloy_consensus::{BlockHeader, ReceiptWithBloom};
use alloy_eips::BlockHashOrNumber;
use alloy_rlp::Encodable;
use futures::StreamExt;
use reth_eth_wire::{
BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData,
GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, Receipts69,
};
use reth_network_api::test_utils::PeersHandle;
use reth_network_p2p::error::RequestResult;
use reth_network_peers::PeerId;
use reth_primitives_traits::Block;
use reth_storage_api::{BlockReader, HeaderProvider};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::sync::{mpsc::Receiver, oneshot};
use tokio_stream::wrappers::ReceiverStream;
// Limits: <https://github.com/ethereum/go-ethereum/blob/b0d44338bbcefee044f1f635a84487cbbd8f0538/eth/protocols/eth/handler.go#L34-L56>
/// Maximum number of receipts to serve.
///
/// Used to limit lookups.
pub const MAX_RECEIPTS_SERVE: usize = 1024;
/// Maximum number of block headers to serve.
///
/// Used to limit lookups.
pub const MAX_HEADERS_SERVE: usize = 1024;
/// Maximum number of block bodies to serve.
///
/// Used to limit lookups. With 24KB block sizes nowadays, the practical limit will always be
/// `SOFT_RESPONSE_LIMIT`.
pub const MAX_BODIES_SERVE: usize = 1024;
/// Maximum size of replies to data retrievals: 2MB
pub const SOFT_RESPONSE_LIMIT: usize = 2 * 1024 * 1024;
/// Manages eth related requests on top of the p2p network.
///
/// This can be spawned to another task and is supposed to be run as background service.
#[derive(Debug)]
#[must_use = "Manager does nothing unless polled."]
pub struct EthRequestHandler<C, N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The client type that can interact with the chain.
    client: C,
    /// Used for reporting peers.
    // TODO use to report spammers
    #[expect(dead_code)]
    peers: PeersHandle,
    /// Incoming request from the [`NetworkManager`](crate::NetworkManager).
    incoming_requests: ReceiverStream<IncomingEthRequest<N>>,
    /// Metrics for the eth request handler.
    metrics: EthRequestHandlerMetrics,
}
// === impl EthRequestHandler ===
impl<C, N: NetworkPrimitives> EthRequestHandler<C, N> {
    /// Create a new instance
    ///
    /// Wraps the raw `incoming` channel receiver into a stream; metrics start
    /// at their default state.
    pub fn new(client: C, peers: PeersHandle, incoming: Receiver<IncomingEthRequest<N>>) -> Self {
        Self {
            client,
            peers,
            incoming_requests: ReceiverStream::new(incoming),
            metrics: Default::default(),
        }
    }
}
impl<C, N> EthRequestHandler<C, N>
where
    N: NetworkPrimitives,
    C: BlockReader,
{
    /// Returns the list of requested headers
    ///
    /// Walks the chain starting at `start_block` in the requested `direction`,
    /// advancing `skip + 1` blocks per step, until `limit` headers are
    /// collected, [`MAX_HEADERS_SERVE`] is hit, or the accumulated RLP size
    /// exceeds [`SOFT_RESPONSE_LIMIT`]. A failed lookup terminates the walk
    /// early and returns whatever was gathered so far.
    fn get_headers_response(&self, request: GetBlockHeaders) -> Vec<C::Header> {
        let GetBlockHeaders { start_block, limit, skip, direction } = request;
        let mut headers = Vec::new();
        let mut block: BlockHashOrNumber = match start_block {
            BlockHashOrNumber::Hash(start) => start.into(),
            BlockHashOrNumber::Number(num) => {
                // resolve the number to its canonical hash; an unknown start
                // block yields an empty response
                let Some(hash) = self.client.block_hash(num).unwrap_or_default() else {
                    return headers
                };
                hash.into()
            }
        };
        let skip = skip as u64;
        let mut total_bytes = 0;
        for _ in 0..limit {
            if let Some(header) = self.client.header_by_hash_or_number(block).unwrap_or_default() {
                let number = header.number();
                let parent_hash = header.parent_hash();
                total_bytes += header.length();
                headers.push(header);
                // note: the header that crosses the soft byte limit is still included
                if headers.len() >= MAX_HEADERS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT {
                    break
                }
                match direction {
                    HeadersDirection::Rising => {
                        if let Some(next) = number.checked_add(1).and_then(|n| n.checked_add(skip))
                        {
                            block = next.into()
                        } else {
                            break
                        }
                    }
                    HeadersDirection::Falling => {
                        if skip > 0 {
                            // prevent under flows for block.number == 0 and `block.number - skip <
                            // 0`
                            if let Some(next) =
                                number.checked_sub(1).and_then(|num| num.checked_sub(skip))
                            {
                                block = next.into()
                            } else {
                                break
                            }
                        } else {
                            // no skip: simply follow the parent hash downwards
                            block = parent_hash.into()
                        }
                    }
                }
            } else {
                break
            }
        }
        headers
    }
    /// Serves a `GetBlockHeaders` request and replies on the oneshot channel.
    ///
    /// A dropped requester is ignored (the send result is discarded).
    fn on_headers_request(
        &self,
        _peer_id: PeerId,
        request: GetBlockHeaders,
        response: oneshot::Sender<RequestResult<BlockHeaders<C::Header>>>,
    ) {
        self.metrics.eth_headers_requests_received_total.increment(1);
        let headers = self.get_headers_response(request);
        let _ = response.send(Ok(BlockHeaders(headers)));
    }
    /// Serves a `GetBlockBodies` request and replies on the oneshot channel.
    ///
    /// Bodies are returned in request order; an unknown hash or exceeding the
    /// count/size limits terminates the response early.
    fn on_bodies_request(
        &self,
        _peer_id: PeerId,
        request: GetBlockBodies,
        response: oneshot::Sender<RequestResult<BlockBodies<<C::Block as Block>::Body>>>,
    ) {
        self.metrics.eth_bodies_requests_received_total.increment(1);
        let mut bodies = Vec::new();
        let mut total_bytes = 0;
        for hash in request.0 {
            if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() {
                let body = block.into_body();
                total_bytes += body.length();
                bodies.push(body);
                if bodies.len() >= MAX_BODIES_SERVE || total_bytes > SOFT_RESPONSE_LIMIT {
                    break
                }
            } else {
                break
            }
        }
        let _ = response.send(Ok(BlockBodies(bodies)));
    }
    /// Serves a pre-eth/69 `GetReceipts` request: each receipt is paired with
    /// its bloom filter before being sent back.
    fn on_receipts_request(
        &self,
        _peer_id: PeerId,
        request: GetReceipts,
        response: oneshot::Sender<RequestResult<Receipts<C::Receipt>>>,
    ) {
        self.metrics.eth_receipts_requests_received_total.increment(1);
        let receipts = self.get_receipts_response(request, |receipts_by_block| {
            receipts_by_block.into_iter().map(ReceiptWithBloom::from).collect::<Vec<_>>()
        });
        let _ = response.send(Ok(Receipts(receipts)));
    }
    /// Serves an eth/69 `GetReceipts` request; receipts are sent without bloom
    /// filters.
    fn on_receipts69_request(
        &self,
        _peer_id: PeerId,
        request: GetReceipts,
        response: oneshot::Sender<RequestResult<Receipts69<C::Receipt>>>,
    ) {
        self.metrics.eth_receipts_requests_received_total.increment(1);
        let receipts = self.get_receipts_response(request, |receipts_by_block| {
            // skip bloom filter for eth69
            receipts_by_block
        });
        let _ = response.send(Ok(Receipts69(receipts)));
    }
    /// Shared lookup for both receipts handlers.
    ///
    /// `transform_fn` adapts each block's receipts to the wire representation.
    /// Collection stops at [`MAX_RECEIPTS_SERVE`] blocks, once the encoded
    /// size exceeds [`SOFT_RESPONSE_LIMIT`], or at the first unknown hash.
    #[inline]
    fn get_receipts_response<T, F>(&self, request: GetReceipts, transform_fn: F) -> Vec<Vec<T>>
    where
        F: Fn(Vec<C::Receipt>) -> Vec<T>,
        T: Encodable,
    {
        let mut receipts = Vec::new();
        let mut total_bytes = 0;
        for hash in request.0 {
            if let Some(receipts_by_block) =
                self.client.receipts_by_block(BlockHashOrNumber::Hash(hash)).unwrap_or_default()
            {
                let transformed_receipts = transform_fn(receipts_by_block);
                total_bytes += transformed_receipts.length();
                receipts.push(transformed_receipts);
                if receipts.len() >= MAX_RECEIPTS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT {
                    break
                }
            } else {
                break
            }
        }
        receipts
    }
}
/// An endless future.
///
/// This should be spawned or used as part of `tokio::select!`.
impl<C, N> Future for EthRequestHandler<C, N>
where
    N: NetworkPrimitives,
    C: BlockReader<Block = N::Block, Receipt = N::Receipt>
        + HeaderProvider<Header = N::BlockHeader>
        + Unpin,
{
    type Output = ();
    /// Drains up to [`DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS`] incoming requests
    /// per poll, then records the accumulated poll duration metric.
    ///
    /// Always returns [`Poll::Pending`]; when the budget was exhausted the
    /// waker is re-armed immediately so remaining requests are served on the
    /// next poll.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        let mut acc = Duration::ZERO;
        let maybe_more_incoming_requests = metered_poll_nested_stream_with_budget!(
            acc,
            "net::eth",
            "Incoming eth requests stream",
            DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS,
            this.incoming_requests.poll_next_unpin(cx),
            |incoming| {
                match incoming {
                    IncomingEthRequest::GetBlockHeaders { peer_id, request, response } => {
                        this.on_headers_request(peer_id, request, response)
                    }
                    IncomingEthRequest::GetBlockBodies { peer_id, request, response } => {
                        this.on_bodies_request(peer_id, request, response)
                    }
                    IncomingEthRequest::GetNodeData { .. } => {
                        // node data is not served; the request is only counted
                        this.metrics.eth_node_data_requests_received_total.increment(1);
                    }
                    IncomingEthRequest::GetReceipts { peer_id, request, response } => {
                        this.on_receipts_request(peer_id, request, response)
                    }
                    IncomingEthRequest::GetReceipts69 { peer_id, request, response } => {
                        this.on_receipts69_request(peer_id, request, response)
                    }
                }
            },
        );
        this.metrics.acc_duration_poll_eth_req_handler.set(acc.as_secs_f64());
        // stream is fully drained and import futures pending
        if maybe_more_incoming_requests {
            // make sure we're woken up again
            cx.waker().wake_by_ref();
        }
        Poll::Pending
    }
}
/// All `eth` request related to blocks delegated by the network.
///
/// Each variant carries the oneshot sender on which the handler replies.
#[derive(Debug)]
pub enum IncomingEthRequest<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// Request Block headers from the peer.
    ///
    /// The response should be sent through the channel.
    GetBlockHeaders {
        /// The ID of the peer to request block headers from.
        peer_id: PeerId,
        /// The specific block headers requested.
        request: GetBlockHeaders,
        /// The channel sender for the response containing block headers.
        response: oneshot::Sender<RequestResult<BlockHeaders<N::BlockHeader>>>,
    },
    /// Request Block bodies from the peer.
    ///
    /// The response should be sent through the channel.
    GetBlockBodies {
        /// The ID of the peer to request block bodies from.
        peer_id: PeerId,
        /// The specific block bodies requested.
        request: GetBlockBodies,
        /// The channel sender for the response containing block bodies.
        response: oneshot::Sender<RequestResult<BlockBodies<N::BlockBody>>>,
    },
    /// Request Node Data from the peer.
    ///
    /// The response should be sent through the channel.
    GetNodeData {
        /// The ID of the peer to request node data from.
        peer_id: PeerId,
        /// The specific node data requested.
        request: GetNodeData,
        /// The channel sender for the response containing node data.
        response: oneshot::Sender<RequestResult<NodeData>>,
    },
    /// Request Receipts from the peer.
    ///
    /// The response should be sent through the channel.
    GetReceipts {
        /// The ID of the peer to request receipts from.
        peer_id: PeerId,
        /// The specific receipts requested.
        request: GetReceipts,
        /// The channel sender for the response containing receipts.
        response: oneshot::Sender<RequestResult<Receipts<N::Receipt>>>,
    },
    /// Request Receipts from the peer without bloom filter.
    ///
    /// The response should be sent through the channel.
    GetReceipts69 {
        /// The ID of the peer to request receipts from.
        peer_id: PeerId,
        /// The specific receipts requested.
        request: GetReceipts,
        /// The channel sender for the response containing Receipts69.
        response: oneshot::Sender<RequestResult<Receipts69<N::Receipt>>>,
    },
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/budget.rs | crates/net/network/src/budget.rs | /// Default budget to try and drain streams.
///
/// Default is 10 iterations.
pub const DEFAULT_BUDGET_TRY_DRAIN_STREAM: u32 = 10;
/// Default budget to try and drain headers and bodies download streams.
///
/// Default is 2 iterations.
pub const DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS: u32 = 2;
/// Default budget to try and drain [`Swarm`](crate::swarm::Swarm).
///
/// Default is 10 [`SwarmEvent`](crate::swarm::SwarmEvent)s.
pub const DEFAULT_BUDGET_TRY_DRAIN_SWARM: u32 = 10;
/// Default budget to try and drain pending messages from [`NetworkHandle`](crate::NetworkHandle)
/// channel. Polling the [`TransactionsManager`](crate::transactions::TransactionsManager) future
/// sends these types of messages.
///
/// Default is 40 outgoing transaction messages.
pub const DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL: u32 =
    4 * DEFAULT_BUDGET_TRY_DRAIN_STREAM;
/// Default budget to try and drain stream of
/// [`NetworkTransactionEvent`](crate::transactions::NetworkTransactionEvent)s from
/// [`NetworkManager`](crate::NetworkManager).
///
/// Default is 10 incoming transaction messages.
pub const DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS: u32 = DEFAULT_BUDGET_TRY_DRAIN_SWARM;
/// Default budget to try and flush pending pool imports to pool. This number reflects the number
/// of transactions that can be queued for import to pool in each iteration of the loop in the
/// [`TransactionsManager`](crate::transactions::TransactionsManager) future.
///
/// Default is 40 pending pool imports.
pub const DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS: u32 = 4 * DEFAULT_BUDGET_TRY_DRAIN_STREAM;
/// Default budget to try and stream hashes of successfully imported transactions from the pool.
///
/// Default is naturally same as the number of transactions to attempt importing,
/// [`DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS`], so 40 pool imports.
pub const DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS: u32 =
    DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS;
/// Polls the given stream. Breaks with `true` if there maybe is more work.
///
/// Arguments:
/// * `$target`/`$label` — tracing target and human-readable label; currently
///   unused (reserved for error reporting, see the `todo` below).
/// * `$budget` — maximum number of `Ready(Some(_))` items to process before
///   yielding with `true`.
/// * `$poll_stream` — expression that polls the stream, e.g.
///   `stream.poll_next_unpin(cx)`.
/// * `$on_ready_some` — callable invoked with each yielded item.
/// * `$on_ready_none` — optional expression evaluated when the stream
///   terminates.
#[macro_export]
macro_rules! poll_nested_stream_with_budget {
    ($target:literal, $label:literal, $budget:ident, $poll_stream:expr, $on_ready_some:expr $(, $on_ready_none:expr;)? $(,)?) => {{
        let mut budget: u32 = $budget;
        loop {
            match $poll_stream {
                Poll::Ready(Some(item)) => {
                    $on_ready_some(item);
                    budget -= 1;
                    if budget == 0 {
                        break true
                    }
                }
                Poll::Ready(None) => {
                    $($on_ready_none;)? // todo: handle error case with $target and $label
                    break false
                }
                Poll::Pending => break false,
            }
        }
    }};
}
/// Metered poll of the given stream. Breaks with `true` if there maybe is more work.
///
/// Same arguments as [`poll_nested_stream_with_budget`], preceded by `$acc`:
/// a [`Duration`](std::time::Duration) accumulator that the elapsed poll time
/// is added to via [`duration_metered_exec`](crate::duration_metered_exec).
#[macro_export]
macro_rules! metered_poll_nested_stream_with_budget {
    ($acc:expr, $target:literal, $label:literal, $budget:ident, $poll_stream:expr, $on_ready_some:expr $(, $on_ready_none:expr;)? $(,)?) => {{
        $crate::duration_metered_exec!(
            {
                $crate::poll_nested_stream_with_budget!($target, $label, $budget, $poll_stream, $on_ready_some $(, $on_ready_none;)?)
            },
            $acc
        )
    }};
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/net/network/src/session/conn.rs | crates/net/network/src/session/conn.rs | //! Connection types for a session
use futures::{Sink, Stream};
use reth_ecies::stream::ECIESStream;
use reth_eth_wire::{
errors::EthStreamError,
message::EthBroadcastMessage,
multiplex::{ProtocolProxy, RlpxSatelliteStream},
EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream,
};
use reth_eth_wire_types::RawCapabilityMessage;
use std::{
pin::Pin,
task::{Context, Poll},
};
use tokio::net::TcpStream;
/// The type of the underlying peer network connection.
pub type EthPeerConnection<N> = EthStream<P2PStream<ECIESStream<TcpStream>>, N>;
/// Various connection types that at least support the ETH protocol.
///
/// The eth stream here is the primary protocol of a multiplexed `RLPx` stream.
pub type EthSatelliteConnection<N = EthNetworkPrimitives> =
    RlpxSatelliteStream<ECIESStream<TcpStream>, EthStream<ProtocolProxy, N>>;
/// Connection types that support the ETH protocol.
///
/// This can be either:
/// - A connection that only supports the ETH protocol
/// - A connection that supports the ETH protocol and at least one other `RLPx` protocol
// This type is boxed because the underlying stream is ~6KB,
// mostly coming from `P2PStream`'s `snap::Encoder` (2072), and `ECIESStream` (3600).
#[derive(Debug)]
pub enum EthRlpxConnection<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// A connection that only supports the ETH protocol.
    EthOnly(Box<EthPeerConnection<N>>),
    /// A connection that supports the ETH protocol and __at least one other__ `RLPx` protocol.
    Satellite(Box<EthSatelliteConnection<N>>),
}
impl<N: NetworkPrimitives> EthRlpxConnection<N> {
    /// Returns the negotiated ETH version.
    ///
    /// For a satellite connection this is the version of the primary (eth) protocol.
    #[inline]
    pub(crate) const fn version(&self) -> EthVersion {
        match self {
            Self::EthOnly(conn) => conn.version(),
            Self::Satellite(conn) => conn.primary().version(),
        }
    }
    /// Consumes this type and returns the wrapped [`P2PStream`].
    #[inline]
    pub(crate) fn into_inner(self) -> P2PStream<ECIESStream<TcpStream>> {
        match self {
            Self::EthOnly(conn) => conn.into_inner(),
            Self::Satellite(conn) => conn.into_inner(),
        }
    }
    /// Returns mutable access to the underlying stream.
    #[inline]
    pub(crate) fn inner_mut(&mut self) -> &mut P2PStream<ECIESStream<TcpStream>> {
        match self {
            Self::EthOnly(conn) => conn.inner_mut(),
            Self::Satellite(conn) => conn.inner_mut(),
        }
    }
    /// Returns access to the underlying stream.
    #[inline]
    pub(crate) const fn inner(&self) -> &P2PStream<ECIESStream<TcpStream>> {
        match self {
            Self::EthOnly(conn) => conn.inner(),
            Self::Satellite(conn) => conn.inner(),
        }
    }
    /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead.
    #[inline]
    pub fn start_send_broadcast(
        &mut self,
        item: EthBroadcastMessage<N>,
    ) -> Result<(), EthStreamError> {
        match self {
            Self::EthOnly(conn) => conn.start_send_broadcast(item),
            Self::Satellite(conn) => conn.primary_mut().start_send_broadcast(item),
        }
    }
    /// Sends a raw capability message over the connection
    ///
    /// Caution: the message is forwarded as-is; no `eth` validity checks are done here.
    pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthStreamError> {
        match self {
            Self::EthOnly(conn) => conn.start_send_raw(msg),
            Self::Satellite(conn) => conn.primary_mut().start_send_raw(msg),
        }
    }
}
/// Converts an eth-only stream into the corresponding connection variant.
impl<N: NetworkPrimitives> From<EthPeerConnection<N>> for EthRlpxConnection<N> {
    #[inline]
    fn from(conn: EthPeerConnection<N>) -> Self {
        Self::EthOnly(Box::new(conn))
    }
}
/// Converts a multiplexed satellite stream into the corresponding connection variant.
impl<N: NetworkPrimitives> From<EthSatelliteConnection<N>> for EthRlpxConnection<N> {
    #[inline]
    fn from(conn: EthSatelliteConnection<N>) -> Self {
        Self::Satellite(Box::new(conn))
    }
}
/// Delegates a pinned method call to whichever connection variant is active.
macro_rules! delegate_call {
    ($self:ident.$method:ident($($args:ident),+)) => {
        // SAFETY: the enum itself is never moved out of; the inner values are
        // `Box`ed (heap-allocated, stable address) and are re-pinned
        // immediately, so the pin invariants are upheld.
        unsafe {
            match $self.get_unchecked_mut() {
                Self::EthOnly(l) => Pin::new_unchecked(l).$method($($args),+),
                Self::Satellite(r) => Pin::new_unchecked(r).$method($($args),+),
            }
        }
    }
}
/// Stream of inbound eth messages; delegates to the active variant.
impl<N: NetworkPrimitives> Stream for EthRlpxConnection<N> {
    type Item = Result<EthMessage<N>, EthStreamError>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        delegate_call!(self.poll_next(cx))
    }
}
/// Sink of outbound eth messages; delegates to the active variant.
impl<N: NetworkPrimitives> Sink<EthMessage<N>> for EthRlpxConnection<N> {
    type Error = EthStreamError;
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        delegate_call!(self.poll_ready(cx))
    }
    fn start_send(self: Pin<&mut Self>, item: EthMessage<N>) -> Result<(), Self::Error> {
        delegate_call!(self.start_send(item))
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        delegate_call!(self.poll_flush(cx))
    }
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        delegate_call!(self.poll_close(cx))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Compile-time assertion that `St` is usable as a bidirectional eth
    // message stream/sink; never invoked for its runtime behavior.
    const fn assert_eth_stream<N, St>()
    where
        N: NetworkPrimitives,
        St: Stream<Item = Result<EthMessage<N>, EthStreamError>> + Sink<EthMessage<N>>,
    {
    }
    #[test]
    const fn test_eth_stream_variants() {
        // both connection types must satisfy the eth stream/sink bounds
        assert_eth_stream::<EthNetworkPrimitives, EthSatelliteConnection<EthNetworkPrimitives>>();
        assert_eth_stream::<EthNetworkPrimitives, EthRlpxConnection<EthNetworkPrimitives>>();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.