repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench_mode.rs | bin/reth-bench/src/bench_mode.rs | //! The benchmark mode defines whether the benchmark should run for a closed or open range of
//! blocks.
use std::ops::RangeInclusive;
/// Whether or not the benchmark should run as a continuous stream of payloads.
#[derive(Debug, PartialEq, Eq)]
pub enum BenchMode {
    // TODO: just include the start block in `Continuous`
    /// Run the benchmark as a continuous stream of payloads, until the benchmark is interrupted.
    Continuous,
    /// Run the benchmark for a specific, inclusive range of blocks.
    Range(RangeInclusive<u64>),
}
impl BenchMode {
    /// Returns `true` if the given block number should be processed in this mode.
    ///
    /// A continuous benchmark accepts every block; a ranged benchmark accepts only
    /// blocks inside its inclusive range.
    pub fn contains(&self, block_number: u64) -> bool {
        match self {
            Self::Range(range) => range.contains(&block_number),
            Self::Continuous => true,
        }
    }

    /// Create a [`BenchMode`] from optional `from` and `to` fields.
    ///
    /// Both bounds must be given (yielding [`BenchMode::Range`]) or both omitted
    /// (yielding [`BenchMode::Continuous`], which starts at the latest block);
    /// supplying exactly one bound is rejected as ambiguous.
    pub fn new(from: Option<u64>, to: Option<u64>) -> Result<Self, eyre::Error> {
        match (from, to) {
            // Closed range: benchmark exactly these blocks.
            (Some(start), Some(end)) => Ok(Self::Range(start..=end)),
            // No bounds: stream payloads until interrupted.
            (None, None) => Ok(Self::Continuous),
            // Only one bound provided — ambiguous, so refuse.
            _ => Err(eyre::eyre!("`from` and `to` must be provided together, or not at all.")),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/main.rs | bin/reth-bench/src/main.rs | //! # reth-benchmark
//!
//! This is a tool that converts existing blocks into a stream of blocks for benchmarking purposes.
//! These blocks are then fed into reth as a stream of execution payloads.
#![doc(
// TODO: seismic
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#[global_allocator]
static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator();
pub mod authenticated_transport;
pub mod bench;
pub mod bench_mode;
pub mod valid_payload;
use bench::BenchmarkCommand;
use clap::Parser;
use reth_cli_runner::CliRunner;
/// Entry point: parses the CLI arguments and runs the selected benchmark command on a
/// runtime until completion or interruption.
fn main() {
    // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
    if std::env::var_os("RUST_BACKTRACE").is_none() {
        std::env::set_var("RUST_BACKTRACE", "1");
    }

    // Run until either exit or sigint or sigterm
    let runner = CliRunner::try_default_runtime().unwrap();
    runner
        .run_command_until_exit(|ctx| {
            // Parse CLI args lazily, inside the runner's context.
            let command = BenchmarkCommand::parse();
            command.execute(ctx)
        })
        // NOTE(review): both unwraps abort with a panic on failure; acceptable for a CLI
        // entry point, though an eprintln + nonzero exit code would be friendlier.
        .unwrap();
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/authenticated_transport.rs | bin/reth-bench/src/authenticated_transport.rs | //! This contains an authenticated rpc transport that can be used to send engine API newPayload
//! requests.
use std::sync::Arc;
use alloy_json_rpc::{RequestPacket, ResponsePacket};
use alloy_pubsub::{PubSubConnect, PubSubFrontend};
use alloy_rpc_types_engine::{Claims, JwtSecret};
use alloy_transport::{
utils::guess_local_url, Authorization, BoxTransport, TransportConnect, TransportError,
TransportErrorKind, TransportFut,
};
use alloy_transport_http::{reqwest::Url, Http, ReqwestTransport};
use alloy_transport_ipc::IpcConnect;
use alloy_transport_ws::WsConnect;
use futures::FutureExt;
use reqwest::header::HeaderValue;
use std::task::{Context, Poll};
use tokio::sync::RwLock;
use tower::Service;
/// An enum representing the different transports that can be used to connect to a runtime.
/// Only meant to be used internally by [`AuthenticatedTransport`].
#[derive(Clone, Debug)]
pub enum InnerTransport {
    /// HTTP transport (reqwest-backed client).
    Http(ReqwestTransport),
    /// `WebSocket` transport (pubsub frontend).
    Ws(PubSubFrontend),
    /// IPC transport (pubsub frontend).
    Ipc(PubSubFrontend),
}
impl InnerTransport {
    /// Connects to a transport based on the given URL and JWT. Returns an [`InnerTransport`] and
    /// the [`Claims`] generated from the jwt.
    ///
    /// The URL scheme selects the transport: `http(s)` -> HTTP, `ws(s)` -> WebSocket,
    /// `file` -> IPC; any other scheme yields [`AuthenticatedTransportError::BadScheme`].
    async fn connect(
        url: Url,
        jwt: JwtSecret,
    ) -> Result<(Self, Claims), AuthenticatedTransportError> {
        match url.scheme() {
            "http" | "https" => Self::connect_http(url, jwt),
            "ws" | "wss" => Self::connect_ws(url, jwt).await,
            // IPC needs no JWT claims, so default claims are returned alongside it.
            "file" => Ok((Self::connect_ipc(url).await?, Claims::default())),
            _ => Err(AuthenticatedTransportError::BadScheme(url.scheme().to_string())),
        }
    }

    /// Connects to an HTTP [`alloy_transport_http::Http`] transport. Returns an [`InnerTransport`]
    /// and the [Claims] generated from the jwt.
    fn connect_http(
        url: Url,
        jwt: JwtSecret,
    ) -> Result<(Self, Claims), AuthenticatedTransportError> {
        // Only trust built-in root certificates when the URL actually uses TLS.
        let mut client_builder =
            reqwest::Client::builder().tls_built_in_root_certs(url.scheme() == "https");
        let mut headers = reqwest::header::HeaderMap::new();

        // Add the JWT to the headers if we can decode it.
        let (auth, claims) =
            build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?;

        let mut auth_value: HeaderValue =
            HeaderValue::from_str(&auth.to_string()).expect("Header should be valid string");
        // Mark the Authorization header sensitive so it is redacted from debug output.
        auth_value.set_sensitive(true);

        headers.insert(reqwest::header::AUTHORIZATION, auth_value);
        client_builder = client_builder.default_headers(headers);

        let client =
            client_builder.build().map_err(AuthenticatedTransportError::HttpConstructionError)?;

        let inner = Self::Http(Http::with_client(client, url));
        Ok((inner, claims))
    }

    /// Connects to a `WebSocket` [`alloy_transport_ws::WsConnect`] transport. Returns an
    /// [`InnerTransport`] and the [`Claims`] generated from the jwt.
    async fn connect_ws(
        url: Url,
        jwt: JwtSecret,
    ) -> Result<(Self, Claims), AuthenticatedTransportError> {
        // Add the JWT to the headers if we can decode it.
        let (auth, claims) =
            build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?;

        let inner = WsConnect::new(url.clone())
            .with_auth(auth)
            .into_service()
            .await
            .map(Self::Ws)
            .map_err(|e| AuthenticatedTransportError::TransportError(e, url.to_string()))?;
        Ok((inner, claims))
    }

    /// Connects to an IPC [`alloy_transport_ipc::IpcConnect`] transport. Returns an
    /// [`InnerTransport`]. Does not return any [`Claims`] because IPC does not require them.
    async fn connect_ipc(url: Url) -> Result<Self, AuthenticatedTransportError> {
        // IPC, even for engine, typically does not require auth because it's local
        IpcConnect::new(url.to_string())
            .into_service()
            .await
            .map(InnerTransport::Ipc)
            .map_err(|e| AuthenticatedTransportError::TransportError(e, url.to_string()))
    }
}
/// An authenticated transport that can be used to send requests that contain a jwt bearer token.
#[derive(Debug, Clone)]
pub struct AuthenticatedTransport {
    /// The inner actual transport used.
    ///
    /// Also contains the current claims being used. This is used to determine whether or not we
    /// should create another client. Guarded by an async `RwLock` because a request may need to
    /// replace the transport when the claims expire.
    inner_and_claims: Arc<RwLock<(InnerTransport, Claims)>>,
    /// The JWT secret currently in use, kept so fresh claims can be generated.
    jwt: JwtSecret,
    /// The URL currently in use, kept so the client can be recreated if needed.
    url: Url,
}
/// An error that can occur when creating an authenticated transport.
#[derive(Debug, thiserror::Error)]
pub enum AuthenticatedTransportError {
/// The URL is invalid.
#[error("The URL is invalid")]
InvalidUrl,
/// Failed to lock transport
#[error("Failed to lock transport")]
LockFailed,
/// The JWT is invalid.
#[error("The JWT is invalid: {0}")]
InvalidJwt(String),
/// The transport failed to connect.
#[error("The transport failed to connect to {1}, transport error: {0}")]
TransportError(TransportError, String),
/// The http client could not be built.
#[error("The http client could not be built")]
HttpConstructionError(reqwest::Error),
/// The scheme is invalid.
#[error("The URL scheme is invalid: {0}")]
BadScheme(String),
}
impl AuthenticatedTransport {
    /// Create a new builder with the given URL.
    ///
    /// Establishes the initial inner transport and stores the JWT and URL so the
    /// connection can be re-established once the claims go stale.
    pub async fn connect(url: Url, jwt: JwtSecret) -> Result<Self, AuthenticatedTransportError> {
        let (inner, claims) = InnerTransport::connect(url.clone(), jwt).await?;
        Ok(Self { inner_and_claims: Arc::new(RwLock::new((inner, claims))), jwt, url })
    }

    /// Sends a request using the underlying transport.
    ///
    /// For sending the actual request, this action is delegated down to the underlying transport
    /// through Tower's [`tower::Service::call`]. See tower's [`tower::Service`] trait for more
    /// information.
    fn request(&self, req: RequestPacket) -> TransportFut<'static> {
        let this = self.clone();
        Box::pin(async move {
            // Hold the write lock for the whole call: the transport may be replaced below,
            // and `call` on the inner transport needs mutable access.
            let mut inner_and_claims = this.inner_and_claims.write().await;

            // Copy the claims with `iat` moved one second earlier, so the expiry check
            // below fires slightly before the real token expires (buffer time).
            let mut shifted_claims = inner_and_claims.1;
            shifted_claims.iat -= 1;

            // if the claims are out of date, reconnect and swap in a fresh transport + claims
            if !shifted_claims.is_within_time_window() {
                let (new_inner, new_claims) =
                    InnerTransport::connect(this.url.clone(), this.jwt).await.map_err(|e| {
                        TransportError::Transport(TransportErrorKind::Custom(Box::new(e)))
                    })?;
                *inner_and_claims = (new_inner, new_claims);
            }

            // Delegate the actual request to whichever inner transport is active.
            match inner_and_claims.0 {
                InnerTransport::Http(ref mut http) => http.call(req),
                InnerTransport::Ws(ref mut ws) => ws.call(req),
                InnerTransport::Ipc(ref mut ipc) => ipc.call(req),
            }
            .await
        })
    }
}
/// Builds an [`Authorization`] bearer header plus the [`Claims`] it encodes from the given
/// JWT secret. The claims carry the current timestamp as `iat` via `Claims::default()`.
fn build_auth(secret: JwtSecret) -> eyre::Result<(Authorization, Claims)> {
    // Default claims stamp `iat` with the current time.
    let claims = Claims::default();
    let bearer_token = secret.encode(&claims)?;
    Ok((Authorization::Bearer(bearer_token), claims))
}
/// This specifies how to connect to an authenticated transport.
#[derive(Clone, Debug)]
pub struct AuthenticatedTransportConnect {
    /// The URL to connect to.
    url: Url,
    /// The JWT secret used to authenticate with the transport.
    jwt: JwtSecret,
}
impl AuthenticatedTransportConnect {
    /// Create a new connection configuration from the given URL and JWT secret.
    pub const fn new(url: Url, jwt: JwtSecret) -> Self {
        Self { url, jwt }
    }
}
impl TransportConnect for AuthenticatedTransportConnect {
    /// A connection is considered local when the URL points at this machine.
    fn is_local(&self) -> bool {
        guess_local_url(&self.url)
    }

    /// Builds the authenticated transport and boxes it for generic use.
    ///
    /// Connection failures are surfaced as custom transport errors.
    async fn get_transport(&self) -> Result<BoxTransport, TransportError> {
        let connect_fut = AuthenticatedTransport::connect(self.url.clone(), self.jwt).map(|res| {
            res.map_err(|err| TransportError::Transport(TransportErrorKind::Custom(Box::new(err))))
        });
        connect_fut.await.map(BoxTransport::new)
    }
}
impl tower::Service<RequestPacket> for AuthenticatedTransport {
    type Response = ResponsePacket;
    type Error = TransportError;
    type Future = TransportFut<'static>;

    // Always ready: any per-request setup (reconnect on stale claims) happens inside
    // `AuthenticatedTransport::request`.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    // Delegates to `AuthenticatedTransport::request`.
    fn call(&mut self, req: RequestPacket) -> Self::Future {
        self.request(req)
    }
}
// Same implementation for a shared reference, so the transport can be used without
// exclusive ownership (cloning is cheap: the state is behind an `Arc`).
impl tower::Service<RequestPacket> for &AuthenticatedTransport {
    type Response = ResponsePacket;
    type Error = TransportError;
    type Future = TransportFut<'static>;

    // Always ready; see the owned impl above.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    // Delegates to `AuthenticatedTransport::request`.
    fn call(&mut self, req: RequestPacket) -> Self::Future {
        self.request(req)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench/new_payload_only.rs | bin/reth-bench/src/bench/new_payload_only.rs | //! Runs the `reth bench` command, sending only newPayload, without a forkchoiceUpdated call.
use crate::{
bench::{
context::BenchContext,
output::{
NewPayloadResult, TotalGasOutput, TotalGasRow, GAS_OUTPUT_SUFFIX,
NEW_PAYLOAD_OUTPUT_SUFFIX,
},
},
valid_payload::{block_to_new_payload, call_new_payload},
};
use alloy_provider::Provider;
use clap::Parser;
use csv::Writer;
use eyre::Context;
use reth_cli_runner::CliContext;
use reth_node_core::args::BenchmarkArgs;
use std::time::{Duration, Instant};
use tracing::{debug, info};
/// `reth benchmark new-payload-only` command
#[derive(Debug, Parser)]
pub struct Command {
    /// The RPC url to use for getting data.
    #[arg(long, value_name = "RPC_URL", verbatim_doc_comment)]
    rpc_url: String,

    // Common benchmark options (auth RPC, block range, output directory, ...).
    #[command(flatten)]
    benchmark: BenchmarkArgs,
}
impl Command {
    /// Execute `benchmark new-payload-only` command
    ///
    /// Streams blocks from the RPC provider on a background task and, for each block,
    /// measures the latency of the corresponding `engine_newPayload` call against the
    /// auth provider. Results are logged, optionally written to CSV files, and
    /// summarized as an overall Ggas/s figure.
    pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> {
        let BenchContext {
            benchmark_mode,
            block_provider,
            auth_provider,
            mut next_block,
            is_optimism,
        } = BenchContext::new(&self.benchmark, self.rpc_url).await?;

        // Producer task: fetch full blocks and convert them to newPayload params,
        // feeding a bounded channel so fetching can run ahead of payload submission.
        let (sender, mut receiver) = tokio::sync::mpsc::channel(1000);
        tokio::task::spawn(async move {
            while benchmark_mode.contains(next_block) {
                let block_res = block_provider
                    .get_block_by_number(next_block.into())
                    .full()
                    .await
                    .wrap_err_with(|| format!("Failed to fetch block by number {next_block}"));
                // NOTE(review): these unwraps abort the producer task on RPC failure or a
                // missing block; the consumer loop then simply sees the channel close.
                let block = block_res.unwrap().unwrap();
                let header = block.header.clone();
                let (version, params) = block_to_new_payload(block, is_optimism).unwrap();

                next_block += 1;
                sender.send((header, version, params)).await.unwrap();
            }
        });

        // put results in a summary vec so they can be printed at the end
        let mut results = Vec::new();
        let total_benchmark_duration = Instant::now();
        // Time spent blocked on the producer channel; subtracted from the total so the
        // reported throughput reflects engine time, not block-fetching time.
        let mut total_wait_time = Duration::ZERO;

        while let Some((header, version, params)) = {
            let wait_start = Instant::now();
            let result = receiver.recv().await;
            total_wait_time += wait_start.elapsed();
            result
        } {
            // just put gas used here
            let gas_used = header.gas_used;
            let block_number = header.number;

            debug!(
                target: "reth-bench",
                number=?header.number,
                "Sending payload to engine",
            );

            // Time only the newPayload call itself.
            let start = Instant::now();
            call_new_payload(&auth_provider, version, params).await?;

            let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() };
            info!(%new_payload_result);

            // current duration since the start of the benchmark minus the time
            // waiting for blocks
            let current_duration = total_benchmark_duration.elapsed() - total_wait_time;

            // record the current result
            let row = TotalGasRow { block_number, gas_used, time: current_duration };
            results.push((row, new_payload_result));
        }

        let (gas_output_results, new_payload_results): (_, Vec<NewPayloadResult>) =
            results.into_iter().unzip();

        // write the csv output to files
        if let Some(path) = self.benchmark.output {
            // first write the new payload results to a file
            let output_path = path.join(NEW_PAYLOAD_OUTPUT_SUFFIX);
            info!("Writing newPayload call latency output to file: {:?}", output_path);
            let mut writer = Writer::from_path(output_path)?;
            for result in new_payload_results {
                writer.serialize(result)?;
            }
            writer.flush()?;

            // now write the gas output to a file
            let output_path = path.join(GAS_OUTPUT_SUFFIX);
            info!("Writing total gas output to file: {:?}", output_path);
            let mut writer = Writer::from_path(output_path)?;
            for row in &gas_output_results {
                writer.serialize(row)?;
            }
            writer.flush()?;

            info!("Finished writing benchmark output files to {:?}.", path);
        }

        // accumulate the results and calculate the overall Ggas/s
        // NOTE(review): `TotalGasOutput::new` expects at least one row; an empty
        // benchmark range would panic here.
        let gas_output = TotalGasOutput::new(gas_output_results);
        info!(
            total_duration=?gas_output.total_duration,
            total_gas_used=?gas_output.total_gas_used,
            blocks_processed=?gas_output.blocks_processed,
            "Total Ggas/s: {:.4}",
            gas_output.total_gigagas_per_second()
        );

        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench/mod.rs | bin/reth-bench/src/bench/mod.rs | //! `reth benchmark` command. Collection of various benchmarking routines.
use clap::{Parser, Subcommand};
use reth_cli_runner::CliContext;
use reth_node_core::args::LogArgs;
use reth_tracing::FileWorkerGuard;
mod context;
mod new_payload_fcu;
mod new_payload_only;
mod output;
mod send_payload;
/// `reth bench` command
#[derive(Debug, Parser)]
pub struct BenchmarkCommand {
    // The benchmark subcommand to run.
    #[command(subcommand)]
    command: Subcommands,

    // Logging configuration shared by all subcommands.
    #[command(flatten)]
    logs: LogArgs,
}
/// `reth benchmark` subcommands
#[derive(Subcommand, Debug)]
pub enum Subcommands {
    /// Benchmark which calls `newPayload`, then `forkchoiceUpdated`.
    NewPayloadFcu(new_payload_fcu::Command),

    /// Benchmark which only calls subsequent `newPayload` calls.
    NewPayloadOnly(new_payload_only::Command),

    /// Command for generating and sending an `engine_newPayload` request constructed from an RPC
    /// block.
    ///
    /// This command takes a JSON block input (either from a file or stdin) and generates
    /// an execution payload that can be used with the `engine_newPayloadV*` API.
    ///
    /// One powerful use case is pairing this command with the `cast block` command, for example:
    ///
    /// `cast block latest --full --json | reth-bench send-payload --rpc-url localhost:5000
    /// --jwt-secret $(cat ~/.local/share/reth/mainnet/jwt.hex)`
    SendPayload(send_payload::Command),
}
impl BenchmarkCommand {
    /// Execute `benchmark` command
    pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> {
        // Initialize tracing first; keep the guard alive for the whole run so file
        // logs are flushed on exit.
        let _guard = self.init_tracing()?;

        // Dispatch to the selected subcommand.
        match self.command {
            Subcommands::NewPayloadFcu(command) => command.execute(ctx).await,
            Subcommands::NewPayloadOnly(command) => command.execute(ctx).await,
            Subcommands::SendPayload(command) => command.execute(ctx).await,
        }
    }

    /// Initializes tracing with the configured options.
    ///
    /// If file logging is enabled, this function returns a guard that must be kept alive to ensure
    /// that all logs are flushed to disk.
    pub fn init_tracing(&self) -> eyre::Result<Option<FileWorkerGuard>> {
        let guard = self.logs.init_tracing()?;
        Ok(guard)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench/new_payload_fcu.rs | bin/reth-bench/src/bench/new_payload_fcu.rs | //! Runs the `reth bench` command, calling first newPayload for each block, then calling
//! forkchoiceUpdated.
use crate::{
bench::{
context::BenchContext,
output::{
CombinedResult, NewPayloadResult, TotalGasOutput, TotalGasRow, COMBINED_OUTPUT_SUFFIX,
GAS_OUTPUT_SUFFIX,
},
},
valid_payload::{block_to_new_payload, call_forkchoice_updated, call_new_payload},
};
use alloy_provider::Provider;
use alloy_rpc_types_engine::ForkchoiceState;
use clap::Parser;
use csv::Writer;
use eyre::Context;
use humantime::parse_duration;
use reth_cli_runner::CliContext;
use reth_node_core::args::BenchmarkArgs;
use std::time::{Duration, Instant};
use tracing::{debug, info};
/// `reth benchmark new-payload-fcu` command
#[derive(Debug, Parser)]
pub struct Command {
    /// The RPC url to use for getting data.
    #[arg(long, value_name = "RPC_URL", verbatim_doc_comment)]
    rpc_url: String,

    /// How long to wait after a forkchoice update before sending the next payload.
    #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)]
    wait_time: Option<Duration>,

    // Common benchmark options (auth RPC, block range, output directory, ...).
    #[command(flatten)]
    benchmark: BenchmarkArgs,
}
impl Command {
    /// Execute `benchmark new-payload-fcu` command
    ///
    /// Streams blocks from the RPC provider on a background task and, for each block,
    /// measures the latency of `engine_newPayload` followed by `engine_forkchoiceUpdated`
    /// against the auth provider. Results are logged, optionally written to CSV files,
    /// and summarized as an overall Ggas/s figure.
    pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> {
        let BenchContext {
            benchmark_mode,
            block_provider,
            auth_provider,
            mut next_block,
            is_optimism,
        } = BenchContext::new(&self.benchmark, self.rpc_url).await?;

        // Producer task: fetch full blocks plus the hashes needed for the forkchoice
        // state, feeding a bounded channel so fetching can run ahead of submission.
        let (sender, mut receiver) = tokio::sync::mpsc::channel(1000);
        tokio::task::spawn(async move {
            while benchmark_mode.contains(next_block) {
                let block_res = block_provider
                    .get_block_by_number(next_block.into())
                    .full()
                    .await
                    .wrap_err_with(|| format!("Failed to fetch block by number {next_block}"));
                // NOTE(review): these unwraps abort the producer task on RPC failure or a
                // missing block; the consumer loop then simply sees the channel close.
                let block = block_res.unwrap().unwrap();
                let header = block.header.clone();
                let (version, params) = block_to_new_payload(block, is_optimism).unwrap();

                let head_block_hash = header.hash;
                // Approximate safe/finalized as head - 32 and head - 64 blocks
                // (saturating at genesis), fetched concurrently below.
                let safe_block_hash =
                    block_provider.get_block_by_number(header.number.saturating_sub(32).into());

                let finalized_block_hash =
                    block_provider.get_block_by_number(header.number.saturating_sub(64).into());

                let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,);

                let safe_block_hash = safe.unwrap().expect("finalized block exists").header.hash;
                let finalized_block_hash =
                    finalized.unwrap().expect("finalized block exists").header.hash;

                next_block += 1;
                sender
                    .send((
                        header,
                        version,
                        params,
                        head_block_hash,
                        safe_block_hash,
                        finalized_block_hash,
                    ))
                    .await
                    .unwrap();
            }
        });

        // put results in a summary vec so they can be printed at the end
        let mut results = Vec::new();
        let total_benchmark_duration = Instant::now();
        // Time spent blocked on the producer channel; subtracted from the total so the
        // reported throughput reflects engine time, not block-fetching time.
        let mut total_wait_time = Duration::ZERO;

        while let Some((header, version, params, head, safe, finalized)) = {
            let wait_start = Instant::now();
            let result = receiver.recv().await;
            total_wait_time += wait_start.elapsed();
            result
        } {
            // just put gas used here
            let gas_used = header.gas_used;
            let block_number = header.number;

            debug!(target: "reth-bench", ?block_number, "Sending payload",);

            // construct fcu to call
            let forkchoice_state = ForkchoiceState {
                head_block_hash: head,
                safe_block_hash: safe,
                finalized_block_hash: finalized,
            };

            // Time the newPayload call, then the forkchoiceUpdated call on top of it.
            let start = Instant::now();
            call_new_payload(&auth_provider, version, params).await?;

            let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() };

            call_forkchoice_updated(&auth_provider, version, forkchoice_state, None).await?;

            // calculate the total duration and the fcu latency, record
            let total_latency = start.elapsed();
            let fcu_latency = total_latency - new_payload_result.latency;
            let combined_result =
                CombinedResult { block_number, new_payload_result, fcu_latency, total_latency };

            // current duration since the start of the benchmark minus the time
            // waiting for blocks
            let current_duration = total_benchmark_duration.elapsed() - total_wait_time;

            // convert gas used to gigagas, then compute gigagas per second
            info!(%combined_result);

            // wait if we need to
            if let Some(wait_time) = self.wait_time {
                tokio::time::sleep(wait_time).await;
            }

            // record the current result
            let gas_row = TotalGasRow { block_number, gas_used, time: current_duration };
            results.push((gas_row, combined_result));
        }

        let (gas_output_results, combined_results): (_, Vec<CombinedResult>) =
            results.into_iter().unzip();

        // write the csv output to files
        if let Some(path) = self.benchmark.output {
            // first write the combined results to a file
            let output_path = path.join(COMBINED_OUTPUT_SUFFIX);
            info!("Writing engine api call latency output to file: {:?}", output_path);
            let mut writer = Writer::from_path(output_path)?;
            for result in combined_results {
                writer.serialize(result)?;
            }
            writer.flush()?;

            // now write the gas output to a file
            let output_path = path.join(GAS_OUTPUT_SUFFIX);
            info!("Writing total gas output to file: {:?}", output_path);
            let mut writer = Writer::from_path(output_path)?;
            for row in &gas_output_results {
                writer.serialize(row)?;
            }
            writer.flush()?;

            info!("Finished writing benchmark output files to {:?}.", path);
        }

        // accumulate the results and calculate the overall Ggas/s
        let gas_output = TotalGasOutput::new(gas_output_results);
        info!(
            total_duration=?gas_output.total_duration,
            total_gas_used=?gas_output.total_gas_used,
            blocks_processed=?gas_output.blocks_processed,
            "Total Ggas/s: {:.4}",
            gas_output.total_gigagas_per_second()
        );

        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench/output.rs | bin/reth-bench/src/bench/output.rs | //! Contains various benchmark output formats, either for logging or for
//! serialization to / from files.
use reth_primitives_traits::constants::GIGAGAS;
use serde::{ser::SerializeStruct, Serialize};
use std::time::Duration;
/// This is the suffix for gas output csv files.
pub(crate) const GAS_OUTPUT_SUFFIX: &str = "total_gas.csv";

/// This is the suffix for combined (newPayload + fcu latency) output csv files.
pub(crate) const COMBINED_OUTPUT_SUFFIX: &str = "combined_latency.csv";

/// This is the suffix for new payload latency output csv files.
pub(crate) const NEW_PAYLOAD_OUTPUT_SUFFIX: &str = "new_payload_latency.csv";
/// This represents the results of a single `newPayload` call in the benchmark, containing the gas
/// used and the `newPayload` latency.
#[derive(Debug)]
pub(crate) struct NewPayloadResult {
    /// The gas used in the `newPayload` call.
    pub(crate) gas_used: u64,
    /// The wall-clock latency of the `newPayload` call.
    pub(crate) latency: Duration,
}
impl NewPayloadResult {
    /// Returns the gas per second processed in the `newPayload` call.
    pub(crate) fn gas_per_second(&self) -> f64 {
        let elapsed_secs = self.latency.as_secs_f64();
        self.gas_used as f64 / elapsed_secs
    }
}
impl std::fmt::Display for NewPayloadResult {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Report throughput in gigagas per second.
        let gigagas_per_sec = self.gas_per_second() / GIGAGAS as f64;
        write!(
            f,
            "New payload processed at {:.4} Ggas/s, used {} total gas. Latency: {:?}",
            gigagas_per_sec, self.gas_used, self.latency
        )
    }
}
/// This is another [`Serialize`] implementation for the [`NewPayloadResult`] struct, serializing
/// the duration as microseconds because the csv writer would fail otherwise.
impl Serialize for NewPayloadResult {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // Flatten the latency to a plain microsecond count for the csv writer.
        let latency_micros = self.latency.as_micros();

        let mut state = serializer.serialize_struct("NewPayloadResult", 2)?;
        state.serialize_field("gas_used", &self.gas_used)?;
        state.serialize_field("latency", &latency_micros)?;
        state.end()
    }
}
/// This represents the combined results of a `newPayload` call and a `forkchoiceUpdated` call in
/// the benchmark, containing the gas used, the `newPayload` latency, and the `forkchoiceUpdated`
/// latency.
#[derive(Debug)]
pub(crate) struct CombinedResult {
    /// The block number of the block being processed.
    pub(crate) block_number: u64,
    /// The `newPayload` result (gas used and latency).
    pub(crate) new_payload_result: NewPayloadResult,
    /// The latency of the `forkchoiceUpdated` call.
    pub(crate) fcu_latency: Duration,
    /// The latency of both calls combined.
    pub(crate) total_latency: Duration,
}
impl CombinedResult {
    /// Returns the gas per second, including the `newPayload` _and_ `forkchoiceUpdated` duration.
    pub(crate) fn combined_gas_per_second(&self) -> f64 {
        let total_secs = self.total_latency.as_secs_f64();
        self.new_payload_result.gas_used as f64 / total_secs
    }
}
impl std::fmt::Display for CombinedResult {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Both throughput figures are reported in gigagas per second.
        let new_payload_ggas = self.new_payload_result.gas_per_second() / GIGAGAS as f64;
        let combined_ggas = self.combined_gas_per_second() / GIGAGAS as f64;
        write!(
            f,
            "Payload {} processed at {:.4} Ggas/s, used {} total gas. Combined gas per second: {:.4} Ggas/s. fcu latency: {:?}, newPayload latency: {:?}",
            self.block_number,
            new_payload_ggas,
            self.new_payload_result.gas_used,
            combined_ggas,
            self.fcu_latency,
            self.new_payload_result.latency
        )
    }
}
/// This is a [`Serialize`] implementation for the [`CombinedResult`] struct, serializing the
/// durations as microseconds because the csv writer would fail otherwise.
impl Serialize for CombinedResult {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // Flatten all three durations to plain microsecond counts for the csv writer.
        let fcu_micros = self.fcu_latency.as_micros();
        let new_payload_micros = self.new_payload_result.latency.as_micros();
        let total_micros = self.total_latency.as_micros();

        // The nested `NewPayloadResult` is flattened into top-level fields because this
        // output is meant for CSV writing.
        let mut state = serializer.serialize_struct("CombinedResult", 5)?;
        state.serialize_field("block_number", &self.block_number)?;
        state.serialize_field("gas_used", &self.new_payload_result.gas_used)?;
        state.serialize_field("new_payload_latency", &new_payload_micros)?;
        state.serialize_field("fcu_latency", &fcu_micros)?;
        state.serialize_field("total_latency", &total_micros)?;
        state.end()
    }
}
/// This represents a row of total gas data in the benchmark.
#[derive(Debug)]
pub(crate) struct TotalGasRow {
    /// The block number of the block being processed.
    pub(crate) block_number: u64,
    /// The total gas used in the block.
    pub(crate) gas_used: u64,
    /// Time elapsed since the start of the benchmark when this row was recorded
    /// (callers subtract time spent waiting on block fetches).
    pub(crate) time: Duration,
}
/// This represents the aggregated output, meant to show gas per second metrics, of a benchmark run.
#[derive(Debug)]
pub(crate) struct TotalGasOutput {
    /// The total gas used across all processed blocks.
    pub(crate) total_gas_used: u64,
    /// The total duration of the benchmark.
    pub(crate) total_duration: Duration,
    /// The total gas used per second.
    pub(crate) total_gas_per_second: f64,
    /// The number of blocks processed.
    pub(crate) blocks_processed: u64,
}
impl TotalGasOutput {
    /// Create a new [`TotalGasOutput`] from a list of [`TotalGasRow`].
    ///
    /// The total duration is taken from the timestamp of the last row, since each row
    /// records elapsed time since the start of the benchmark. An empty `rows` list now
    /// yields an all-zero output instead of panicking, and a zero total duration yields
    /// a gas-per-second of zero instead of `inf`/`NaN`.
    pub(crate) fn new(rows: Vec<TotalGasRow>) -> Self {
        // The duration is obtained from the last row; zero when no blocks were processed.
        let total_duration = rows.last().map(|row| row.time).unwrap_or_default();
        let blocks_processed = rows.len() as u64;
        let total_gas_used: u64 = rows.into_iter().map(|row| row.gas_used).sum();
        // Guard against division by zero (empty benchmark or sub-resolution duration).
        let elapsed = total_duration.as_secs_f64();
        let total_gas_per_second =
            if elapsed > 0.0 { total_gas_used as f64 / elapsed } else { 0.0 };
        Self { total_gas_used, total_duration, total_gas_per_second, blocks_processed }
    }

    /// Return the total gigagas per second.
    pub(crate) fn total_gigagas_per_second(&self) -> f64 {
        self.total_gas_per_second / GIGAGAS as f64
    }
}
/// This serializes the `time` field of the [`TotalGasRow`] to microseconds.
///
/// This is essentially just for the csv writer, which would have headers
impl Serialize for TotalGasRow {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // Flatten the elapsed time to a plain microsecond count for the csv writer.
        let time_micros = self.time.as_micros();

        let mut state = serializer.serialize_struct("TotalGasRow", 3)?;
        state.serialize_field("block_number", &self.block_number)?;
        state.serialize_field("gas_used", &self.gas_used)?;
        state.serialize_field("time", &time_micros)?;
        state.end()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use csv::Writer;
    use std::io::BufRead;

    /// Serializing a [`TotalGasRow`] through the csv writer must produce a header row plus
    /// a data row with the time flattened to microseconds.
    #[test]
    fn test_write_total_gas_row_csv() {
        let row = TotalGasRow { block_number: 1, gas_used: 1_000, time: Duration::from_secs(1) };

        let mut writer = Writer::from_writer(vec![]);
        writer.serialize(row).unwrap();
        let result = writer.into_inner().unwrap();

        // parse into Lines
        let mut result = result.as_slice().lines();

        // assert header
        let expected_first_line = "block_number,gas_used,time";
        let first_line = result.next().unwrap().unwrap();
        assert_eq!(first_line, expected_first_line);

        // 1 second serialized as 1_000_000 microseconds
        let expected_second_line = "1,1000,1000000";
        let second_line = result.next().unwrap().unwrap();
        assert_eq!(second_line, expected_second_line);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench/context.rs | bin/reth-bench/src/bench/context.rs | //! This contains the [`BenchContext`], which is information that all replay-based benchmarks need.
//! The initialization code is also the same, so this can be shared across benchmark commands.
use crate::{authenticated_transport::AuthenticatedTransportConnect, bench_mode::BenchMode};
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::address;
use alloy_provider::{network::AnyNetwork, Provider, RootProvider};
use alloy_rpc_client::ClientBuilder;
use alloy_rpc_types_engine::JwtSecret;
use reqwest::Url;
use reth_node_core::args::BenchmarkArgs;
use tracing::info;
/// This is intended to be used by benchmarks that replay blocks from an RPC.
///
/// It contains an authenticated provider for engine API queries, a block provider for block
/// queries, a [`BenchMode`] to determine whether the benchmark should run for a closed or open
/// range of blocks, and the next block to fetch.
pub(crate) struct BenchContext {
/// The auth provider is used for engine API queries.
pub(crate) auth_provider: RootProvider<AnyNetwork>,
/// The block provider is used for block queries.
pub(crate) block_provider: RootProvider<AnyNetwork>,
/// The benchmark mode, which defines whether the benchmark should run for a closed or open
/// range of blocks.
pub(crate) benchmark_mode: BenchMode,
/// The next block to fetch.
pub(crate) next_block: u64,
/// Whether the chain is an OP rollup.
pub(crate) is_optimism: bool,
}
impl BenchContext {
/// This is the initialization code for most benchmarks, taking in a [`BenchmarkArgs`] and
/// returning the providers needed to run a benchmark.
pub(crate) async fn new(bench_args: &BenchmarkArgs, rpc_url: String) -> eyre::Result<Self> {
info!("Running benchmark using data from RPC URL: {}", rpc_url);
// Ensure that output directory exists and is a directory
if let Some(output) = &bench_args.output {
if output.is_file() {
return Err(eyre::eyre!("Output path must be a directory"));
}
// Create the directory if it doesn't exist
if !output.exists() {
std::fs::create_dir_all(output)?;
info!("Created output directory: {:?}", output);
}
}
// set up alloy client for blocks
let client = ClientBuilder::default().http(rpc_url.parse()?);
let block_provider = RootProvider::<AnyNetwork>::new(client);
// Check if this is an OP chain by checking code at a predeploy address.
let is_optimism = !block_provider
.get_code_at(address!("0x420000000000000000000000000000000000000F"))
.await?
.is_empty();
// construct the authenticated provider
let auth_jwt = bench_args
.auth_jwtsecret
.clone()
.ok_or_else(|| eyre::eyre!("--jwt-secret must be provided for authenticated RPC"))?;
// fetch jwt from file
//
// the jwt is hex encoded so we will decode it after
let jwt = std::fs::read_to_string(auth_jwt)?;
let jwt = JwtSecret::from_hex(jwt)?;
// get engine url
let auth_url = Url::parse(&bench_args.engine_rpc_url)?;
// construct the authed transport
info!("Connecting to Engine RPC at {} for replay", auth_url);
let auth_transport = AuthenticatedTransportConnect::new(auth_url, jwt);
let client = ClientBuilder::default().connect_with(auth_transport).await?;
let auth_provider = RootProvider::<AnyNetwork>::new(client);
// Computes the block range for the benchmark.
//
// - If `--advance` is provided, fetches the latest block and sets:
// - `from = head + 1`
// - `to = head + advance`
// - Otherwise, uses the values from `--from` and `--to`.
let (from, to) = if let Some(advance) = bench_args.advance {
if advance == 0 {
return Err(eyre::eyre!("--advance must be greater than 0"));
}
let head_block = auth_provider
.get_block_by_number(BlockNumberOrTag::Latest)
.await?
.ok_or_else(|| eyre::eyre!("Failed to fetch latest block for --advance"))?;
let head_number = head_block.header.number;
(Some(head_number), Some(head_number + advance))
} else {
(bench_args.from, bench_args.to)
};
// If neither `--from` nor `--to` are provided, we will run the benchmark continuously,
// starting at the latest block.
let mut benchmark_mode = BenchMode::new(from, to)?;
let first_block = match benchmark_mode {
BenchMode::Continuous => {
// fetch Latest block
block_provider.get_block_by_number(BlockNumberOrTag::Latest).full().await?.unwrap()
}
BenchMode::Range(ref mut range) => {
match range.next() {
Some(block_number) => {
// fetch first block in range
block_provider
.get_block_by_number(block_number.into())
.full()
.await?
.unwrap()
}
None => {
return Err(eyre::eyre!(
"Benchmark mode range is empty, please provide a larger range"
));
}
}
}
};
let next_block = first_block.header.number + 1;
Ok(Self { auth_provider, block_provider, benchmark_mode, next_block, is_optimism })
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/reth-bench/src/bench/send_payload.rs | bin/reth-bench/src/bench/send_payload.rs | use alloy_provider::network::AnyRpcBlock;
use alloy_rpc_types_engine::ExecutionPayload;
use clap::Parser;
use eyre::{OptionExt, Result};
use op_alloy_consensus::OpTxEnvelope;
use reth_cli_runner::CliContext;
use std::io::{BufReader, Read, Write};
/// Command for generating and sending an `engine_newPayload` request constructed from an RPC
/// block.
#[derive(Debug, Parser)]
pub struct Command {
/// Path to the json file to parse. If not specified, stdin will be used.
#[arg(short, long)]
path: Option<String>,
/// The engine RPC url to use.
#[arg(
short,
long,
// Required if `mode` is `execute` or `cast`.
required_if_eq_any([("mode", "execute"), ("mode", "cast")]),
// If `mode` is not specified, then `execute` is used, so we need to require it.
required_unless_present("mode")
)]
rpc_url: Option<String>,
/// The JWT secret to use. Can be either a path to a file containing the secret or the secret
/// itself.
#[arg(short, long)]
jwt_secret: Option<String>,
#[arg(long, default_value_t = 3)]
new_payload_version: u8,
/// The mode to use.
#[arg(long, value_enum, default_value = "execute")]
mode: Mode,
}
#[derive(Debug, Clone, clap::ValueEnum)]
enum Mode {
/// Execute the `cast` command. This works with blocks of any size, because it pipes the
/// payload into the `cast` command.
Execute,
/// Print the `cast` command. Caution: this may not work with large blocks because of the
/// command length limit.
Cast,
/// Print the JSON payload. Can be piped into `cast` command if the block is small enough.
Json,
}
impl Command {
/// Read input from either a file or stdin
fn read_input(&self) -> Result<String> {
Ok(match &self.path {
Some(path) => reth_fs_util::read_to_string(path)?,
None => String::from_utf8(
BufReader::new(std::io::stdin()).bytes().collect::<Result<Vec<_>, _>>()?,
)?,
})
}
/// Load JWT secret from either a file or use the provided string directly
fn load_jwt_secret(&self) -> Result<Option<String>> {
match &self.jwt_secret {
Some(secret) => {
// Try to read as file first
match std::fs::read_to_string(secret) {
Ok(contents) => Ok(Some(contents.trim().to_string())),
// If file read fails, use the string directly
Err(_) => Ok(Some(secret.clone())),
}
}
None => Ok(None),
}
}
/// Execute the generate payload command
pub async fn execute(self, _ctx: CliContext) -> Result<()> {
// Load block
let block_json = self.read_input()?;
// Load JWT secret
let jwt_secret = self.load_jwt_secret()?;
// Parse the block
let block = serde_json::from_str::<AnyRpcBlock>(&block_json)?
.into_inner()
.map_header(|header| header.map(|h| h.into_header_with_defaults()))
.try_map_transactions(|tx| {
// try to convert unknowns into op type so that we can also support optimism
tx.try_into_either::<OpTxEnvelope>()
})?
.into_consensus();
// Extract parent beacon block root
let parent_beacon_block_root = block.header.parent_beacon_block_root;
// Extract blob versioned hashes
let blob_versioned_hashes =
block.body.blob_versioned_hashes_iter().copied().collect::<Vec<_>>();
// Convert to execution payload
let execution_payload = ExecutionPayload::from_block_slow(&block).0;
let use_v4 = block.header.requests_hash.is_some();
// Create JSON request data
let json_request = if use_v4 {
serde_json::to_string(&(
execution_payload,
blob_versioned_hashes,
parent_beacon_block_root,
block.header.requests_hash.unwrap_or_default(),
))?
} else {
serde_json::to_string(&(
execution_payload,
blob_versioned_hashes,
parent_beacon_block_root,
))?
};
// Print output or execute command
match self.mode {
Mode::Execute => {
// Create cast command
let mut command = std::process::Command::new("cast");
let method = if use_v4 { "engine_newPayloadV4" } else { "engine_newPayloadV3" };
command.arg("rpc").arg(method).arg("--raw");
if let Some(rpc_url) = self.rpc_url {
command.arg("--rpc-url").arg(rpc_url);
}
if let Some(secret) = &jwt_secret {
command.arg("--jwt-secret").arg(secret);
}
// Start cast process
let mut process = command.stdin(std::process::Stdio::piped()).spawn()?;
// Write to cast's stdin
process
.stdin
.take()
.ok_or_eyre("stdin not available")?
.write_all(json_request.as_bytes())?;
// Wait for cast to finish
process.wait()?;
}
Mode::Cast => {
let mut cmd = format!(
"cast rpc engine_newPayloadV{} --raw '{}'",
self.new_payload_version, json_request
);
if let Some(rpc_url) = self.rpc_url {
cmd += &format!(" --rpc-url {rpc_url}");
}
if let Some(secret) = &jwt_secret {
cmd += &format!(" --jwt-secret {secret}");
}
println!("{cmd}");
}
Mode::Json => {
println!("{json_request}");
}
}
Ok(())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/bin/genesis-builder/src/main.rs | bin/genesis-builder/src/main.rs | //! Genesis builder CLI tool for adding contracts to genesis files
use clap::Parser;
use reth_genesis_builder::{builder::GenesisBuilder, error::BuilderError, genesis, manifest};
use std::path::PathBuf;
use tracing::info;
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
/// Command line arguments
#[derive(Parser)]
#[command(name = "genesis-builder")]
#[command(version)]
#[command(about = "Build genesis files with contracts from GitHub", long_about = None)]
struct Args {
/// Path to genesis manifest TOML file
#[arg(
long,
value_name = "FILE",
default_value = "crates/seismic/chainspec/res/genesis/manifest.toml"
)]
manifest: PathBuf,
/// Path to genesis JSON file to modify
#[arg(
long,
value_name = "FILE",
default_value = "crates/seismic/chainspec/res/genesis/dev.json"
)]
genesis: PathBuf,
/// Optional output path (defaults to modifying input genesis file in-place)
#[arg(long, value_name = "FILE")]
output: Option<PathBuf>,
/// say "yes" to every overwrite question
#[arg(short = 'y', long)]
yes_overwrite: bool,
}
/// Main function for building genesis files
fn main() -> Result<(), BuilderError> {
// Initialize tracing subscriber to read RUST_LOG env variable
tracing_subscriber::registry().with(fmt::layer()).with(EnvFilter::from_default_env()).init();
let args = Args::parse();
info!("Loading manifest: {}", args.manifest.display());
let manifest_data = manifest::load_manifest(&args.manifest)?;
info!("Found {} contracts to deploy", manifest_data.contracts.len());
info!("Loading genesis: {}", args.genesis.display());
let genesis_data = genesis::load_genesis(&args.genesis)?;
info!(" Current allocations: {}", genesis_data.alloc.len());
let builder = GenesisBuilder::new(manifest_data, genesis_data, args.yes_overwrite)?;
let updated_genesis = builder.build()?;
let output_path = args.output.unwrap_or(args.genesis.clone());
info!("Writing genesis: {}", output_path.display());
genesis::write_genesis(&updated_genesis, &output_path)?;
info!("Genesis build complete!");
info!(" Total allocations: {}", updated_genesis.alloc.len());
Ok(())
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/engine.rs | crates/evm/evm/src/engine.rs | use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor};
/// [`ConfigureEvm`] extension providing methods for executing payloads.
pub trait ConfigureEngineEvm<ExecutionData>: ConfigureEvm {
/// Returns an [`EvmEnvFor`] for the given payload.
fn evm_env_for_payload(&self, payload: &ExecutionData) -> EvmEnvFor<Self>;
/// Returns an [`ExecutionCtxFor`] for the given payload.
fn context_for_payload<'a>(&self, payload: &'a ExecutionData) -> ExecutionCtxFor<'a, Self>;
/// Returns an [`ExecutableTxIterator`] for the given payload.
fn tx_iterator_for_payload(&self, payload: &ExecutionData) -> impl ExecutableTxIterator<Self>;
}
/// Iterator over executable transactions.
pub trait ExecutableTxIterator<Evm: ConfigureEvm>:
Iterator<Item = Result<Self::Tx, Self::Error>> + Send + 'static
{
/// The executable transaction type iterator yields.
type Tx: ExecutableTxFor<Evm> + Clone + Send + 'static;
/// Errors that may occur while recovering or decoding transactions.
type Error: core::error::Error + Send + Sync + 'static;
}
impl<Evm: ConfigureEvm, Tx, Err, T> ExecutableTxIterator<Evm> for T
where
Tx: ExecutableTxFor<Evm> + Clone + Send + 'static,
Err: core::error::Error + Send + Sync + 'static,
T: Iterator<Item = Result<Tx, Err>> + Send + 'static,
{
type Tx = Tx;
type Error = Err;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/lib.rs | crates/evm/evm/src/lib.rs | //! Traits for configuring an EVM specifics.
//!
//! # Revm features
//!
//! This crate does __not__ enforce specific revm features such as `blst` or `c-kzg`, which are
//! critical for revm's evm internals, it is the responsibility of the implementer to ensure the
//! proper features are selected.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use crate::execute::{BasicBlockBuilder, Executor};
use alloc::vec::Vec;
use alloy_eips::{
eip2718::{EIP2930_TX_TYPE_ID, LEGACY_TX_TYPE_ID},
eip2930::AccessList,
eip4895::Withdrawals,
};
use alloy_evm::block::{BlockExecutorFactory, BlockExecutorFor};
use alloy_primitives::{Address, B256};
use core::{error::Error, fmt::Debug};
use execute::{BasicBlockExecutor, BlockAssembler, BlockBuilder};
use reth_execution_errors::BlockExecutionError;
use reth_primitives_traits::{
BlockTy, HeaderTy, NodePrimitives, ReceiptTy, SealedBlock, SealedHeader, TxTy,
};
use revm::{context::TxEnv, database::State};
pub mod either;
/// EVM environment configuration.
pub mod execute;
mod aliases;
pub use aliases::*;
mod engine;
pub use engine::{ConfigureEngineEvm, ExecutableTxIterator};
#[cfg(feature = "metrics")]
pub mod metrics;
pub mod noop;
#[cfg(any(test, feature = "test-utils"))]
/// test helpers for mocking executor
pub mod test_utils;
pub use alloy_evm::{
block::{state_changes, system_calls, OnStateHook},
*,
};
pub use alloy_evm::block::state_changes as state_change;
/// A complete configuration of EVM for Reth.
///
/// This trait encapsulates complete configuration required for transaction execution and block
/// execution/building, providing a unified interface for EVM operations.
///
/// # Architecture Overview
///
/// The EVM abstraction consists of the following layers:
///
/// 1. **[`Evm`] (produced by [`EvmFactory`])**: The core EVM implementation responsible for
/// executing individual transactions and producing outputs including state changes, logs, gas
/// usage, etc.
///
/// 2. **[`BlockExecutor`] (produced by [`BlockExecutorFactory`])**: A higher-level component that
/// operates on top of [`Evm`] to execute entire blocks. This involves:
/// - Executing all transactions in sequence
/// - Building receipts from transaction outputs
/// - Applying block rewards to the beneficiary
/// - Executing system calls (e.g., EIP-4788 beacon root updates)
/// - Managing state changes and bundle accumulation
///
/// 3. **[`BlockAssembler`]**: Responsible for assembling valid blocks from executed transactions.
/// It takes the output from [`BlockExecutor`] along with execution context and produces a
/// complete block ready for inclusion in the chain.
///
/// # Usage Patterns
///
/// The abstraction supports two primary use cases:
///
/// ## 1. Executing Externally Provided Blocks (e.g., during sync)
///
/// ```rust,ignore
/// use reth_evm::ConfigureEvm;
///
/// // Execute a received block
/// let mut executor = evm_config.executor(state_db);
/// let output = executor.execute(&block)?;
///
/// // Access the execution results
/// println!("Gas used: {}", output.result.gas_used);
/// println!("Receipts: {:?}", output.result.receipts);
/// ```
///
/// ## 2. Building New Blocks (e.g., payload building)
///
/// Payload building is slightly different as it doesn't have the block's header yet, but rather
/// attributes for the block's environment, such as timestamp, fee recipient, and randomness value.
/// The block's header will be the outcome of the block building process.
///
/// ```rust,ignore
/// use reth_evm::{ConfigureEvm, NextBlockEnvAttributes};
///
/// // Create attributes for the next block
/// let attributes = NextBlockEnvAttributes {
/// timestamp: current_time + 12,
/// suggested_fee_recipient: beneficiary_address,
/// prev_randao: randomness_value,
/// gas_limit: 30_000_000,
/// withdrawals: Some(withdrawals),
/// parent_beacon_block_root: Some(beacon_root),
/// };
///
/// // Build a new block on top of parent
/// let mut builder = evm_config.builder_for_next_block(
/// &mut state_db,
/// &parent_header,
/// attributes
/// )?;
///
/// // Apply pre-execution changes (e.g., beacon root update)
/// builder.apply_pre_execution_changes()?;
///
/// // Execute transactions
/// for tx in pending_transactions {
/// match builder.execute_transaction(tx) {
/// Ok(gas_used) => {
/// println!("Transaction executed, gas used: {}", gas_used);
/// }
/// Err(e) => {
/// println!("Transaction failed: {:?}", e);
/// }
/// }
/// }
///
/// // Finish block building and get the outcome (block)
/// let outcome = builder.finish(state_provider)?;
/// let block = outcome.block;
/// ```
///
/// # Key Components
///
/// ## [`NextBlockEnvCtx`]
///
/// Contains attributes needed to configure the next block that cannot be derived from the
/// parent block alone. This includes data typically provided by the consensus layer:
/// - `timestamp`: Block timestamp
/// - `suggested_fee_recipient`: Beneficiary address
/// - `prev_randao`: Randomness value
/// - `gas_limit`: Block gas limit
/// - `withdrawals`: Consensus layer withdrawals
/// - `parent_beacon_block_root`: EIP-4788 beacon root
///
/// ## [`BlockAssembler`]
///
/// Takes the execution output and produces a complete block. It receives:
/// - Transaction execution results (receipts, gas used)
/// - Final state root after all executions
/// - Bundle state with all changes
/// - Execution context and environment
///
/// The assembler is responsible for:
/// - Setting the correct block header fields
/// - Including executed transactions
/// - Setting gas used and receipts root
/// - Applying any chain-specific rules
///
/// [`ExecutionCtx`]: BlockExecutorFactory::ExecutionCtx
/// [`NextBlockEnvCtx`]: ConfigureEvm::NextBlockEnvCtx
/// [`BlockExecutor`]: alloy_evm::block::BlockExecutor
#[auto_impl::auto_impl(&, Arc)]
pub trait ConfigureEvm: Clone + Debug + Send + Sync + Unpin {
/// The primitives type used by the EVM.
type Primitives: NodePrimitives;
/// The error type that is returned by [`Self::next_evm_env`].
type Error: Error + Send + Sync + 'static;
/// Context required for configuring next block environment.
///
/// Contains values that can't be derived from the parent block.
type NextBlockEnvCtx: Debug + Clone;
/// Configured [`BlockExecutorFactory`], contains [`EvmFactory`] internally.
type BlockExecutorFactory: for<'a> BlockExecutorFactory<
Transaction = TxTy<Self::Primitives>,
Receipt = ReceiptTy<Self::Primitives>,
ExecutionCtx<'a>: Debug + Send,
EvmFactory: EvmFactory<
Tx: TransactionEnv
+ FromRecoveredTx<TxTy<Self::Primitives>>
+ FromTxWithEncoded<TxTy<Self::Primitives>>,
// Precompiles<EmptyDBTyped<Infallible>> = PrecompilesMap,
>,
>;
/// A type that knows how to build a block.
type BlockAssembler: BlockAssembler<
Self::BlockExecutorFactory,
Block = BlockTy<Self::Primitives>,
>;
/// Returns reference to the configured [`BlockExecutorFactory`].
fn block_executor_factory(&self) -> &Self::BlockExecutorFactory;
/// Returns reference to the configured [`BlockAssembler`].
fn block_assembler(&self) -> &Self::BlockAssembler;
/// Creates a new [`EvmEnv`] for the given header.
fn evm_env(&self, header: &HeaderTy<Self::Primitives>) -> EvmEnvFor<Self>;
/// Returns the configured [`EvmEnv`] for `parent + 1` block.
///
/// This is intended for usage in block building after the merge and requires additional
/// attributes that can't be derived from the parent block: attributes that are determined by
/// the CL, such as the timestamp, suggested fee recipient, and randomness value.
///
/// # Example
///
/// ```rust,ignore
/// let evm_env = evm_config.next_evm_env(&parent_header, &attributes)?;
/// // evm_env now contains:
/// // - Correct spec ID based on timestamp and block number
/// // - Block environment with next block's parameters
/// // - Configuration like chain ID and blob parameters
/// ```
fn next_evm_env(
&self,
parent: &HeaderTy<Self::Primitives>,
attributes: &Self::NextBlockEnvCtx,
) -> Result<EvmEnvFor<Self>, Self::Error>;
/// Returns the configured [`BlockExecutorFactory::ExecutionCtx`] for a given block.
fn context_for_block<'a>(
&self,
block: &'a SealedBlock<BlockTy<Self::Primitives>>,
) -> ExecutionCtxFor<'a, Self>;
/// Returns the configured [`BlockExecutorFactory::ExecutionCtx`] for `parent + 1`
/// block.
fn context_for_next_block(
&self,
parent: &SealedHeader<HeaderTy<Self::Primitives>>,
attributes: Self::NextBlockEnvCtx,
) -> ExecutionCtxFor<'_, Self>;
/// Returns a [`TxEnv`] from a transaction and [`Address`].
fn tx_env(&self, transaction: impl IntoTxEnv<TxEnvFor<Self>>) -> TxEnvFor<Self> {
transaction.into_tx_env()
}
/// Provides a reference to [`EvmFactory`] implementation.
fn evm_factory(&self) -> &EvmFactoryFor<Self> {
self.block_executor_factory().evm_factory()
}
/// Returns a new EVM with the given database configured with the given environment settings,
/// including the spec id and transaction environment.
///
/// This will preserve any handler modifications
fn evm_with_env<DB: Database>(&self, db: DB, evm_env: EvmEnvFor<Self>) -> EvmFor<Self, DB> {
self.evm_factory().create_evm(db, evm_env)
}
/// Returns a new EVM with the given database configured with `cfg` and `block_env`
/// configuration derived from the given header. Relies on
/// [`ConfigureEvm::evm_env`].
///
/// # Caution
///
/// This does not initialize the tx environment.
fn evm_for_block<DB: Database>(
&self,
db: DB,
header: &HeaderTy<Self::Primitives>,
) -> EvmFor<Self, DB> {
let evm_env = self.evm_env(header);
self.evm_with_env(db, evm_env)
}
/// Returns a new EVM with the given database configured with the given environment settings,
/// including the spec id.
///
/// This will use the given external inspector as the EVM external context.
///
/// This will preserve any handler modifications
fn evm_with_env_and_inspector<DB, I>(
&self,
db: DB,
evm_env: EvmEnvFor<Self>,
inspector: I,
) -> EvmFor<Self, DB, I>
where
DB: Database,
I: InspectorFor<Self, DB>,
{
self.evm_factory().create_evm_with_inspector(db, evm_env, inspector)
}
/// Creates a strategy with given EVM and execution context.
fn create_executor<'a, DB, I>(
&'a self,
evm: EvmFor<Self, &'a mut State<DB>, I>,
ctx: <Self::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>,
) -> impl BlockExecutorFor<'a, Self::BlockExecutorFactory, DB, I>
where
DB: Database,
I: InspectorFor<Self, &'a mut State<DB>> + 'a,
{
self.block_executor_factory().create_executor(evm, ctx)
}
/// Creates a strategy for execution of a given block.
fn executor_for_block<'a, DB: Database>(
&'a self,
db: &'a mut State<DB>,
block: &'a SealedBlock<<Self::Primitives as NodePrimitives>::Block>,
) -> impl BlockExecutorFor<'a, Self::BlockExecutorFactory, DB> {
let evm = self.evm_for_block(db, block.header());
let ctx = self.context_for_block(block);
self.create_executor(evm, ctx)
}
/// Creates a [`BlockBuilder`]. Should be used when building a new block.
///
/// Block builder wraps an inner [`alloy_evm::block::BlockExecutor`] and has a similar
/// interface. Builder collects all of the executed transactions, and once
/// [`BlockBuilder::finish`] is called, it invokes the configured [`BlockAssembler`] to
/// create a block.
///
/// # Example
///
/// ```rust,ignore
/// // Create a builder with specific EVM configuration
/// let evm = evm_config.evm_with_env(&mut state_db, evm_env);
/// let ctx = evm_config.context_for_next_block(&parent, attributes);
/// let builder = evm_config.create_block_builder(evm, &parent, ctx);
/// ```
fn create_block_builder<'a, DB, I>(
&'a self,
evm: EvmFor<Self, &'a mut State<DB>, I>,
parent: &'a SealedHeader<HeaderTy<Self::Primitives>>,
ctx: <Self::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>,
) -> impl BlockBuilder<
Primitives = Self::Primitives,
Executor: BlockExecutorFor<'a, Self::BlockExecutorFactory, DB, I>,
>
where
DB: Database,
I: InspectorFor<Self, &'a mut State<DB>> + 'a,
{
BasicBlockBuilder {
executor: self.create_executor(evm, ctx.clone()),
ctx,
assembler: self.block_assembler(),
parent,
transactions: Vec::new(),
}
}
/// Creates a [`BlockBuilder`] for building of a new block. This is a helper to invoke
/// [`ConfigureEvm::create_block_builder`].
///
/// This is the primary method for building new blocks. It combines:
/// 1. Creating the EVM environment for the next block
/// 2. Setting up the execution context from attributes
/// 3. Initializing the block builder with proper configuration
///
/// # Example
///
/// ```rust,ignore
/// // Build a block with specific attributes
/// let mut builder = evm_config.builder_for_next_block(
/// &mut state_db,
/// &parent_header,
/// attributes
/// )?;
///
/// // Execute system calls (e.g., beacon root update)
/// builder.apply_pre_execution_changes()?;
///
/// // Execute transactions
/// for tx in transactions {
/// builder.execute_transaction(tx)?;
/// }
///
/// // Complete block building
/// let outcome = builder.finish(state_provider)?;
/// ```
fn builder_for_next_block<'a, DB: Database>(
&'a self,
db: &'a mut State<DB>,
parent: &'a SealedHeader<<Self::Primitives as NodePrimitives>::BlockHeader>,
attributes: Self::NextBlockEnvCtx,
) -> Result<impl BlockBuilder<Primitives = Self::Primitives>, Self::Error> {
let evm_env = self.next_evm_env(parent, &attributes)?;
let evm = self.evm_with_env(db, evm_env);
let ctx = self.context_for_next_block(parent, attributes);
Ok(self.create_block_builder(evm, parent, ctx))
}
/// Returns a new [`Executor`] for executing blocks.
///
/// The executor processes complete blocks including:
/// - All transactions in order
/// - Block rewards and fees
/// - Block level system calls
/// - State transitions
///
/// # Example
///
/// ```rust,ignore
/// // Create an executor
/// let mut executor = evm_config.executor(state_db);
///
/// // Execute a single block
/// let output = executor.execute(&block)?;
///
/// // Execute multiple blocks
/// let batch_output = executor.execute_batch(&blocks)?;
/// ```
#[auto_impl(keep_default_for(&, Arc))]
fn executor<DB: Database>(
&self,
db: DB,
) -> impl Executor<DB, Primitives = Self::Primitives, Error = BlockExecutionError> {
BasicBlockExecutor::new(self, db)
}
/// Returns a new [`BasicBlockExecutor`].
#[auto_impl(keep_default_for(&, Arc))]
fn batch_executor<DB: Database>(
&self,
db: DB,
) -> impl Executor<DB, Primitives = Self::Primitives, Error = BlockExecutionError> {
BasicBlockExecutor::new(self, db)
}
}
/// Represents additional attributes required to configure the next block.
///
/// This struct contains all the information needed to build a new block that cannot be
/// derived from the parent block header alone. These attributes are typically provided
/// by the consensus layer (CL) through the Engine API during payload building.
///
/// # Relationship with [`ConfigureEvm`] and [`BlockAssembler`]
///
/// The flow for building a new block involves:
///
/// 1. **Receive attributes** from the consensus layer containing:
/// - Timestamp for the new block
/// - Fee recipient (coinbase/beneficiary)
/// - Randomness value (prevRandao)
/// - Withdrawals to process
/// - Parent beacon block root for EIP-4788
///
/// 2. **Configure EVM environment** using these attributes: ```rust,ignore let evm_env =
/// evm_config.next_evm_env(&parent, &attributes)?; ```
///
/// 3. **Build the block** with transactions: ```rust,ignore let mut builder =
/// evm_config.builder_for_next_block( &mut state, &parent, attributes )?; ```
///
/// 4. **Assemble the final block** using [`BlockAssembler`] which takes:
/// - Execution results from all transactions
/// - The attributes used during execution
/// - Final state root after all changes
///
/// This design cleanly separates:
/// - **Configuration** (what parameters to use) - handled by `NextBlockEnvAttributes`
/// - **Execution** (running transactions) - handled by `BlockExecutor`
/// - **Assembly** (creating the final block) - handled by `BlockAssembler`
#[derive(Debug, Clone, PartialEq, Eq)]
// NOTE: this is created in crates/payload/basic/src/lib.rs
// => timestamp should be in milliseconds
pub struct NextBlockEnvAttributes {
/// The timestamp of the next block.
pub timestamp: u64,
/// The suggested fee recipient for the next block.
pub suggested_fee_recipient: Address,
/// The randomness value for the next block.
pub prev_randao: B256,
/// Block gas limit.
pub gas_limit: u64,
/// The parent beacon block root.
pub parent_beacon_block_root: Option<B256>,
/// Withdrawals
pub withdrawals: Option<Withdrawals>,
}
impl NextBlockEnvAttributes {
/// Returns the timestamp in seconds, assuming the timestamp is in milliseconds.
pub fn timestamp_seconds(&self) -> u64 {
if cfg!(feature = "timestamp-in-seconds") {
self.timestamp
} else {
self.timestamp / 1000
}
}
}
/// Abstraction over transaction environment.
pub trait TransactionEnv:
revm::context_interface::Transaction + Debug + Clone + Send + Sync + 'static
{
/// Set the gas limit.
fn set_gas_limit(&mut self, gas_limit: u64);
/// Set the gas limit.
fn with_gas_limit(mut self, gas_limit: u64) -> Self {
self.set_gas_limit(gas_limit);
self
}
/// Returns the configured nonce.
fn nonce(&self) -> u64;
/// Sets the nonce.
fn set_nonce(&mut self, nonce: u64);
/// Sets the nonce.
fn with_nonce(mut self, nonce: u64) -> Self {
self.set_nonce(nonce);
self
}
/// Set access list.
fn set_access_list(&mut self, access_list: AccessList);
/// Set access list.
fn with_access_list(mut self, access_list: AccessList) -> Self {
self.set_access_list(access_list);
self
}
}
impl TransactionEnv for TxEnv {
fn set_gas_limit(&mut self, gas_limit: u64) {
self.gas_limit = gas_limit;
}
fn nonce(&self) -> u64 {
self.nonce
}
fn set_nonce(&mut self, nonce: u64) {
self.nonce = nonce;
}
fn set_access_list(&mut self, access_list: AccessList) {
self.access_list = access_list;
if self.tx_type == LEGACY_TX_TYPE_ID {
// if this was previously marked as legacy tx, this must be upgraded to eip2930 with an
// accesslist
self.tx_type = EIP2930_TX_TYPE_ID;
}
}
}
#[cfg(feature = "op")]
impl<T: TransactionEnv> TransactionEnv for op_revm::OpTransaction<T> {
    // All operations delegate to the wrapped base transaction environment.
    fn set_gas_limit(&mut self, limit: u64) {
        self.base.set_gas_limit(limit);
    }
    fn nonce(&self) -> u64 {
        // Fully-qualified call to pick this trait's `nonce` unambiguously.
        TransactionEnv::nonce(&self.base)
    }
    fn set_nonce(&mut self, next_nonce: u64) {
        self.base.set_nonce(next_nonce);
    }
    fn set_access_list(&mut self, list: AccessList) {
        self.base.set_access_list(list);
    }
}
impl<T: TransactionEnv> TransactionEnv for seismic_revm::SeismicTransaction<T> {
    // All operations delegate to the wrapped base transaction environment.
    fn set_gas_limit(&mut self, limit: u64) {
        self.base.set_gas_limit(limit);
    }
    fn nonce(&self) -> u64 {
        // Fully-qualified call to pick this trait's `nonce` unambiguously.
        TransactionEnv::nonce(&self.base)
    }
    fn set_nonce(&mut self, next_nonce: u64) {
        self.base.set_nonce(next_nonce);
    }
    fn set_access_list(&mut self, list: AccessList) {
        self.base.set_access_list(list);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/noop.rs | crates/evm/evm/src/noop.rs | //! Helpers for testing.
use crate::{ConfigureEvm, EvmEnvFor};
use reth_primitives_traits::{BlockTy, HeaderTy, SealedBlock, SealedHeader};
/// A no-op EVM config that panics on any call. Used as a typesystem hack to satisfy
/// [`ConfigureEvm`] bounds.
///
/// `Inner` is never constructed; the `PhantomData` only ties this type to
/// `Inner`'s associated types so it can stand in where an `Inner` is expected.
#[derive(Debug, Clone)]
pub struct NoopEvmConfig<Inner>(core::marker::PhantomData<Inner>);
impl<Inner> Default for NoopEvmConfig<Inner> {
fn default() -> Self {
Self::new()
}
}
impl<Inner> NoopEvmConfig<Inner> {
    /// Create a new instance of the no-op EVM config.
    pub const fn new() -> Self {
        Self(core::marker::PhantomData)
    }
    // Always panics: this config exists purely to satisfy trait bounds and
    // must never actually be used. The trait impl below "delegates" here.
    fn inner(&self) -> &Inner {
        unimplemented!("NoopEvmConfig should never be called")
    }
}
impl<Inner> ConfigureEvm for NoopEvmConfig<Inner>
where
    Inner: ConfigureEvm,
{
    // All associated types are borrowed from `Inner`; every method routes
    // through `self.inner()`, which unconditionally panics. This impl only
    // satisfies trait bounds and must never be invoked at runtime.
    type Primitives = Inner::Primitives;
    type Error = Inner::Error;
    type NextBlockEnvCtx = Inner::NextBlockEnvCtx;
    type BlockExecutorFactory = Inner::BlockExecutorFactory;
    type BlockAssembler = Inner::BlockAssembler;
    fn block_executor_factory(&self) -> &Self::BlockExecutorFactory {
        self.inner().block_executor_factory()
    }
    fn block_assembler(&self) -> &Self::BlockAssembler {
        self.inner().block_assembler()
    }
    fn evm_env(&self, header: &HeaderTy<Self::Primitives>) -> EvmEnvFor<Self> {
        self.inner().evm_env(header)
    }
    fn next_evm_env(
        &self,
        parent: &HeaderTy<Self::Primitives>,
        attributes: &Self::NextBlockEnvCtx,
    ) -> Result<EvmEnvFor<Self>, Self::Error> {
        self.inner().next_evm_env(parent, attributes)
    }
    fn context_for_block<'a>(
        &self,
        block: &'a SealedBlock<BlockTy<Self::Primitives>>,
    ) -> crate::ExecutionCtxFor<'a, Self> {
        self.inner().context_for_block(block)
    }
    fn context_for_next_block(
        &self,
        parent: &SealedHeader<HeaderTy<Self::Primitives>>,
        attributes: Self::NextBlockEnvCtx,
    ) -> crate::ExecutionCtxFor<'_, Self> {
        self.inner().context_for_next_block(parent, attributes)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/execute.rs | crates/evm/evm/src/execute.rs | //! Traits for execution.
use crate::{ConfigureEvm, Database, OnStateHook, TxEnvFor};
use alloc::{boxed::Box, vec::Vec};
use alloy_consensus::{BlockHeader, Header};
use alloy_eips::eip2718::WithEncoded;
pub use alloy_evm::block::{BlockExecutor, BlockExecutorFactory};
use alloy_evm::{
block::{CommitChanges, ExecutableTx},
Evm, EvmEnv, EvmFactory, RecoveredTx, ToTxEnv,
};
use alloy_primitives::{Address, B256};
pub use reth_execution_errors::{
BlockExecutionError, BlockValidationError, InternalBlockExecutionError,
};
use reth_execution_types::BlockExecutionResult;
pub use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome};
use reth_primitives_traits::{
Block, HeaderTy, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, SealedHeader, TxTy,
};
use reth_storage_api::StateProvider;
pub use reth_storage_errors::provider::ProviderError;
use reth_trie_common::{updates::TrieUpdates, HashedPostState};
use revm::{
context::result::ExecutionResult,
database::{states::bundle_state::BundleRetention, BundleState, State},
};
/// A type that knows how to execute a block. It is assumed to operate on a
/// [`crate::Evm`] internally and use [`State`] as database.
pub trait Executor<DB: Database>: Sized {
    /// The primitive types used by the executor.
    type Primitives: NodePrimitives;
    /// The error type returned by the executor.
    type Error;

    /// Executes a single block and returns [`BlockExecutionResult`], without the state changes.
    fn execute_one(
        &mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
    ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>;

    /// Executes the EVM with the given input and accepts a state hook closure that is invoked with
    /// the EVM state after execution.
    fn execute_one_with_state_hook<F>(
        &mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        state_hook: F,
    ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        F: OnStateHook + 'static;

    /// Consumes the executor and executes a single block.
    ///
    /// # Note
    /// Execution happens without any validation of the output.
    ///
    /// # Returns
    /// The raw [`BlockExecutionOutput`] together with the accumulated bundle state.
    fn execute(
        mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
    ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    {
        let result = self.execute_one(block)?;
        let mut db = self.into_state();
        Ok(BlockExecutionOutput { state: db.take_bundle(), result })
    }

    /// Executes every block in the batch and aggregates the results into a single
    /// [`ExecutionOutcome`] anchored at the first block's number.
    fn execute_batch<'a, I>(
        mut self,
        blocks: I,
    ) -> Result<ExecutionOutcome<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        I: IntoIterator<Item = &'a RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>>,
    {
        let mut outcomes = Vec::new();
        let mut first_block_number = None;
        for block in blocks {
            // The aggregated outcome is anchored at the first executed block's number.
            first_block_number.get_or_insert_with(|| block.header().number());
            outcomes.push(self.execute_one(block)?);
        }
        Ok(ExecutionOutcome::from_blocks(
            first_block_number.unwrap_or_default(),
            self.into_state().take_bundle(),
            outcomes,
        ))
    }

    /// Executes a single block and hands the post-execution [`State`] to the given
    /// closure before the bundle is extracted.
    fn execute_with_state_closure<F>(
        mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        mut f: F,
    ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        F: FnMut(&State<DB>),
    {
        let result = self.execute_one(block)?;
        let mut db = self.into_state();
        // Let the caller inspect the post-execution state before it is bundled.
        f(&db);
        Ok(BlockExecutionOutput { state: db.take_bundle(), result })
    }

    /// Executes a single block while invoking `state_hook` with the EVM state after
    /// execution, returning the full [`BlockExecutionOutput`].
    fn execute_with_state_hook<F>(
        mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        state_hook: F,
    ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        F: OnStateHook + 'static,
    {
        let result = self.execute_one_with_state_hook(block, state_hook)?;
        let mut db = self.into_state();
        Ok(BlockExecutionOutput { state: db.take_bundle(), result })
    }

    /// Consumes the executor and returns the [`State`] containing all state changes.
    fn into_state(self) -> State<DB>;

    /// The size hint of the batch's tracked state size.
    ///
    /// This is used to optimize DB commits depending on the size of the state.
    fn size_hint(&self) -> usize;
}
/// Helper type for the output of executing a block.
///
/// Bundles the per-transaction receipts with the total gas used by the block.
#[derive(Debug, Clone)]
pub struct ExecuteOutput<R> {
    /// Receipts obtained after executing a block.
    pub receipts: Vec<R>,
    /// Cumulative gas used in the block execution.
    pub gas_used: u64,
}
/// Input for block building. Consumed by [`BlockAssembler`].
///
/// This struct contains all the data needed by the [`BlockAssembler`] to create
/// a complete block after transaction execution.
///
/// # Fields Overview
///
/// - `evm_env`: The EVM configuration used during execution (spec ID, block env, etc.)
/// - `execution_ctx`: Additional context like withdrawals and ommers
/// - `parent`: The parent block header this block builds on
/// - `transactions`: All transactions that were successfully executed
/// - `output`: Execution results including receipts and gas used
/// - `bundle_state`: Accumulated state changes from all transactions
/// - `state_provider`: Access to the current state for additional lookups
/// - `state_root`: The calculated state root after all changes
///
/// # Usage
///
/// This is typically created internally by [`BlockBuilder::finish`] after all
/// transactions have been executed:
///
/// ```rust,ignore
/// let input = BlockAssemblerInput {
///     evm_env: builder.evm_env(),
///     execution_ctx: builder.context(),
///     parent: &parent_header,
///     transactions: executed_transactions,
///     output: &execution_result,
///     bundle_state: &state_changes,
///     state_provider: &state,
///     state_root: calculated_root,
/// };
///
/// let block = assembler.assemble_block(input)?;
/// ```
#[derive(derive_more::Debug)]
#[non_exhaustive]
pub struct BlockAssemblerInput<'a, 'b, F: BlockExecutorFactory, H = Header> {
    /// Configuration of EVM used when executing the block.
    ///
    /// Contains context relevant to EVM such as [`revm::context::BlockEnv`].
    pub evm_env: EvmEnv<<F::EvmFactory as EvmFactory>::Spec>,
    /// [`BlockExecutorFactory::ExecutionCtx`] used to execute the block.
    pub execution_ctx: F::ExecutionCtx<'a>,
    /// Parent block header.
    pub parent: &'a SealedHeader<H>,
    /// Transactions that were executed in this block.
    pub transactions: Vec<F::Transaction>,
    /// Output of block execution.
    pub output: &'b BlockExecutionResult<F::Receipt>,
    /// [`BundleState`] after the block execution.
    pub bundle_state: &'a BundleState,
    /// Provider with access to state.
    #[debug(skip)]
    pub state_provider: &'b dyn StateProvider,
    /// State root for this block.
    ///
    /// Pre-computed by the caller before assembly.
    pub state_root: B256,
}
/// A type that knows how to assemble a block from execution results.
///
/// The [`BlockAssembler`] is the final step in block production. After transactions
/// have been executed by the [`BlockExecutor`], the assembler takes all the execution
/// outputs and creates a properly formatted block.
///
/// # Responsibilities
///
/// The assembler is responsible for:
/// - Setting the correct block header fields (gas used, receipts root, logs bloom, etc.)
/// - Including the executed transactions in the correct order
/// - Setting the state root from the post-execution state
/// - Applying any chain-specific rules or adjustments
///
/// # Example Flow
///
/// ```rust,ignore
/// // 1. Execute transactions and get results
/// let execution_result = block_executor.finish()?;
///
/// // 2. Calculate state root from changes
/// let state_root = state_provider.state_root(&bundle_state)?;
///
/// // 3. Assemble the final block
/// let block = assembler.assemble_block(BlockAssemblerInput {
///     evm_env,        // Environment used during execution
///     execution_ctx,  // Context like withdrawals, ommers
///     parent,         // Parent block header
///     transactions,   // Executed transactions
///     output,         // Execution results (receipts, gas)
///     bundle_state,   // All state changes
///     state_provider, // For additional lookups if needed
///     state_root,     // Computed state root
/// })?;
/// ```
///
/// # Relationship with Block Building
///
/// The assembler works together with:
/// - `NextBlockEnvAttributes`: Provides the configuration for the new block
/// - [`BlockExecutor`]: Executes transactions and produces results
/// - [`BlockBuilder`]: Orchestrates the entire process and calls the assembler
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockAssembler<F: BlockExecutorFactory> {
    /// The block type produced by the assembler.
    type Block: Block;
    /// Builds a block. See [`BlockAssemblerInput`] documentation for more details.
    ///
    /// # Errors
    ///
    /// Returns a [`BlockExecutionError`] if the block cannot be assembled.
    fn assemble_block(
        &self,
        input: BlockAssemblerInput<'_, '_, F, <Self::Block as Block>::Header>,
    ) -> Result<Self::Block, BlockExecutionError>;
}
/// Output of block building.
///
/// Returned by [`BlockBuilder::finish`] once all transactions have been executed
/// and the block has been assembled.
#[derive(Debug, Clone)]
pub struct BlockBuilderOutcome<N: NodePrimitives> {
    /// Result of block execution.
    pub execution_result: BlockExecutionResult<N::Receipt>,
    /// Hashed state after execution.
    pub hashed_state: HashedPostState,
    /// Trie updates collected during state root calculation.
    pub trie_updates: TrieUpdates,
    /// The built block.
    pub block: RecoveredBlock<N::Block>,
}
/// A type that knows how to execute and build a block.
///
/// It wraps an inner [`BlockExecutor`] and provides a way to execute transactions and
/// construct a block.
///
/// This is a helper to erase `BasicBlockBuilder` type.
pub trait BlockBuilder {
    /// The primitive types used by the inner [`BlockExecutor`].
    type Primitives: NodePrimitives;
    /// Inner [`BlockExecutor`].
    type Executor: BlockExecutor<
        Transaction = TxTy<Self::Primitives>,
        Receipt = ReceiptTy<Self::Primitives>,
    >;
    /// Invokes [`BlockExecutor::apply_pre_execution_changes`].
    fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError>;
    /// Invokes [`BlockExecutor::execute_transaction_with_commit_condition`] and saves the
    /// transaction in internal state only if the transaction was committed.
    fn execute_transaction_with_commit_condition(
        &mut self,
        tx: impl ExecutorTx<Self::Executor>,
        f: impl FnOnce(
            &ExecutionResult<<<Self::Executor as BlockExecutor>::Evm as Evm>::HaltReason>,
        ) -> CommitChanges,
    ) -> Result<Option<u64>, BlockExecutionError>;
    /// Invokes [`BlockExecutor::execute_transaction_with_result_closure`] and saves the
    /// transaction in internal state.
    ///
    /// The commit condition is hard-wired to [`CommitChanges::Yes`], so the transaction
    /// is always committed after `f` has observed the execution result.
    fn execute_transaction_with_result_closure(
        &mut self,
        tx: impl ExecutorTx<Self::Executor>,
        f: impl FnOnce(&ExecutionResult<<<Self::Executor as BlockExecutor>::Evm as Evm>::HaltReason>),
    ) -> Result<u64, BlockExecutionError> {
        self.execute_transaction_with_commit_condition(tx, |res| {
            f(res);
            CommitChanges::Yes
        })
        .map(Option::unwrap_or_default)
    }
    /// Invokes [`BlockExecutor::execute_transaction`] and saves the transaction in
    /// internal state.
    fn execute_transaction(
        &mut self,
        tx: impl ExecutorTx<Self::Executor>,
    ) -> Result<u64, BlockExecutionError> {
        self.execute_transaction_with_result_closure(tx, |_| ())
    }
    /// Add transaction
    ///
    /// Seismic team added this function to the trait for our stuff,
    /// default unimplemented for backward compatibility
    ///
    /// # Panics
    ///
    /// The default implementation always panics via `unimplemented!`.
    fn add_transaction(
        &mut self,
        _tx: Recovered<TxTy<Self::Primitives>>,
    ) -> Result<u64, BlockExecutionError> {
        unimplemented!("BlockBuilder trait's add_transaction function is not implemented")
    }
    /// Completes the block building process and returns the [`BlockBuilderOutcome`].
    fn finish(
        self,
        state_provider: impl StateProvider,
    ) -> Result<BlockBuilderOutcome<Self::Primitives>, BlockExecutionError>;
    /// Provides mutable access to the inner [`BlockExecutor`].
    fn executor_mut(&mut self) -> &mut Self::Executor;
    /// Provides access to the inner [`BlockExecutor`].
    fn executor(&self) -> &Self::Executor;
    /// Helper to access inner [`BlockExecutor::Evm`] mutably.
    fn evm_mut(&mut self) -> &mut <Self::Executor as BlockExecutor>::Evm {
        self.executor_mut().evm_mut()
    }
    /// Helper to access inner [`BlockExecutor::Evm`].
    fn evm(&self) -> &<Self::Executor as BlockExecutor>::Evm {
        self.executor().evm()
    }
    /// Consumes the type and returns the underlying [`BlockExecutor`].
    fn into_executor(self) -> Self::Executor;
}
/// A type that constructs a block from transactions and execution results.
///
/// Combines a block executor, the transactions accumulated so far, the parent
/// header/context, and a [`BlockAssembler`] used to produce the final block.
#[derive(Debug)]
pub struct BasicBlockBuilder<'a, F, Executor, Builder, N: NodePrimitives>
where
    F: BlockExecutorFactory,
{
    /// The block executor used to execute transactions.
    pub executor: Executor,
    /// The transactions executed in this block.
    pub transactions: Vec<Recovered<TxTy<N>>>,
    /// The parent block execution context.
    pub ctx: F::ExecutionCtx<'a>,
    /// The sealed parent block header.
    pub parent: &'a SealedHeader<HeaderTy<N>>,
    /// The assembler used to build the block.
    pub assembler: Builder,
}
/// Conversions for executable transactions.
///
/// Allows block builders to accept several transaction representations and
/// convert them to whatever form the executor needs.
pub trait ExecutorTx<Executor: BlockExecutor> {
    /// Converts the transaction into [`ExecutableTx`].
    fn as_executable(&self) -> impl ExecutableTx<Executor>;
    /// Converts the transaction into [`Recovered`].
    fn into_recovered(self) -> Recovered<Executor::Transaction>;
}
impl<Executor: BlockExecutor> ExecutorTx<Executor>
    for WithEncoded<Recovered<Executor::Transaction>>
{
    fn as_executable(&self) -> impl ExecutableTx<Executor> {
        self
    }
    fn into_recovered(self) -> Recovered<Executor::Transaction> {
        // Drop the cached encoding (`.0`) and keep the recovered transaction.
        self.1
    }
}
impl<Executor: BlockExecutor> ExecutorTx<Executor> for Recovered<Executor::Transaction> {
    fn as_executable(&self) -> impl ExecutableTx<Executor> {
        self
    }
    // Already in the target representation; the conversion is the identity.
    fn into_recovered(self) -> Self {
        self
    }
}
impl<T, Executor> ExecutorTx<Executor>
    for WithTxEnv<<<Executor as BlockExecutor>::Evm as Evm>::Tx, T>
where
    T: ExecutorTx<Executor>,
    Executor: BlockExecutor,
    <<Executor as BlockExecutor>::Evm as Evm>::Tx: Clone,
    Self: RecoveredTx<Executor::Transaction>,
{
    fn as_executable(&self) -> impl ExecutableTx<Executor> {
        self
    }
    fn into_recovered(self) -> Recovered<Executor::Transaction> {
        // Discard the precomputed tx env and convert the wrapped transaction.
        self.tx.into_recovered()
    }
}
impl<'a, F, DB, Executor, Builder, N> BlockBuilder
    for BasicBlockBuilder<'a, F, Executor, Builder, N>
where
    F: BlockExecutorFactory<Transaction = N::SignedTx, Receipt = N::Receipt>,
    Executor: BlockExecutor<
        Evm: Evm<
            Spec = <F::EvmFactory as EvmFactory>::Spec,
            HaltReason = <F::EvmFactory as EvmFactory>::HaltReason,
            DB = &'a mut State<DB>,
        >,
        Transaction = N::SignedTx,
        Receipt = N::Receipt,
    >,
    DB: Database + 'a,
    Builder: BlockAssembler<F, Block = N::Block>,
    N: NodePrimitives,
{
    type Primitives = N;
    type Executor = Executor;
    fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
        self.executor.apply_pre_execution_changes()
    }
    fn execute_transaction_with_commit_condition(
        &mut self,
        tx: impl ExecutorTx<Self::Executor>,
        f: impl FnOnce(
            &ExecutionResult<<<Self::Executor as BlockExecutor>::Evm as Evm>::HaltReason>,
        ) -> CommitChanges,
    ) -> Result<Option<u64>, BlockExecutionError> {
        // Only record the transaction locally if the executor actually committed it.
        if let Some(gas_used) =
            self.executor.execute_transaction_with_commit_condition(tx.as_executable(), f)?
        {
            self.transactions.push(tx.into_recovered());
            Ok(Some(gas_used))
        } else {
            Ok(None)
        }
    }
    fn finish(
        self,
        state: impl StateProvider,
    ) -> Result<BlockBuilderOutcome<N>, BlockExecutionError> {
        let (evm, result) = self.executor.finish()?;
        let (db, evm_env) = evm.finish();
        // merge all transitions into bundle state
        // NOTE: this must happen before hashing the post state below, since the
        // hashed state is derived from `db.bundle_state`.
        db.merge_transitions(BundleRetention::Reverts);
        // calculate the state root
        let hashed_state = state.hashed_post_state(&db.bundle_state);
        let (state_root, trie_updates) = state
            .state_root_with_updates(hashed_state.clone())
            .map_err(BlockExecutionError::other)?;
        let (transactions, senders) =
            self.transactions.into_iter().map(|tx| tx.into_parts()).unzip();
        let block = self.assembler.assemble_block(BlockAssemblerInput {
            evm_env,
            execution_ctx: self.ctx,
            parent: self.parent,
            transactions,
            output: &result,
            bundle_state: &db.bundle_state,
            state_provider: &state,
            state_root,
        })?;
        let block = RecoveredBlock::new_unhashed(block, senders);
        Ok(BlockBuilderOutcome { execution_result: result, hashed_state, trie_updates, block })
    }
    fn add_transaction(
        &mut self,
        tx: Recovered<TxTy<Self::Primitives>>,
    ) -> Result<u64, BlockExecutionError> {
        // Records the transaction without executing it. The returned value is
        // the new number of stored transactions, not gas used.
        self.transactions.push(tx);
        Ok(self.transactions.len() as u64)
    }
    fn executor_mut(&mut self) -> &mut Self::Executor {
        &mut self.executor
    }
    fn executor(&self) -> &Self::Executor {
        &self.executor
    }
    fn into_executor(self) -> Self::Executor {
        self.executor
    }
}
/// A generic block executor that uses a [`BlockExecutor`] to
/// execute blocks.
#[expect(missing_debug_implementations)]
pub struct BasicBlockExecutor<F, DB> {
    /// Block execution strategy.
    pub(crate) strategy_factory: F,
    /// Database.
    ///
    /// Wrapped in a [`State`] so per-block transitions can be tracked and bundled.
    pub(crate) db: State<DB>,
}
impl<F, DB: Database> BasicBlockExecutor<F, DB> {
    /// Creates a new `BasicBlockExecutor` with the given strategy.
    ///
    /// The database is wrapped in a [`State`] configured with bundle updates
    /// enabled and state clearing disabled.
    pub fn new(strategy_factory: F, db: DB) -> Self {
        let state = State::builder()
            .with_database(db)
            .with_bundle_update()
            .without_state_clear()
            .build();
        Self { strategy_factory, db: state }
    }
}
impl<F, DB> Executor<DB> for BasicBlockExecutor<F, DB>
where
    F: ConfigureEvm,
    DB: Database,
{
    type Primitives = F::Primitives;
    type Error = BlockExecutionError;

    fn execute_one(
        &mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
    ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    {
        // Build a block executor over the shared state and run all transactions.
        let outcome = self
            .strategy_factory
            .executor_for_block(&mut self.db, block)
            .execute_block(block.transactions_recovered())?;
        // Fold the per-transaction transitions into the bundle, keeping reverts.
        self.db.merge_transitions(BundleRetention::Reverts);
        Ok(outcome)
    }

    fn execute_one_with_state_hook<H>(
        &mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        state_hook: H,
    ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        H: OnStateHook + 'static,
    {
        // Same as `execute_one`, but with the state hook installed on the executor.
        let outcome = self
            .strategy_factory
            .executor_for_block(&mut self.db, block)
            .with_state_hook(Some(Box::new(state_hook)))
            .execute_block(block.transactions_recovered())?;
        self.db.merge_transitions(BundleRetention::Reverts);
        Ok(outcome)
    }

    fn into_state(self) -> State<DB> {
        self.db
    }

    fn size_hint(&self) -> usize {
        self.db.bundle_state.size_hint()
    }
}
/// A helper trait marking a 'static type that can be converted into an [`ExecutableTx`] for block
/// executor.
pub trait ExecutableTxFor<Evm: ConfigureEvm>:
    ToTxEnv<TxEnvFor<Evm>> + RecoveredTx<TxTy<Evm::Primitives>>
{
}
// Blanket impl: any type satisfying both super-traits qualifies automatically.
impl<T, Evm: ConfigureEvm> ExecutableTxFor<Evm> for T where
    T: ToTxEnv<TxEnvFor<Evm>> + RecoveredTx<TxTy<Evm::Primitives>>
{
}
/// A container for a transaction and a transaction environment.
///
/// Pairs a recovered transaction with its prepared EVM transaction environment.
#[derive(Debug, Clone)]
pub struct WithTxEnv<TxEnv, T> {
    /// The transaction environment for EVM.
    pub tx_env: TxEnv,
    /// The recovered transaction.
    pub tx: T,
}
impl<TxEnv, Tx, T: RecoveredTx<Tx>> RecoveredTx<Tx> for WithTxEnv<TxEnv, T> {
    // Both accessors delegate to the wrapped transaction.
    fn tx(&self) -> &Tx {
        self.tx.tx()
    }
    fn signer(&self) -> &Address {
        self.tx.signer()
    }
}
impl<TxEnv: Clone, T> ToTxEnv<TxEnv> for WithTxEnv<TxEnv, T> {
    fn to_tx_env(&self) -> TxEnv {
        // Returns a clone of the stored, precomputed environment.
        self.tx_env.clone()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Address;
    use alloy_consensus::constants::KECCAK_EMPTY;
    use alloy_evm::block::state_changes::balance_increment_state;
    use alloy_primitives::{address, map::HashMap, U256};
    use core::marker::PhantomData;
    use reth_ethereum_primitives::EthPrimitives;
    use revm::{
        database::{CacheDB, EmptyDB},
        state::AccountInfo,
    };
    // Factory producing the always-failing `TestExecutor` below.
    #[derive(Clone, Debug, Default)]
    struct TestExecutorProvider;
    impl TestExecutorProvider {
        fn executor<DB>(&self, _db: DB) -> TestExecutor<DB>
        where
            DB: Database,
        {
            TestExecutor(PhantomData)
        }
    }
    // Executor stub whose execution methods always fail; exercises the trait's
    // default `execute` path without needing a working EVM.
    struct TestExecutor<DB>(PhantomData<DB>);
    impl<DB: Database> Executor<DB> for TestExecutor<DB> {
        type Primitives = EthPrimitives;
        type Error = BlockExecutionError;
        fn execute_one(
            &mut self,
            _block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
        {
            Err(BlockExecutionError::msg("execution unavailable for tests"))
        }
        fn execute_one_with_state_hook<F>(
            &mut self,
            _block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
            _state_hook: F,
        ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
        where
            F: OnStateHook + 'static,
        {
            Err(BlockExecutionError::msg("execution unavailable for tests"))
        }
        fn into_state(self) -> State<DB> {
            unreachable!()
        }
        fn size_hint(&self) -> usize {
            0
        }
    }
    #[test]
    fn test_provider() {
        let provider = TestExecutorProvider;
        let db = CacheDB::<EmptyDB>::default();
        let executor = provider.executor(db);
        // Only checks that the default `execute` path is callable; the stub errors.
        let _ = executor.execute(&Default::default());
    }
    // Builds a `State` with a single pre-funded account for the balance tests.
    fn setup_state_with_account(
        addr: Address,
        balance: u128,
        nonce: u64,
    ) -> State<CacheDB<EmptyDB>> {
        let db = CacheDB::<EmptyDB>::default();
        let mut state = State::builder().with_database(db).with_bundle_update().build();
        let account_info = AccountInfo {
            balance: U256::from(balance),
            nonce,
            code_hash: KECCAK_EMPTY,
            code: None,
        };
        state.insert_account(addr, account_info);
        state
    }
    #[test]
    fn test_balance_increment_state_zero() {
        let addr = address!("0x1000000000000000000000000000000000000000");
        let mut state = setup_state_with_account(addr, 100, 1);
        let mut increments = HashMap::default();
        increments.insert(addr, 0);
        let result = balance_increment_state(&increments, &mut state).unwrap();
        assert!(result.is_empty(), "Zero increments should be ignored");
    }
    #[test]
    fn test_balance_increment_state_empty_increments_map() {
        let mut state = State::builder()
            .with_database(CacheDB::<EmptyDB>::default())
            .with_bundle_update()
            .build();
        let increments = HashMap::default();
        let result = balance_increment_state(&increments, &mut state).unwrap();
        assert!(result.is_empty(), "Empty increments map should return empty state");
    }
    #[test]
    fn test_balance_increment_state_multiple_valid_increments() {
        let addr1 = address!("0x1000000000000000000000000000000000000000");
        let addr2 = address!("0x2000000000000000000000000000000000000000");
        let mut state = setup_state_with_account(addr1, 100, 1);
        let account2 =
            AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None };
        state.insert_account(addr2, account2);
        let mut increments = HashMap::default();
        increments.insert(addr1, 50);
        increments.insert(addr2, 100);
        let result = balance_increment_state(&increments, &mut state).unwrap();
        assert_eq!(result.len(), 2);
        // The returned state carries the pre-increment balances.
        assert_eq!(result.get(&addr1).unwrap().info.balance, U256::from(100));
        assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200));
    }
    #[test]
    fn test_balance_increment_state_mixed_zero_and_nonzero_increments() {
        let addr1 = address!("0x1000000000000000000000000000000000000000");
        let addr2 = address!("0x2000000000000000000000000000000000000000");
        let mut state = setup_state_with_account(addr1, 100, 1);
        let account2 =
            AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None };
        state.insert_account(addr2, account2);
        let mut increments = HashMap::default();
        increments.insert(addr1, 0);
        increments.insert(addr2, 100);
        let result = balance_increment_state(&increments, &mut state).unwrap();
        assert_eq!(result.len(), 1, "Only non-zero increments should be included");
        assert!(!result.contains_key(&addr1), "Zero increment account should not be included");
        assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/test_utils.rs | crates/evm/evm/src/test_utils.rs | //! Helpers for testing.
use crate::execute::BasicBlockExecutor;
use revm::database::State;
impl<Factory, DB> BasicBlockExecutor<Factory, DB> {
    /// Provides safe read access to the state
    ///
    /// The closure receives a shared reference to the internal [`State`].
    pub fn with_state<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&State<DB>) -> R,
    {
        f(&self.db)
    }
    /// Provides safe write access to the state
    ///
    /// The closure receives an exclusive reference to the internal [`State`].
    pub fn with_state_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut State<DB>) -> R,
    {
        f(&mut self.db)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/either.rs | crates/evm/evm/src/either.rs | //! Helper type that represents one of two possible executor types
use crate::{execute::Executor, Database, OnStateHook};
// re-export Either
pub use futures_util::future::Either;
use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult};
use reth_primitives_traits::{NodePrimitives, RecoveredBlock};
impl<A, B, DB> Executor<DB> for Either<A, B>
where
    A: Executor<DB>,
    B: Executor<DB, Primitives = A::Primitives, Error = A::Error>,
    DB: Database,
{
    type Primitives = A::Primitives;
    type Error = A::Error;
    // Every method forwards to whichever executor variant is active.
    fn execute_one(
        &mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
    ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    {
        match self {
            Self::Left(a) => a.execute_one(block),
            Self::Right(b) => b.execute_one(block),
        }
    }
    fn execute_one_with_state_hook<F>(
        &mut self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        state_hook: F,
    ) -> Result<BlockExecutionResult<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        F: OnStateHook + 'static,
    {
        match self {
            Self::Left(a) => a.execute_one_with_state_hook(block, state_hook),
            Self::Right(b) => b.execute_one_with_state_hook(block, state_hook),
        }
    }
    fn execute(
        self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
    ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    {
        match self {
            Self::Left(a) => a.execute(block),
            Self::Right(b) => b.execute(block),
        }
    }
    fn execute_with_state_closure<F>(
        self,
        block: &RecoveredBlock<<Self::Primitives as NodePrimitives>::Block>,
        state: F,
    ) -> Result<BlockExecutionOutput<<Self::Primitives as NodePrimitives>::Receipt>, Self::Error>
    where
        F: FnMut(&revm::database::State<DB>),
    {
        match self {
            Self::Left(a) => a.execute_with_state_closure(block, state),
            Self::Right(b) => b.execute_with_state_closure(block, state),
        }
    }
    fn into_state(self) -> revm::database::State<DB> {
        match self {
            Self::Left(a) => a.into_state(),
            Self::Right(b) => b.into_state(),
        }
    }
    fn size_hint(&self) -> usize {
        match self {
            Self::Left(a) => a.size_hint(),
            Self::Right(b) => b.size_hint(),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/metrics.rs | crates/evm/evm/src/metrics.rs | //! Executor metrics.
use alloy_consensus::BlockHeader;
use metrics::{Counter, Gauge, Histogram};
use reth_metrics::Metrics;
use reth_primitives_traits::{Block, RecoveredBlock};
use std::time::Instant;
/// Executor metrics.
///
/// All metrics are emitted under the `sync.execution` scope.
// TODO(onbjerg): add sload/sstore
#[derive(Metrics, Clone)]
#[metrics(scope = "sync.execution")]
pub struct ExecutorMetrics {
    /// The total amount of gas processed.
    pub gas_processed_total: Counter,
    /// The instantaneous amount of gas processed per second.
    pub gas_per_second: Gauge,
    /// The Histogram for amount of gas used.
    pub gas_used_histogram: Histogram,
    /// The Histogram for amount of time taken to execute blocks.
    pub execution_histogram: Histogram,
    /// The total amount of time it took to execute the latest block.
    pub execution_duration: Gauge,
    /// The Histogram for number of accounts loaded when executing the latest block.
    pub accounts_loaded_histogram: Histogram,
    /// The Histogram for number of storage slots loaded when executing the latest block.
    pub storage_slots_loaded_histogram: Histogram,
    /// The Histogram for number of bytecodes loaded when executing the latest block.
    pub bytecodes_loaded_histogram: Histogram,
    /// The Histogram for number of accounts updated when executing the latest block.
    pub accounts_updated_histogram: Histogram,
    /// The Histogram for number of storage slots updated when executing the latest block.
    pub storage_slots_updated_histogram: Histogram,
    /// The Histogram for number of bytecodes updated when executing the latest block.
    pub bytecodes_updated_histogram: Histogram,
}
impl ExecutorMetrics {
    /// Runs `f`, which returns `(gas_used, output)`, and records gas and timing
    /// metrics around the call.
    fn metered<F, R>(&self, f: F) -> R
    where
        F: FnOnce() -> (u64, R),
    {
        // Time the execution.
        let started = Instant::now();
        let (gas_used, output) = f();
        let elapsed = started.elapsed().as_secs_f64();

        // Record gas and timing metrics.
        self.gas_processed_total.increment(gas_used);
        self.gas_per_second.set(gas_used as f64 / elapsed);
        self.gas_used_histogram.record(gas_used as f64);
        self.execution_histogram.record(elapsed);
        self.execution_duration.set(elapsed);

        output
    }

    /// Execute a block and update basic gas/timing metrics.
    ///
    /// This is a simple helper that tracks execution time and gas usage. For
    /// more complex metrics tracking (like state changes), use the metered
    /// execution functions in the engine/tree module.
    pub fn metered_one<F, R, B>(&self, block: &RecoveredBlock<B>, f: F) -> R
    where
        F: FnOnce(&RecoveredBlock<B>) -> R,
        B: Block,
        B::Header: BlockHeader,
    {
        // The block's declared gas usage is read up front; `f` performs the work.
        let gas_used = block.header().gas_used();
        self.metered(|| (gas_used, f(block)))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::Header;
    use alloy_primitives::B256;
    use reth_ethereum_primitives::Block;
    use reth_primitives_traits::Block as BlockTrait;

    /// Builds a recovered block whose header reports the given gas usage.
    fn create_test_block_with_gas(gas_used: u64) -> RecoveredBlock<Block> {
        let header = Header { gas_used, ..Default::default() };
        let block = Block { header, body: Default::default() };
        // A placeholder hash is sufficient for these tests.
        let sealed = block.seal_unchecked(B256::default());
        RecoveredBlock::new_sealed(sealed, Default::default())
    }

    #[test]
    fn test_metered_one_updates_metrics() {
        let metrics = ExecutorMetrics::default();
        let block = create_test_block_with_gas(1000);

        // Run through `metered_one` and ensure the closure result is forwarded.
        let gas = metrics.metered_one(&block, |b| {
            // Simulate some work so a nonzero duration is recorded.
            std::thread::sleep(std::time::Duration::from_millis(10));
            b.header().gas_used()
        });

        assert_eq!(gas, 1000);
    }

    #[test]
    fn test_metered_helper_tracks_timing() {
        let metrics = ExecutorMetrics::default();

        let output = metrics.metered(|| {
            // Simulate some work so a nonzero duration is recorded.
            std::thread::sleep(std::time::Duration::from_millis(10));
            (500, "test_result")
        });

        assert_eq!(output, "test_result");
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/evm/src/aliases.rs | crates/evm/evm/src/aliases.rs | //! Helper aliases when working with [`ConfigureEvm`] and the traits in this crate.
use crate::ConfigureEvm;
use alloy_evm::{block::BlockExecutorFactory, Database, EvmEnv, EvmFactory};
use revm::{inspector::NoOpInspector, Inspector};
/// Helper to access [`EvmFactory`] for a given [`ConfigureEvm`].
pub type EvmFactoryFor<Evm> =
    <<Evm as ConfigureEvm>::BlockExecutorFactory as BlockExecutorFactory>::EvmFactory;
/// Helper to access [`EvmFactory::Spec`] for a given [`ConfigureEvm`].
pub type SpecFor<Evm> = <EvmFactoryFor<Evm> as EvmFactory>::Spec;
/// Helper to access [`EvmFactory::Evm`] for a given [`ConfigureEvm`].
///
/// The inspector parameter `I` defaults to [`NoOpInspector`].
pub type EvmFor<Evm, DB, I = NoOpInspector> = <EvmFactoryFor<Evm> as EvmFactory>::Evm<DB, I>;
/// Helper to access [`EvmFactory::Error`] for a given [`ConfigureEvm`].
pub type EvmErrorFor<Evm, DB> = <EvmFactoryFor<Evm> as EvmFactory>::Error<DB>;
/// Helper to access [`EvmFactory::Context`] for a given [`ConfigureEvm`].
pub type EvmContextFor<Evm, DB> = <EvmFactoryFor<Evm> as EvmFactory>::Context<DB>;
/// Helper to access [`EvmFactory::HaltReason`] for a given [`ConfigureEvm`].
pub type HaltReasonFor<Evm> = <EvmFactoryFor<Evm> as EvmFactory>::HaltReason;
/// Helper to access [`EvmFactory::Tx`] for a given [`ConfigureEvm`].
pub type TxEnvFor<Evm> = <EvmFactoryFor<Evm> as EvmFactory>::Tx;
/// Helper to access [`BlockExecutorFactory::ExecutionCtx`] for a given [`ConfigureEvm`].
pub type ExecutionCtxFor<'a, Evm> =
    <<Evm as ConfigureEvm>::BlockExecutorFactory as BlockExecutorFactory>::ExecutionCtx<'a>;
/// Type alias for [`EvmEnv`] for a given [`ConfigureEvm`].
pub type EvmEnvFor<Evm> = EvmEnv<SpecFor<Evm>>;
/// Helper trait to bound [`Inspector`] for a [`ConfigureEvm`].
pub trait InspectorFor<Evm: ConfigureEvm, DB: Database>: Inspector<EvmContextFor<Evm, DB>> {}
// Blanket impl: any inspector over the matching EVM context automatically satisfies
// `InspectorFor`, so the trait never needs to be implemented manually.
impl<T, Evm, DB> InspectorFor<Evm, DB> for T
where
    Evm: ConfigureEvm,
    DB: Database,
    T: Inspector<EvmContextFor<Evm, DB>>,
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/execution-types/src/lib.rs | crates/evm/execution-types/src/lib.rs | //! Commonly used types for (EVM) block execution.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
mod chain;
pub use chain::*;
mod execute;
pub use execute::*;
mod execution_outcome;
pub use execution_outcome::*;
/// Bincode-compatible serde implementations for commonly used types for (EVM) block execution.
///
/// The `bincode` crate doesn't work with optionally serializable serde fields, but some of the
/// execution types require optional serialization for RPC compatibility. This module ensures
/// that all fields are serialized.
///
/// Read more: <https://github.com/bincode-org/bincode/issues/326>
#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
    pub use super::{chain::serde_bincode_compat::*, execution_outcome::serde_bincode_compat::*};
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/execution-types/src/execute.rs | crates/evm/execution-types/src/execute.rs | use revm::database::BundleState;
pub use alloy_evm::block::BlockExecutionResult;
/// [`BlockExecutionResult`] combined with state.
///
/// Derefs (and `AsRef`s) to the inner [`BlockExecutionResult`], so its fields are directly
/// accessible on this type.
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    derive_more::AsRef,
    derive_more::AsMut,
    derive_more::Deref,
    derive_more::DerefMut,
)]
pub struct BlockExecutionOutput<T> {
    /// All the receipts of the transactions in the block.
    #[as_ref]
    #[as_mut]
    #[deref]
    #[deref_mut]
    pub result: BlockExecutionResult<T>,
    /// The changed state of the block after execution.
    pub state: BundleState,
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/execution-types/src/chain.rs | crates/evm/execution-types/src/chain.rs | //! Contains [Chain], a chain of blocks and their final state.
use crate::ExecutionOutcome;
use alloc::{borrow::Cow, collections::BTreeMap, vec::Vec};
use alloy_consensus::{transaction::Recovered, BlockHeader};
use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash};
use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash};
use core::{fmt, ops::RangeInclusive};
use reth_primitives_traits::{
transaction::signed::SignedTransaction, Block, BlockBody, NodePrimitives, RecoveredBlock,
SealedHeader,
};
use reth_trie_common::updates::TrieUpdates;
use revm::database::BundleState;
/// A chain of blocks and their final state.
///
/// The chain contains the state of accounts after execution of its blocks,
/// changesets for those blocks (and their transactions), as well as the blocks themselves.
///
/// Used inside the `BlockchainTree`.
///
/// # Warning
///
/// A chain of blocks should not be empty.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Chain<N: NodePrimitives = reth_ethereum_primitives::EthPrimitives> {
    /// All blocks in this chain.
    ///
    /// Keyed by block number; the ordered map keeps the oldest block and the tip at the
    /// map's two ends.
    blocks: BTreeMap<BlockNumber, RecoveredBlock<N::Block>>,
    /// The outcome of block execution for this chain.
    ///
    /// This field contains the state of all accounts after the execution of all blocks in this
    /// chain, ranging from the [`Chain::first`] block to the [`Chain::tip`] block, inclusive.
    ///
    /// Additionally, it includes the individual state changes that led to the current state.
    execution_outcome: ExecutionOutcome<N::Receipt>,
    /// State trie updates after block is added to the chain.
    /// NOTE: Currently, trie updates are present only for
    /// single-block chains that extend the canonical chain.
    trie_updates: Option<TrieUpdates>,
}
impl<N: NodePrimitives> Default for Chain<N> {
fn default() -> Self {
Self {
blocks: Default::default(),
execution_outcome: Default::default(),
trie_updates: Default::default(),
}
}
}
impl<N: NodePrimitives> Chain<N> {
    /// Create new Chain from blocks and state.
    ///
    /// # Warning
    ///
    /// A chain of blocks should not be empty.
    pub fn new(
        blocks: impl IntoIterator<Item = RecoveredBlock<N::Block>>,
        execution_outcome: ExecutionOutcome<N::Receipt>,
        trie_updates: Option<TrieUpdates>,
    ) -> Self {
        // Index blocks by their block number; the map keeps them sorted.
        let blocks =
            blocks.into_iter().map(|b| (b.header().number(), b)).collect::<BTreeMap<_, _>>();
        debug_assert!(!blocks.is_empty(), "Chain should have at least one block");
        Self { blocks, execution_outcome, trie_updates }
    }
    /// Create new Chain from a single block and its state.
    pub fn from_block(
        block: RecoveredBlock<N::Block>,
        execution_outcome: ExecutionOutcome<N::Receipt>,
        trie_updates: Option<TrieUpdates>,
    ) -> Self {
        Self::new([block], execution_outcome, trie_updates)
    }
    /// Get the blocks in this chain.
    pub const fn blocks(&self) -> &BTreeMap<BlockNumber, RecoveredBlock<N::Block>> {
        &self.blocks
    }
    /// Consumes the type and only returns the blocks in this chain.
    pub fn into_blocks(self) -> BTreeMap<BlockNumber, RecoveredBlock<N::Block>> {
        self.blocks
    }
    /// Returns an iterator over all headers in the block with increasing block numbers.
    ///
    /// Note: each header is cloned out of its block.
    pub fn headers(&self) -> impl Iterator<Item = SealedHeader<N::BlockHeader>> + '_ {
        self.blocks.values().map(|block| block.clone_sealed_header())
    }
    /// Get cached trie updates for this chain.
    pub const fn trie_updates(&self) -> Option<&TrieUpdates> {
        self.trie_updates.as_ref()
    }
    /// Remove cached trie updates for this chain.
    pub fn clear_trie_updates(&mut self) {
        self.trie_updates.take();
    }
    /// Get execution outcome of this chain
    pub const fn execution_outcome(&self) -> &ExecutionOutcome<N::Receipt> {
        &self.execution_outcome
    }
    /// Get mutable execution outcome of this chain
    pub const fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome<N::Receipt> {
        &mut self.execution_outcome
    }
    /// Prepends the given state to the current state.
    pub fn prepend_state(&mut self, state: BundleState) {
        self.execution_outcome.prepend_state(state);
        self.trie_updates.take(); // invalidate cached trie updates
    }
    /// Return true if chain is empty and has no blocks.
    pub fn is_empty(&self) -> bool {
        self.blocks.is_empty()
    }
    /// Return block number of the block hash.
    ///
    /// NOTE: this is a linear scan over all blocks in the chain.
    pub fn block_number(&self, block_hash: BlockHash) -> Option<BlockNumber> {
        self.blocks.iter().find_map(|(num, block)| (block.hash() == block_hash).then_some(*num))
    }
    /// Returns the block with matching hash.
    ///
    /// NOTE: this is a linear scan over all blocks in the chain.
    pub fn recovered_block(&self, block_hash: BlockHash) -> Option<&RecoveredBlock<N::Block>> {
        self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block))
    }
    /// Return execution outcome at the `block_number` or None if block is not known
    pub fn execution_outcome_at_block(
        &self,
        block_number: BlockNumber,
    ) -> Option<ExecutionOutcome<N::Receipt>> {
        // The tip's state is the chain's full outcome; no unwinding needed.
        if self.tip().number() == block_number {
            return Some(self.execution_outcome.clone())
        }
        if self.blocks.contains_key(&block_number) {
            // Clone the full outcome and revert it back to the requested block.
            let mut execution_outcome = self.execution_outcome.clone();
            execution_outcome.revert_to(block_number);
            return Some(execution_outcome)
        }
        None
    }
    /// Destructure the chain into its inner components:
    /// 1. The blocks contained in the chain.
    /// 2. The execution outcome representing the final state.
    /// 3. The optional trie updates.
    pub fn into_inner(
        self,
    ) -> (ChainBlocks<'static, N::Block>, ExecutionOutcome<N::Receipt>, Option<TrieUpdates>) {
        (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates)
    }
    /// Destructure the chain into its inner components:
    /// 1. A reference to the blocks contained in the chain.
    /// 2. A reference to the execution outcome representing the final state.
    pub const fn inner(&self) -> (ChainBlocks<'_, N::Block>, &ExecutionOutcome<N::Receipt>) {
        (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome)
    }
    /// Returns an iterator over all the receipts of the blocks in the chain.
    pub fn block_receipts_iter(&self) -> impl Iterator<Item = &Vec<N::Receipt>> + '_ {
        self.execution_outcome.receipts().iter()
    }
    /// Returns an iterator over all blocks in the chain with increasing block number.
    pub fn blocks_iter(&self) -> impl Iterator<Item = &RecoveredBlock<N::Block>> + '_ {
        self.blocks().iter().map(|block| block.1)
    }
    /// Returns an iterator over all blocks and their receipts in the chain.
    ///
    /// Blocks and per-block receipt lists are paired up positionally.
    pub fn blocks_and_receipts(
        &self,
    ) -> impl Iterator<Item = (&RecoveredBlock<N::Block>, &Vec<N::Receipt>)> + '_ {
        self.blocks_iter().zip(self.block_receipts_iter())
    }
    /// Get the block at which this chain forked.
    ///
    /// This is the parent of the chain's first block.
    pub fn fork_block(&self) -> ForkBlock {
        let first = self.first();
        ForkBlock {
            number: first.header().number().saturating_sub(1),
            hash: first.header().parent_hash(),
        }
    }
    /// Get the first block in this chain.
    ///
    /// # Panics
    ///
    /// If chain doesn't have any blocks.
    #[track_caller]
    pub fn first(&self) -> &RecoveredBlock<N::Block> {
        self.blocks.first_key_value().expect("Chain should have at least one block").1
    }
    /// Get the tip of the chain.
    ///
    /// # Panics
    ///
    /// If chain doesn't have any blocks.
    #[track_caller]
    pub fn tip(&self) -> &RecoveredBlock<N::Block> {
        self.blocks.last_key_value().expect("Chain should have at least one block").1
    }
    /// Returns length of the chain.
    pub fn len(&self) -> usize {
        self.blocks.len()
    }
    /// Returns the range of block numbers in the chain.
    ///
    /// # Panics
    ///
    /// If chain doesn't have any blocks.
    pub fn range(&self) -> RangeInclusive<BlockNumber> {
        self.first().header().number()..=self.tip().header().number()
    }
    /// Get all receipts for the given block.
    pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<Vec<&N::Receipt>> {
        let num = self.block_number(block_hash)?;
        Some(self.execution_outcome.receipts_by_block(num).iter().collect())
    }
    /// Get all receipts with attachment.
    ///
    /// Attachment includes block number, block hash, transaction hash and transaction index.
    pub fn receipts_with_attachment(&self) -> Vec<BlockReceipts<N::Receipt>>
    where
        N::SignedTx: Encodable2718,
    {
        let mut receipt_attach = Vec::with_capacity(self.blocks().len());
        self.blocks_and_receipts().for_each(|(block, receipts)| {
            let block_num_hash = BlockNumHash::new(block.number(), block.hash());
            // Pair each transaction hash with its receipt; receipts are cloned out of the
            // shared execution outcome.
            let tx_receipts = block
                .body()
                .transactions()
                .iter()
                .zip(receipts)
                .map(|(tx, receipt)| (tx.trie_hash(), receipt.clone()))
                .collect();
            receipt_attach.push(BlockReceipts {
                block: block_num_hash,
                tx_receipts,
                timestamp: block.timestamp(),
            });
        });
        receipt_attach
    }
    /// Append a single block with state to the chain.
    /// This method assumes that blocks attachment to the chain has already been validated.
    pub fn append_block(
        &mut self,
        block: RecoveredBlock<N::Block>,
        execution_outcome: ExecutionOutcome<N::Receipt>,
    ) {
        self.blocks.insert(block.header().number(), block);
        self.execution_outcome.extend(execution_outcome);
        self.trie_updates.take(); // reset
    }
    /// Merge two chains by appending the given chain into the current one.
    ///
    /// The state of accounts for this chain is set to the state of the newest chain.
    ///
    /// Returns the passed `other` chain in [`Result::Err`] variant if the chains could not be
    /// connected.
    pub fn append_chain(&mut self, other: Self) -> Result<(), Self> {
        let chain_tip = self.tip();
        let other_fork_block = other.fork_block();
        // `other` must fork off of our current tip, otherwise the chains don't connect.
        if chain_tip.hash() != other_fork_block.hash {
            return Err(other)
        }
        // Insert blocks from other chain
        self.blocks.extend(other.blocks);
        self.execution_outcome.extend(other.execution_outcome);
        self.trie_updates.take(); // reset
        Ok(())
    }
}
/// Wrapper type for displaying the `blocks` map of a `Chain` (see its `Display` impl).
#[derive(Debug)]
pub struct DisplayBlocksChain<'a, B: reth_primitives_traits::Block>(
    pub &'a BTreeMap<BlockNumber, RecoveredBlock<B>>,
);
impl<B: reth_primitives_traits::Block> fmt::Display for DisplayBlocksChain<'_, B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut entries = self.0.values().map(|block| block.num_hash());
        let mut builder = f.debug_list();
        if entries.len() > 3 {
            // Abbreviate long chains: first block, ellipsis, last block.
            builder.entry(&entries.next().unwrap());
            builder.entry(&format_args!("..."));
            builder.entry(&entries.next_back().unwrap());
        } else {
            builder.entries(entries);
        }
        builder.finish()
    }
}
/// All blocks in the chain
///
/// The block map is wrapped in a [`Cow`] so this type can hold either a borrowed view
/// of a chain's blocks or owned blocks.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct ChainBlocks<'a, B: Block> {
    blocks: Cow<'a, BTreeMap<BlockNumber, RecoveredBlock<B>>>,
}
impl<B: Block<Body: BlockBody<Transaction: SignedTransaction>>> ChainBlocks<'_, B> {
    /// Creates a consuming iterator over all blocks in the chain with increasing block number.
    ///
    /// Note: this always yields at least one block.
    /// If the underlying map is borrowed, it is cloned via `Cow::into_owned` first.
    #[inline]
    pub fn into_blocks(self) -> impl Iterator<Item = RecoveredBlock<B>> {
        self.blocks.into_owned().into_values()
    }
    /// Creates an iterator over all blocks in the chain with increasing block number.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (&BlockNumber, &RecoveredBlock<B>)> {
        self.blocks.iter()
    }
    /// Get the tip of the chain.
    ///
    /// # Note
    ///
    /// Chains always have at least one block.
    #[inline]
    pub fn tip(&self) -> &RecoveredBlock<B> {
        self.blocks.last_key_value().expect("Chain should have at least one block").1
    }
    /// Get the _first_ block of the chain.
    ///
    /// # Note
    ///
    /// Chains always have at least one block.
    #[inline]
    pub fn first(&self) -> &RecoveredBlock<B> {
        self.blocks.first_key_value().expect("Chain should have at least one block").1
    }
    /// Returns an iterator over all transactions in the chain.
    #[inline]
    pub fn transactions(&self) -> impl Iterator<Item = &<B::Body as BlockBody>::Transaction> + '_ {
        self.blocks.values().flat_map(|block| block.body().transactions_iter())
    }
    /// Returns an iterator over all transactions and their senders.
    #[inline]
    pub fn transactions_with_sender(
        &self,
    ) -> impl Iterator<Item = (&Address, &<B::Body as BlockBody>::Transaction)> + '_ {
        self.blocks.values().flat_map(|block| block.transactions_with_sender())
    }
    /// Returns an iterator over all [`Recovered`] in the blocks
    ///
    /// Note: This clones the transactions since it is assumed this is part of a shared [Chain].
    #[inline]
    pub fn transactions_ecrecovered(
        &self,
    ) -> impl Iterator<Item = Recovered<<B::Body as BlockBody>::Transaction>> + '_ {
        self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer))
    }
    /// Returns an iterator over all transaction hashes in the block
    #[inline]
    pub fn transaction_hashes(&self) -> impl Iterator<Item = TxHash> + '_ {
        self.blocks
            .values()
            .flat_map(|block| block.body().transactions_iter().map(|tx| tx.trie_hash()))
    }
}
/// Consuming iteration over `(block number, block)` pairs.
impl<B: Block> IntoIterator for ChainBlocks<'_, B> {
    type Item = (BlockNumber, RecoveredBlock<B>);
    type IntoIter = alloc::collections::btree_map::IntoIter<BlockNumber, RecoveredBlock<B>>;
    fn into_iter(self) -> Self::IntoIter {
        // `into_owned` clones the map when this view only borrows it.
        self.blocks.into_owned().into_iter()
    }
}
/// Used to hold receipts and their attachment.
#[derive(Default, Clone, Debug, PartialEq, Eq)]
pub struct BlockReceipts<T = reth_ethereum_primitives::Receipt> {
    /// Block identifier
    pub block: BlockNumHash,
    /// Transaction identifier and receipt.
    ///
    /// Stored as `(transaction hash, receipt)` pairs.
    pub tx_receipts: Vec<(TxHash, T)>,
    /// Block timestamp
    pub timestamp: u64,
}
/// Bincode-compatible [`Chain`] serde implementation.
#[cfg(feature = "serde-bincode-compat")]
pub(super) mod serde_bincode_compat {
    use crate::{serde_bincode_compat, ExecutionOutcome};
    use alloc::{borrow::Cow, collections::BTreeMap};
    use alloy_primitives::BlockNumber;
    use reth_ethereum_primitives::EthPrimitives;
    use reth_primitives_traits::{
        serde_bincode_compat::{RecoveredBlock, SerdeBincodeCompat},
        Block, NodePrimitives,
    };
    use reth_trie_common::serde_bincode_compat::updates::TrieUpdates;
    use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{DeserializeAs, SerializeAs};
    /// Bincode-compatible [`super::Chain`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_execution_types::{serde_bincode_compat, Chain};
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data {
    ///     #[serde_as(as = "serde_bincode_compat::Chain")]
    ///     chain: Chain,
    /// }
    /// ```
    #[derive(Debug, Serialize, Deserialize)]
    pub struct Chain<'a, N = EthPrimitives>
    where
        N: NodePrimitives<
            Block: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
        >,
    {
        blocks: RecoveredBlocks<'a, N::Block>,
        execution_outcome: serde_bincode_compat::ExecutionOutcome<'a, N::Receipt>,
        trie_updates: Option<TrieUpdates<'a>>,
    }
    /// Helper that (de)serializes the block map through the bincode-compatible
    /// [`RecoveredBlock`] representation.
    #[derive(Debug)]
    struct RecoveredBlocks<
        'a,
        B: reth_primitives_traits::Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat>
            + 'static,
    >(Cow<'a, BTreeMap<BlockNumber, reth_primitives_traits::RecoveredBlock<B>>>);
    impl<B> Serialize for RecoveredBlocks<'_, B>
    where
        B: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
    {
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            // Serialize each block via its bincode-compatible repr, keyed by block number.
            let mut state = serializer.serialize_map(Some(self.0.len()))?;
            for (block_number, block) in self.0.iter() {
                state.serialize_entry(block_number, &RecoveredBlock::<'_, B>::from(block))?;
            }
            state.end()
        }
    }
    impl<'de, B> Deserialize<'de> for RecoveredBlocks<'_, B>
    where
        B: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
    {
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            Ok(Self(Cow::Owned(
                BTreeMap::<BlockNumber, RecoveredBlock<'_, B>>::deserialize(deserializer)
                    .map(|blocks| blocks.into_iter().map(|(n, b)| (n, b.into())).collect())?,
            )))
        }
    }
    impl<'a, N> From<&'a super::Chain<N>> for Chain<'a, N>
    where
        N: NodePrimitives<
            Block: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
        >,
    {
        fn from(value: &'a super::Chain<N>) -> Self {
            Self {
                blocks: RecoveredBlocks(Cow::Borrowed(&value.blocks)),
                execution_outcome: value.execution_outcome.as_repr(),
                trie_updates: value.trie_updates.as_ref().map(Into::into),
            }
        }
    }
    impl<'a, N> From<Chain<'a, N>> for super::Chain<N>
    where
        N: NodePrimitives<
            Block: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
        >,
    {
        fn from(value: Chain<'a, N>) -> Self {
            Self {
                blocks: value.blocks.0.into_owned(),
                execution_outcome: ExecutionOutcome::from_repr(value.execution_outcome),
                trie_updates: value.trie_updates.map(Into::into),
            }
        }
    }
    impl<N> SerializeAs<super::Chain<N>> for Chain<'_, N>
    where
        N: NodePrimitives<
            Block: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
        >,
    {
        fn serialize_as<S>(source: &super::Chain<N>, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            Chain::from(source).serialize(serializer)
        }
    }
    impl<'de, N> DeserializeAs<'de, super::Chain<N>> for Chain<'de, N>
    where
        N: NodePrimitives<
            Block: Block<Header: SerdeBincodeCompat, Body: SerdeBincodeCompat> + 'static,
        >,
    {
        fn deserialize_as<D>(deserializer: D) -> Result<super::Chain<N>, D::Error>
        where
            D: Deserializer<'de>,
        {
            Chain::deserialize(deserializer).map(Into::into)
        }
    }
    #[cfg(test)]
    mod tests {
        use super::super::{serde_bincode_compat, Chain};
        use arbitrary::Arbitrary;
        use rand::Rng;
        use reth_primitives_traits::RecoveredBlock;
        use serde::{Deserialize, Serialize};
        use serde_with::serde_as;
        #[test]
        fn test_chain_bincode_roundtrip() {
            #[serde_as]
            #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
            struct Data {
                #[serde_as(as = "serde_bincode_compat::Chain")]
                chain: Chain,
            }
            // Build a single-block chain from arbitrary bytes and round-trip it.
            let mut bytes = [0u8; 1024];
            rand::rng().fill(bytes.as_mut_slice());
            let data = Data {
                chain: Chain::new(
                    vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes))
                        .unwrap()],
                    Default::default(),
                    None,
                ),
            };
            let encoded = bincode::serialize(&data).unwrap();
            let decoded: Data = bincode::deserialize(&encoded).unwrap();
            assert_eq!(decoded, data);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::TxType;
    use alloy_primitives::{Address, B256};
    use reth_ethereum_primitives::Receipt;
    use revm::{primitives::HashMap, state::AccountInfo};
    #[test]
    fn chain_append() {
        let block: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
        let block1_hash = B256::new([0x01; 32]);
        let block2_hash = B256::new([0x02; 32]);
        let block3_hash = B256::new([0x03; 32]);
        let block4_hash = B256::new([0x04; 32]);
        let mut block1 = block.clone();
        let mut block2 = block.clone();
        let mut block3 = block.clone();
        let mut block4 = block;
        block1.set_hash(block1_hash);
        block2.set_hash(block2_hash);
        block3.set_hash(block3_hash);
        block4.set_hash(block4_hash);
        // Make chain2 fork off of block2 (chain1's tip) so the first append succeeds.
        block3.set_parent_hash(block2_hash);
        let mut chain1: Chain =
            Chain { blocks: BTreeMap::from([(1, block1), (2, block2)]), ..Default::default() };
        let chain2 =
            Chain { blocks: BTreeMap::from([(3, block3), (4, block4)]), ..Default::default() };
        assert!(chain1.append_chain(chain2.clone()).is_ok());
        // chain1 got changed so this will fail
        assert!(chain1.append_chain(chain2).is_err());
    }
    #[test]
    fn test_number_split() {
        // Outcome for block 1, touching account [2; 20].
        let execution_outcome1: ExecutionOutcome = ExecutionOutcome::new(
            BundleState::new(
                vec![(
                    Address::new([2; 20]),
                    None,
                    Some(AccountInfo::default()),
                    HashMap::default(),
                )],
                vec![vec![(Address::new([2; 20]), None, vec![])]],
                vec![],
            ),
            vec![vec![]],
            1,
            vec![],
        );
        // Outcome for block 2, touching account [3; 20].
        let execution_outcome2 = ExecutionOutcome::new(
            BundleState::new(
                vec![(
                    Address::new([3; 20]),
                    None,
                    Some(AccountInfo::default()),
                    HashMap::default(),
                )],
                vec![vec![(Address::new([3; 20]), None, vec![])]],
                vec![],
            ),
            vec![vec![]],
            2,
            vec![],
        );
        let mut block1: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
        let block1_hash = B256::new([15; 32]);
        block1.set_block_number(1);
        block1.set_hash(block1_hash);
        block1.push_sender(Address::new([4; 20]));
        let mut block2: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
        let block2_hash = B256::new([16; 32]);
        block2.set_block_number(2);
        block2.set_hash(block2_hash);
        block2.push_sender(Address::new([4; 20]));
        let mut block_state_extended = execution_outcome1;
        block_state_extended.extend(execution_outcome2);
        let chain: Chain =
            Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None);
        // return tip state
        assert_eq!(
            chain.execution_outcome_at_block(block2.number),
            Some(chain.execution_outcome.clone())
        );
        // state at unknown block
        assert_eq!(chain.execution_outcome_at_block(100), None);
    }
    #[test]
    fn receipts_by_block_hash() {
        // Create a default RecoveredBlock object
        let block: RecoveredBlock<reth_ethereum_primitives::Block> = Default::default();
        // Define block hashes for block1 and block2
        let block1_hash = B256::new([0x01; 32]);
        let block2_hash = B256::new([0x02; 32]);
        // Clone the default block into block1 and block2
        let mut block1 = block.clone();
        let mut block2 = block;
        // Set the hashes of block1 and block2
        block1.set_hash(block1_hash);
        block2.set_hash(block2_hash);
        // Create a random receipt object, receipt1
        let receipt1 = Receipt {
            tx_type: TxType::Legacy,
            cumulative_gas_used: 46913,
            logs: vec![],
            success: true,
        };
        // Create another random receipt object, receipt2
        let receipt2 = Receipt {
            tx_type: TxType::Legacy,
            cumulative_gas_used: 1325345,
            logs: vec![],
            success: true,
        };
        // Create a Receipts object with a vector of receipt vectors
        let receipts = vec![vec![receipt1.clone()], vec![receipt2]];
        // Create an ExecutionOutcome object with the created bundle, receipts, an empty requests
        // vector, and first_block set to 10
        let execution_outcome = ExecutionOutcome {
            bundle: Default::default(),
            receipts,
            requests: vec![],
            first_block: 10,
        };
        // Create a Chain object with a BTreeMap of blocks mapped to their block numbers,
        // including block1_hash and block2_hash, and the execution_outcome
        let chain: Chain = Chain {
            blocks: BTreeMap::from([(10, block1), (11, block2)]),
            execution_outcome: execution_outcome.clone(),
            ..Default::default()
        };
        // Assert that the proper receipt vector is returned for block1_hash
        assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1]));
        // Create an ExecutionOutcome object with a single receipt vector containing receipt1
        let execution_outcome1 = ExecutionOutcome {
            bundle: Default::default(),
            receipts: vec![vec![receipt1]],
            requests: vec![],
            first_block: 10,
        };
        // Assert that the execution outcome at the first block contains only the first receipt
        assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1));
        // Assert that the execution outcome at the tip block contains the whole execution outcome
        assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/execution-types/src/execution_outcome.rs | crates/evm/execution-types/src/execution_outcome.rs | use crate::{BlockExecutionOutput, BlockExecutionResult};
use alloc::{vec, vec::Vec};
use alloy_eips::eip7685::Requests;
use alloy_primitives::{logs_bloom, map::HashMap, Address, BlockNumber, Bloom, Log, B256, U256};
use reth_primitives_traits::{Account, Bytecode, Receipt, StorageEntry};
use reth_trie_common::{HashedPostState, KeyHasher};
use revm::{
database::{states::BundleState, BundleAccount},
state::{AccountInfo, FlaggedStorage},
};
/// Type used to initialize revms bundle state.
///
/// Maps address -> (original account, present account, storage map of slot -> (old, new)).
pub type BundleStateInit = HashMap<
    Address,
    (Option<Account>, Option<Account>, HashMap<B256, (FlaggedStorage, FlaggedStorage)>),
>;
/// Types used inside `RevertsInit` to initialize revms reverts.
pub type AccountRevertInit = (Option<Option<Account>>, Vec<StorageEntry>);
/// Type used to initialize revms reverts.
///
/// Maps block number -> address -> per-account revert info.
pub type RevertsInit = HashMap<BlockNumber, HashMap<Address, AccountRevertInit>>;
/// Represents a changed account
///
/// Pairs an account address with its nonce and balance.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ChangedAccount {
    /// The address of the account.
    pub address: Address,
    /// Account nonce.
    pub nonce: u64,
    /// Account balance.
    pub balance: U256,
}
impl ChangedAccount {
    /// Creates a new [`ChangedAccount`] with the given address and 0 balance and nonce.
    pub const fn empty(address: Address) -> Self {
        Self { address, nonce: 0, balance: U256::ZERO }
    }
}
/// Represents the outcome of block execution, including post-execution changes and reverts.
///
/// The `ExecutionOutcome` structure aggregates the state changes over an arbitrary number of
/// blocks, capturing the resulting state, receipts, and requests following the execution.
///
/// Generic over the receipt type `T`.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ExecutionOutcome<T = reth_ethereum_primitives::Receipt> {
    /// Bundle state with reverts.
    pub bundle: BundleState,
    /// The collection of receipts.
    /// Outer vector stores receipts for each block sequentially.
    /// The inner vector stores receipts ordered by transaction number.
    pub receipts: Vec<Vec<T>>,
    /// First block of bundle state.
    ///
    /// Entry `i` of the per-block collections corresponds to block `first_block + i`.
    pub first_block: BlockNumber,
    /// The collection of EIP-7685 requests.
    /// Outer vector stores requests for each block sequentially.
    /// The inner vector stores requests ordered by transaction number.
    ///
    /// A transaction may have zero or more requests, so the length of the inner vector is not
    /// guaranteed to be the same as the number of transactions.
    pub requests: Vec<Requests>,
}
impl<T> Default for ExecutionOutcome<T> {
fn default() -> Self {
Self {
bundle: Default::default(),
receipts: Default::default(),
first_block: Default::default(),
requests: Default::default(),
}
}
}
impl<T> ExecutionOutcome<T> {
    /// Creates a new `ExecutionOutcome`.
    ///
    /// This constructor initializes a new `ExecutionOutcome` instance with the provided
    /// bundle state, receipts, first block number, and EIP-7685 requests.
    ///
    /// All values are stored as-is; no validation is performed.
    pub const fn new(
        bundle: BundleState,
        receipts: Vec<Vec<T>>,
        first_block: BlockNumber,
        requests: Vec<Requests>,
    ) -> Self {
        Self { bundle, receipts, first_block, requests }
    }
/// Creates a new `ExecutionOutcome` from initialization parameters.
///
/// This constructor initializes a new `ExecutionOutcome` instance using detailed
/// initialization parameters.
pub fn new_init(
state_init: BundleStateInit,
revert_init: RevertsInit,
contracts_init: impl IntoIterator<Item = (B256, Bytecode)>,
receipts: Vec<Vec<T>>,
first_block: BlockNumber,
requests: Vec<Requests>,
) -> Self {
// sort reverts by block number
let mut reverts = revert_init.into_iter().collect::<Vec<_>>();
reverts.sort_unstable_by_key(|a| a.0);
// initialize revm bundle
let bundle = BundleState::new(
state_init.into_iter().map(|(address, (original, present, storage))| {
(
address,
original.map(Into::into),
present.map(Into::into),
storage
.into_iter()
.map(|(k, (orig_value, new_value))| (k.into(), (orig_value, new_value)))
.collect(),
)
}),
reverts.into_iter().map(|(_, reverts)| {
// does not need to be sorted, it is done when taking reverts.
reverts.into_iter().map(|(address, (original, storage))| {
(
address,
original.map(|i| i.map(Into::into)),
storage.into_iter().map(|entry| (entry.key.into(), entry.value)),
)
})
}),
contracts_init.into_iter().map(|(code_hash, bytecode)| (code_hash, bytecode.0)),
);
Self { bundle, receipts, first_block, requests }
}
/// Creates a new `ExecutionOutcome` from a single block execution result.
pub fn single(block_number: u64, output: BlockExecutionOutput<T>) -> Self {
Self {
bundle: output.state,
receipts: vec![output.result.receipts],
first_block: block_number,
requests: vec![output.result.requests],
}
}
/// Creates a new `ExecutionOutcome` from multiple [`BlockExecutionResult`]s.
pub fn from_blocks(
first_block: u64,
bundle: BundleState,
results: Vec<BlockExecutionResult<T>>,
) -> Self {
let mut value = Self { bundle, first_block, receipts: Vec::new(), requests: Vec::new() };
for result in results {
value.receipts.push(result.receipts);
value.requests.push(result.requests);
}
value
}
/// Return revm bundle state.
pub const fn state(&self) -> &BundleState {
&self.bundle
}
/// Returns mutable revm bundle state.
pub const fn state_mut(&mut self) -> &mut BundleState {
&mut self.bundle
}
/// Set first block.
pub const fn set_first_block(&mut self, first_block: BlockNumber) {
self.first_block = first_block;
}
/// Return iterator over all accounts
pub fn accounts_iter(&self) -> impl Iterator<Item = (Address, Option<&AccountInfo>)> {
self.bundle.state().iter().map(|(a, acc)| (*a, acc.info.as_ref()))
}
/// Return iterator over all [`BundleAccount`]s in the bundle
pub fn bundle_accounts_iter(&self) -> impl Iterator<Item = (Address, &BundleAccount)> {
self.bundle.state().iter().map(|(a, acc)| (*a, acc))
}
/// Get account if account is known.
pub fn account(&self, address: &Address) -> Option<Option<Account>> {
self.bundle.account(address).map(|a| a.info.as_ref().map(Into::into))
}
/// Get storage if value is known.
///
/// This means that depending on status we can potentially return `U256::ZERO`.
pub fn storage(&self, address: &Address, storage_key: U256) -> Option<FlaggedStorage> {
self.bundle.account(address).and_then(|a| a.storage_slot(storage_key))
}
/// Return bytecode if known.
pub fn bytecode(&self, code_hash: &B256) -> Option<Bytecode> {
self.bundle.bytecode(code_hash).map(Bytecode)
}
/// Returns [`HashedPostState`] for this execution outcome.
/// See [`HashedPostState::from_bundle_state`] for more info.
pub fn hash_state_slow<KH: KeyHasher>(&self) -> HashedPostState {
HashedPostState::from_bundle_state::<KH>(&self.bundle.state)
}
/// Transform block number to the index of block.
pub const fn block_number_to_index(&self, block_number: BlockNumber) -> Option<usize> {
if self.first_block > block_number {
return None
}
let index = block_number - self.first_block;
if index >= self.receipts.len() as u64 {
return None
}
Some(index as usize)
}
/// Returns the receipt root for all recorded receipts.
/// Note: this function calculated Bloom filters for every receipt and created merkle trees
/// of receipt. This is an expensive operation.
pub fn generic_receipts_root_slow(
&self,
block_number: BlockNumber,
f: impl FnOnce(&[T]) -> B256,
) -> Option<B256> {
Some(f(self.receipts.get(self.block_number_to_index(block_number)?)?))
}
/// Returns reference to receipts.
pub const fn receipts(&self) -> &Vec<Vec<T>> {
&self.receipts
}
/// Returns mutable reference to receipts.
pub const fn receipts_mut(&mut self) -> &mut Vec<Vec<T>> {
&mut self.receipts
}
/// Return all block receipts
pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[T] {
let Some(index) = self.block_number_to_index(block_number) else { return &[] };
&self.receipts[index]
}
/// Is execution outcome empty.
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
/// Number of blocks in the execution outcome.
pub const fn len(&self) -> usize {
self.receipts.len()
}
/// Return first block of the execution outcome
pub const fn first_block(&self) -> BlockNumber {
self.first_block
}
/// Return last block of the execution outcome
pub const fn last_block(&self) -> BlockNumber {
(self.first_block + self.len() as u64).saturating_sub(1)
}
/// Revert the state to the given block number.
///
/// Returns false if the block number is not in the bundle state.
///
/// # Note
///
/// The provided block number will stay inside the bundle state.
pub fn revert_to(&mut self, block_number: BlockNumber) -> bool {
let Some(index) = self.block_number_to_index(block_number) else { return false };
// +1 is for number of blocks that we have as index is included.
let new_len = index + 1;
let rm_trx: usize = self.len() - new_len;
// remove receipts
self.receipts.truncate(new_len);
// remove requests
self.requests.truncate(new_len);
// Revert last n reverts.
self.bundle.revert(rm_trx);
true
}
/// Splits the block range state at a given block number.
/// Returns two split states ([..at], [at..]).
/// The plain state of the 2nd bundle state will contain extra changes
/// that were made in state transitions belonging to the lower state.
///
/// # Panics
///
/// If the target block number is not included in the state block range.
pub fn split_at(self, at: BlockNumber) -> (Option<Self>, Self)
where
T: Clone,
{
if at == self.first_block {
return (None, self)
}
let (mut lower_state, mut higher_state) = (self.clone(), self);
// Revert lower state to [..at].
lower_state.revert_to(at.checked_sub(1).unwrap());
// Truncate higher state to [at..].
let at_idx = higher_state.block_number_to_index(at).unwrap();
higher_state.receipts = higher_state.receipts.split_off(at_idx);
// Ensure that there are enough requests to truncate.
// Sometimes we just have receipts and no requests.
if at_idx < higher_state.requests.len() {
higher_state.requests = higher_state.requests.split_off(at_idx);
}
higher_state.bundle.take_n_reverts(at_idx);
higher_state.first_block = at;
(Some(lower_state), higher_state)
}
/// Extend one state from another
///
/// For state this is very sensitive operation and should be used only when
/// we know that other state was build on top of this one.
/// In most cases this would be true.
pub fn extend(&mut self, other: Self) {
self.bundle.extend(other.bundle);
self.receipts.extend(other.receipts);
self.requests.extend(other.requests);
}
/// Prepends present the state with the given `BundleState`.
/// It adds changes from the given state but does not override any existing changes.
///
/// Reverts and receipts are not updated.
pub fn prepend_state(&mut self, mut other: BundleState) {
let other_len = other.reverts.len();
// take this bundle
let this_bundle = core::mem::take(&mut self.bundle);
// extend other bundle with this
other.extend(this_bundle);
// discard other reverts
other.take_n_reverts(other_len);
// swap bundles
core::mem::swap(&mut self.bundle, &mut other)
}
/// Create a new instance with updated receipts.
pub fn with_receipts(mut self, receipts: Vec<Vec<T>>) -> Self {
self.receipts = receipts;
self
}
/// Create a new instance with updated requests.
pub fn with_requests(mut self, requests: Vec<Requests>) -> Self {
self.requests = requests;
self
}
/// Returns an iterator over all changed accounts from the `ExecutionOutcome`.
///
/// This method filters the accounts to return only those that have undergone changes
/// and maps them into `ChangedAccount` instances, which include the address, nonce, and
/// balance.
pub fn changed_accounts(&self) -> impl Iterator<Item = ChangedAccount> + '_ {
self.accounts_iter().filter_map(|(addr, acc)| acc.map(|acc| (addr, acc))).map(
|(address, acc)| ChangedAccount { address, nonce: acc.nonce, balance: acc.balance },
)
}
}
impl<T: Receipt<Log = Log>> ExecutionOutcome<T> {
    /// Returns an iterator over all logs emitted in the given block, or `None` if the
    /// block is not part of this outcome.
    pub fn logs(&self, block_number: BlockNumber) -> Option<impl Iterator<Item = &Log>> {
        self.block_number_to_index(block_number)
            .map(move |index| self.receipts[index].iter().flat_map(|receipt| receipt.logs()))
    }
    /// Computes the logs bloom for the given block, or `None` if the block is not part of
    /// this outcome.
    pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option<Bloom> {
        self.logs(block_number).map(|logs| logs_bloom(logs))
    }
}
impl ExecutionOutcome {
    /// Returns the ethereum receipt root for all recorded receipts.
    ///
    /// Note: this function calculates Bloom filters for every receipt and creates merkle trees
    /// of receipts. This is an expensive operation.
    pub fn ethereum_receipts_root(&self, block_number: BlockNumber) -> Option<B256> {
        self.generic_receipts_root_slow(
            block_number,
            reth_ethereum_primitives::Receipt::calculate_receipt_root_no_memo,
        )
    }
}
impl<T> From<(BlockExecutionOutput<T>, BlockNumber)> for ExecutionOutcome<T> {
    /// Converts a single block's execution output plus its block number into an outcome
    /// spanning exactly that block. Delegates to [`ExecutionOutcome::single`].
    fn from(value: (BlockExecutionOutput<T>, BlockNumber)) -> Self {
        let (output, block_number) = value;
        Self::single(block_number, output)
    }
}
#[cfg(feature = "serde-bincode-compat")]
pub(super) mod serde_bincode_compat {
    use alloc::{borrow::Cow, vec::Vec};
    use alloy_eips::eip7685::Requests;
    use alloy_primitives::BlockNumber;
    use reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat;
    use revm::database::BundleState;
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{DeserializeAs, SerializeAs};
    /// Bincode-compatible [`super::ExecutionOutcome`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
    /// use reth_execution_types::{serde_bincode_compat, ExecutionOutcome};
    ///
    /// use reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat;
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
    /// #[serde_as]
    /// #[derive(Serialize, Deserialize)]
    /// struct Data<T: SerdeBincodeCompat + core::fmt::Debug> {
    ///     #[serde_as(as = "serde_bincode_compat::ExecutionOutcome<'_, T>")]
    ///     chain: ExecutionOutcome<T>,
    /// }
    /// ```
    #[derive(Debug, Serialize, Deserialize)]
    pub struct ExecutionOutcome<'a, T>
    where
        T: SerdeBincodeCompat + core::fmt::Debug,
    {
        bundle: Cow<'a, BundleState>,
        receipts: Vec<Vec<T::BincodeRepr<'a>>>,
        first_block: BlockNumber,
        // `Cow` (rather than an owned `Vec`) lets serialization borrow the requests from the
        // source outcome without cloning (see the `From<&super::ExecutionOutcome>` impl).
        #[expect(clippy::owned_cow)]
        requests: Cow<'a, Vec<Requests>>,
    }
    impl<'a, T> From<&'a super::ExecutionOutcome<T>> for ExecutionOutcome<'a, T>
    where
        T: SerdeBincodeCompat + core::fmt::Debug,
    {
        fn from(value: &'a super::ExecutionOutcome<T>) -> Self {
            // Borrow bundle and requests; receipts are converted to their bincode repr.
            ExecutionOutcome {
                bundle: Cow::Borrowed(&value.bundle),
                receipts: value
                    .receipts
                    .iter()
                    .map(|vec| vec.iter().map(|receipt| T::as_repr(receipt)).collect())
                    .collect(),
                first_block: value.first_block,
                requests: Cow::Borrowed(&value.requests),
            }
        }
    }
    impl<'a, T> From<ExecutionOutcome<'a, T>> for super::ExecutionOutcome<T>
    where
        T: SerdeBincodeCompat + core::fmt::Debug,
    {
        fn from(value: ExecutionOutcome<'a, T>) -> Self {
            Self {
                bundle: value.bundle.into_owned(),
                receipts: value
                    .receipts
                    .into_iter()
                    .map(|vec| vec.into_iter().map(|receipt| T::from_repr(receipt)).collect())
                    .collect(),
                first_block: value.first_block,
                requests: value.requests.into_owned(),
            }
        }
    }
    impl<T> SerializeAs<super::ExecutionOutcome<T>> for ExecutionOutcome<'_, T>
    where
        T: SerdeBincodeCompat + core::fmt::Debug,
    {
        fn serialize_as<S>(
            source: &super::ExecutionOutcome<T>,
            serializer: S,
        ) -> Result<S::Ok, S::Error>
        where
            S: Serializer,
        {
            ExecutionOutcome::from(source).serialize(serializer)
        }
    }
    impl<'de, T> DeserializeAs<'de, super::ExecutionOutcome<T>> for ExecutionOutcome<'de, T>
    where
        T: SerdeBincodeCompat + core::fmt::Debug,
    {
        fn deserialize_as<D>(deserializer: D) -> Result<super::ExecutionOutcome<T>, D::Error>
        where
            D: Deserializer<'de>,
        {
            ExecutionOutcome::deserialize(deserializer).map(Into::into)
        }
    }
    impl<T: SerdeBincodeCompat + core::fmt::Debug> SerdeBincodeCompat for super::ExecutionOutcome<T> {
        type BincodeRepr<'a> = ExecutionOutcome<'a, T>;
        fn as_repr(&self) -> Self::BincodeRepr<'_> {
            self.into()
        }
        fn from_repr(repr: Self::BincodeRepr<'_>) -> Self {
            repr.into()
        }
    }
    #[cfg(test)]
    mod tests {
        use super::super::{serde_bincode_compat, ExecutionOutcome};
        use rand::Rng;
        use reth_ethereum_primitives::Receipt;
        use reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat;
        use serde::{Deserialize, Serialize};
        use serde_with::serde_as;
        #[test]
        fn test_chain_bincode_roundtrip() {
            #[serde_as]
            #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
            struct Data<T: SerdeBincodeCompat + core::fmt::Debug> {
                #[serde_as(as = "serde_bincode_compat::ExecutionOutcome<'_, T>")]
                data: ExecutionOutcome<T>,
            }
            // NOTE(review): `bytes` is filled with random data but never used below —
            // presumably leftover scaffolding from an earlier version of this test.
            let mut bytes = [0u8; 1024];
            rand::rng().fill(bytes.as_mut_slice());
            let data = Data {
                data: ExecutionOutcome {
                    bundle: Default::default(),
                    receipts: vec![],
                    first_block: 0,
                    requests: vec![],
                },
            };
            let encoded = bincode::serialize(&data).unwrap();
            let decoded = bincode::deserialize::<Data<Receipt>>(&encoded).unwrap();
            assert_eq!(decoded, data);
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::TxType;
use alloy_primitives::{bytes, Address, LogData, B256};
#[test]
fn test_initialization() {
// Create a new BundleState object with initial data
let bundle = BundleState::new(
vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())],
vec![vec![(Address::new([2; 20]), None, vec![])]],
vec![],
);
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
})]];
// Create a Requests object with a vector of requests
let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: bundle.clone(),
receipts: receipts.clone(),
requests: requests.clone(),
first_block,
};
// Assert that creating a new ExecutionOutcome using the constructor matches exec_res
assert_eq!(
ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()),
exec_res
);
// Create a BundleStateInit object and insert initial data
let mut state_init: BundleStateInit = HashMap::default();
state_init
.insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default()));
// Create a HashMap for account reverts and insert initial data
let mut revert_inner: HashMap<Address, AccountRevertInit> = HashMap::default();
revert_inner.insert(Address::new([2; 20]), (None, vec![]));
// Create a RevertsInit object and insert the revert_inner data
let mut revert_init: RevertsInit = HashMap::default();
revert_init.insert(123, revert_inner);
// Assert that creating a new ExecutionOutcome using the new_init method matches
// exec_res
assert_eq!(
ExecutionOutcome::new_init(
state_init,
revert_init,
vec![],
receipts,
first_block,
requests,
),
exec_res
);
}
#[test]
fn test_block_number_to_index() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
})]];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(),
receipts,
requests: vec![],
first_block,
};
// Test before the first block
assert_eq!(exec_res.block_number_to_index(12), None);
// Test after the first block but index larger than receipts length
assert_eq!(exec_res.block_number_to_index(133), None);
// Test after the first block
assert_eq!(exec_res.block_number_to_index(123), Some(0));
}
#[test]
fn test_get_logs() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
success: true,
}]];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(),
receipts,
requests: vec![],
first_block,
};
// Get logs for block number 123
let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect();
// Assert that the logs match the expected logs
assert_eq!(logs, vec![&Log::<LogData>::default()]);
}
#[test]
fn test_receipts_by_block() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
success: true,
})]];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(), // Default value for bundle
receipts, // Include the created receipts
requests: vec![], // Empty vector for requests
first_block, // Set the first block number
};
// Get receipts for block number 123 and convert the result into a vector
let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect();
// Assert that the receipts for block number 123 match the expected receipts
assert_eq!(
receipts_by_block,
vec![&Some(reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
success: true,
})]
);
}
#[test]
fn test_receipts_len() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
success: true,
})]];
// Create an empty Receipts object
let receipts_empty = vec![];
// Define the first block number
let first_block = 123;
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res = ExecutionOutcome {
bundle: Default::default(), // Default value for bundle
receipts, // Include the created receipts
requests: vec![], // Empty vector for requests
first_block, // Set the first block number
};
// Assert that the length of receipts in exec_res is 1
assert_eq!(exec_res.len(), 1);
// Assert that exec_res is not empty
assert!(!exec_res.is_empty());
// Create a ExecutionOutcome object with an empty Receipts object
let exec_res_empty_receipts: ExecutionOutcome = ExecutionOutcome {
bundle: Default::default(), // Default value for bundle
receipts: receipts_empty, // Include the empty receipts
requests: vec![], // Empty vector for requests
first_block, // Set the first block number
};
// Assert that the length of receipts in exec_res_empty_receipts is 0
assert_eq!(exec_res_empty_receipts.len(), 0);
// Assert that exec_res_empty_receipts is empty
assert!(exec_res_empty_receipts.is_empty());
}
#[test]
fn test_revert_to() {
// Create a random receipt object
let receipt = reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
};
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]];
// Define the first block number
let first_block = 123;
// Create a request.
let request = bytes!("deadbeef");
// Create a vector of Requests containing the request.
let requests =
vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])];
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let mut exec_res =
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
// Assert that the revert_to method returns true when reverting to the initial block number.
assert!(exec_res.revert_to(123));
// Assert that the receipts are properly cut after reverting to the initial block number.
assert_eq!(exec_res.receipts, vec![vec![Some(receipt)]]);
// Assert that the requests are properly cut after reverting to the initial block number.
assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]);
// Assert that the revert_to method returns false when attempting to revert to a block
// number greater than the initial block number.
assert!(!exec_res.revert_to(133));
// Assert that the revert_to method returns false when attempting to revert to a block
// number less than the initial block number.
assert!(!exec_res.revert_to(10));
}
#[test]
fn test_extend_execution_outcome() {
// Create a Receipt object with specific attributes.
let receipt = reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
};
// Create a Receipts object containing the receipt.
let receipts = vec![vec![Some(receipt.clone())]];
// Create a request.
let request = bytes!("deadbeef");
// Create a vector of Requests containing the request.
let requests = vec![Requests::new(vec![request.clone()])];
// Define the initial block number.
let first_block = 123;
// Create an ExecutionOutcome object.
let mut exec_res =
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
// Extend the ExecutionOutcome object by itself.
exec_res.extend(exec_res.clone());
// Assert the extended ExecutionOutcome matches the expected outcome.
assert_eq!(
exec_res,
ExecutionOutcome {
bundle: Default::default(),
receipts: vec![vec![Some(receipt.clone())], vec![Some(receipt)]],
requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])],
first_block: 123,
}
);
}
#[test]
fn test_split_at_execution_outcome() {
// Create a random receipt object
let receipt = reth_ethereum_primitives::Receipt {
tx_type: TxType::Legacy,
cumulative_gas_used: 46913,
logs: vec![],
success: true,
};
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![
vec![Some(receipt.clone())],
vec![Some(receipt.clone())],
vec![Some(receipt.clone())],
];
// Define the first block number
let first_block = 123;
// Create a request.
let request = bytes!("deadbeef");
// Create a vector of Requests containing the request.
let requests = vec![
Requests::new(vec![request.clone()]),
Requests::new(vec![request.clone()]),
Requests::new(vec![request.clone()]),
];
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
// first_block
let exec_res =
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
// Split the ExecutionOutcome at block number 124
let result = exec_res.clone().split_at(124);
// Define the expected lower ExecutionOutcome after splitting
let lower_execution_outcome = ExecutionOutcome {
bundle: Default::default(),
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/execution-errors/src/lib.rs | crates/evm/execution-errors/src/lib.rs | //! Commonly used error types used when doing block execution.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
pub mod trie;
pub use trie::*;
pub use alloy_evm::block::{
BlockExecutionError, BlockValidationError, InternalBlockExecutionError,
};
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/evm/execution-errors/src/trie.rs | crates/evm/execution-errors/src/trie.rs | //! Errors when computing the state root.
use alloc::{boxed::Box, string::ToString};
use alloy_primitives::{Bytes, B256};
use nybbles::Nibbles;
use reth_storage_errors::{db::DatabaseError, provider::ProviderError};
use thiserror::Error;
/// State root errors.
///
/// Both variants ultimately wrap a [`DatabaseError`]; see the `From` impl below.
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum StateRootError {
    /// Internal database error.
    #[error(transparent)]
    Database(#[from] DatabaseError),
    /// Storage root error.
    #[error(transparent)]
    StorageRootError(#[from] StorageRootError),
}
impl From<StateRootError> for DatabaseError {
    /// Unwraps the underlying database error, which every variant carries.
    fn from(err: StateRootError) -> Self {
        match err {
            StateRootError::Database(db_err) => db_err,
            StateRootError::StorageRootError(StorageRootError::Database(db_err)) => db_err,
        }
    }
}
/// Storage root error.
///
/// Currently a single-variant wrapper around [`DatabaseError`].
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum StorageRootError {
    /// Internal database error.
    #[error(transparent)]
    Database(#[from] DatabaseError),
}
impl From<StorageRootError> for DatabaseError {
    /// Extracts the wrapped database error.
    fn from(err: StorageRootError) -> Self {
        // `StorageRootError` has a single variant, so this pattern is irrefutable.
        let StorageRootError::Database(db_err) = err;
        db_err
    }
}
/// State proof errors.
#[derive(Error, PartialEq, Eq, Clone, Debug)]
pub enum StateProofError {
    /// Internal database error.
    #[error(transparent)]
    Database(#[from] DatabaseError),
    /// RLP decoding error.
    #[error(transparent)]
    Rlp(#[from] alloy_rlp::Error),
}
impl From<StateProofError> for ProviderError {
    /// Maps each proof error variant onto the corresponding provider error variant.
    fn from(value: StateProofError) -> Self {
        match value {
            StateProofError::Rlp(rlp_err) => Self::Rlp(rlp_err),
            StateProofError::Database(db_err) => Self::Database(db_err),
        }
    }
}
/// Result type with [`SparseStateTrieError`] as error.
pub type SparseStateTrieResult<Ok> = Result<Ok, SparseStateTrieError>;
/// Error encountered in `SparseStateTrie`.
///
/// The kind is boxed so the error (and any `Result` carrying it) stays pointer-sized.
#[derive(Error, Debug)]
#[error(transparent)]
pub struct SparseStateTrieError(#[from] Box<SparseStateTrieErrorKind>);
impl<T: Into<SparseStateTrieErrorKind>> From<T> for SparseStateTrieError {
    // `#[cold]` marks error construction as the unlikely path for branch layout.
    #[cold]
    fn from(value: T) -> Self {
        Self(Box::new(value.into()))
    }
}
impl From<SparseTrieError> for SparseStateTrieErrorKind {
    #[cold]
    fn from(value: SparseTrieError) -> Self {
        // Unbox the inner `SparseTrieErrorKind` and wrap it in the state-trie kind.
        Self::Sparse(*value.0)
    }
}
impl SparseStateTrieError {
    /// Returns the error kind.
    pub const fn kind(&self) -> &SparseStateTrieErrorKind {
        &self.0
    }
    /// Consumes the error and returns the error kind.
    pub fn into_kind(self) -> SparseStateTrieErrorKind {
        *self.0
    }
}
/// Error encountered in `SparseStateTrie`.
#[derive(Error, Debug)]
pub enum SparseStateTrieErrorKind {
    /// Encountered invalid root node.
    #[error("invalid root node at {path:?}: {node:?}")]
    InvalidRootNode {
        /// Path to first proof node.
        path: Nibbles,
        /// Encoded first proof node.
        node: Bytes,
    },
    /// Storage sparse trie error.
    ///
    /// Carries the hashed address of the account whose storage trie failed.
    #[error("error in storage trie for address {0:?}: {1:?}")]
    SparseStorageTrie(B256, SparseTrieErrorKind),
    /// Sparse trie error.
    #[error(transparent)]
    Sparse(#[from] SparseTrieErrorKind),
    /// RLP error.
    #[error(transparent)]
    Rlp(#[from] alloy_rlp::Error),
}
/// Result type with [`SparseTrieError`] as error.
pub type SparseTrieResult<Ok> = Result<Ok, SparseTrieError>;
/// Error encountered in `SparseTrie`.
///
/// The kind is boxed so the error stays pointer-sized (mirrors [`SparseStateTrieError`]).
#[derive(Error, Debug)]
#[error(transparent)]
pub struct SparseTrieError(#[from] Box<SparseTrieErrorKind>);
impl<T: Into<SparseTrieErrorKind>> From<T> for SparseTrieError {
    // `#[cold]` marks error construction as the unlikely path for branch layout.
    #[cold]
    fn from(value: T) -> Self {
        Self(Box::new(value.into()))
    }
}
impl SparseTrieError {
    /// Returns the error kind.
    pub const fn kind(&self) -> &SparseTrieErrorKind {
        &self.0
    }
    /// Consumes the error and returns the error kind.
    pub fn into_kind(self) -> SparseTrieErrorKind {
        *self.0
    }
}
/// [`SparseTrieError`] kind.
#[derive(Error, Debug)]
pub enum SparseTrieErrorKind {
    /// Sparse trie is still blind. Thrown on attempt to update it.
    #[error("sparse trie is blind")]
    Blind,
    /// Encountered blinded node on update.
    #[error("attempted to update blind node at {path:?}: {hash}")]
    BlindedNode {
        /// Blind node path.
        path: Nibbles,
        /// Node hash
        hash: B256,
    },
    /// Encountered unexpected node at path when revealing.
    #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")]
    Reveal {
        /// Path to the node.
        path: Nibbles,
        /// Node that was at the path when revealing.
        node: Box<dyn core::fmt::Debug + Send>,
    },
    /// RLP error.
    #[error(transparent)]
    Rlp(#[from] alloy_rlp::Error),
    /// Node not found in provider during removal.
    #[error("node {path:?} not found in provider during removal")]
    NodeNotFoundInProvider {
        /// Path to the missing node.
        path: Nibbles,
    },
    /// Other.
    #[error(transparent)]
    Other(#[from] Box<dyn core::error::Error + Send>),
}
/// Trie witness errors.
#[derive(Error, Debug)]
pub enum TrieWitnessError {
    /// Error gathering proofs.
    #[error(transparent)]
    Proof(#[from] StateProofError),
    /// RLP decoding error.
    #[error(transparent)]
    Rlp(#[from] alloy_rlp::Error),
    /// Sparse state trie error.
    #[error(transparent)]
    Sparse(#[from] SparseStateTrieError),
    /// Missing account.
    ///
    /// Carries the hashed address of the account that was not found.
    #[error("missing account {_0}")]
    MissingAccount(B256),
}
impl From<SparseStateTrieErrorKind> for TrieWitnessError {
fn from(error: SparseStateTrieErrorKind) -> Self {
Self::Sparse(error.into())
}
}
impl From<TrieWitnessError> for ProviderError {
    /// Converts the witness error into a provider error by rendering it to a string.
    fn from(error: TrieWitnessError) -> Self {
        Self::TrieWitnessError(error.to_string())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tasks/src/lib.rs | crates/tasks/src/lib.rs | //! Reth task management.
//!
//! # Feature Flags
//!
//! - `rayon`: Enable rayon thread pool for blocking tasks.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use crate::{
metrics::{IncCounterOnDrop, TaskExecutorMetrics},
shutdown::{signal, GracefulShutdown, GracefulShutdownGuard, Shutdown, Signal},
};
use dyn_clone::DynClone;
use futures_util::{
future::{select, BoxFuture},
Future, FutureExt, TryFutureExt,
};
use std::{
any::Any,
fmt::{Display, Formatter},
pin::{pin, Pin},
sync::{
atomic::{AtomicUsize, Ordering},
Arc, OnceLock,
},
task::{ready, Context, Poll},
};
use tokio::{
runtime::Handle,
sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
task::JoinHandle,
};
use tracing::{debug, error};
use tracing_futures::Instrument;
pub mod metrics;
pub mod shutdown;
#[cfg(feature = "rayon")]
pub mod pool;
/// Global [`TaskExecutor`] instance that can be accessed from anywhere.
///
/// Initialized at most once via [`OnceLock`]. NOTE(review): the initializer is not visible
/// in this chunk — presumably set when the task manager/executor is first constructed; confirm.
static GLOBAL_EXECUTOR: OnceLock<TaskExecutor> = OnceLock::new();
/// A type that can spawn tasks.
///
/// The main purpose of this type is to abstract over [`TaskExecutor`] so it's more convenient to
/// provide default impls for testing.
///
///
/// # Examples
///
/// Use the [`TokioTaskExecutor`] that spawns with [`tokio::task::spawn`]
///
/// ```
/// # async fn t() {
/// use reth_tasks::{TaskSpawner, TokioTaskExecutor};
/// let executor = TokioTaskExecutor::default();
///
/// let task = executor.spawn(Box::pin(async {
///     // -- snip --
/// }));
/// task.await.unwrap();
/// # }
/// ```
///
/// Use the [`TaskExecutor`] that spawns task directly onto the tokio runtime via the [Handle].
///
/// ```
/// # use reth_tasks::TaskManager;
/// fn t() {
///  use reth_tasks::TaskSpawner;
///  let rt = tokio::runtime::Runtime::new().unwrap();
///  let manager = TaskManager::new(rt.handle().clone());
///  let executor = manager.executor();
///  let task = TaskSpawner::spawn(&executor, Box::pin(async {
///      // -- snip --
///  }));
///  rt.block_on(task).unwrap();
/// # }
/// ```
///
/// The [`TaskSpawner`] trait is [`DynClone`] so `Box<dyn TaskSpawner>` are also `Clone`.
#[auto_impl::auto_impl(&, Arc)]
pub trait TaskSpawner: Send + Sync + Unpin + std::fmt::Debug + DynClone {
    /// Spawns the task onto the runtime.
    /// See also [`Handle::spawn`].
    fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()>;
    /// This spawns a critical task onto the runtime.
    ///
    /// `name` labels the task — presumably used for logging/metrics when a critical task
    /// terminates; confirm against the [`TaskExecutor`] implementation.
    fn spawn_critical(&self, name: &'static str, fut: BoxFuture<'static, ()>) -> JoinHandle<()>;
    /// Spawns a blocking task onto the runtime.
    fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()>;
    /// This spawns a critical blocking task onto the runtime.
    fn spawn_critical_blocking(
        &self,
        name: &'static str,
        fut: BoxFuture<'static, ()>,
    ) -> JoinHandle<()>;
}
// Makes `Box<dyn TaskSpawner>` implement `Clone` via `DynClone`.
dyn_clone::clone_trait_object!(TaskSpawner);
/// An [`TaskSpawner`] that uses [`tokio::task::spawn`] to execute tasks
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct TokioTaskExecutor;
impl TokioTaskExecutor {
/// Converts the instance to a boxed [`TaskSpawner`].
pub fn boxed(self) -> Box<dyn TaskSpawner + 'static> {
Box::new(self)
}
}
impl TaskSpawner for TokioTaskExecutor {
fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> {
tokio::task::spawn(fut)
}
fn spawn_critical(&self, _name: &'static str, fut: BoxFuture<'static, ()>) -> JoinHandle<()> {
tokio::task::spawn(fut)
}
fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> {
tokio::task::spawn_blocking(move || tokio::runtime::Handle::current().block_on(fut))
}
fn spawn_critical_blocking(
&self,
_name: &'static str,
fut: BoxFuture<'static, ()>,
) -> JoinHandle<()> {
tokio::task::spawn_blocking(move || tokio::runtime::Handle::current().block_on(fut))
}
}
/// Many reth components require to spawn tasks for long-running jobs. For example `discovery`
/// spawns tasks to handle egress and ingress of udp traffic or `network` that spawns session tasks
/// that handle the traffic to and from a peer.
///
/// To unify how tasks are created, the [`TaskManager`] provides access to the configured Tokio
/// runtime. A [`TaskManager`] stores the [`tokio::runtime::Handle`] it is associated with. In this
/// way it is possible to configure on which runtime a task is executed.
///
/// The main purpose of this type is to be able to monitor if a critical task panicked, for
/// diagnostic purposes, since tokio task essentially fail silently. Therefore, this type is a
/// Stream that yields the name of panicked task, See [`TaskExecutor::spawn_critical`]. In order to
/// execute Tasks use the [`TaskExecutor`] type [`TaskManager::executor`].
#[derive(Debug)]
#[must_use = "TaskManager must be polled to monitor critical tasks"]
pub struct TaskManager {
/// Handle to the tokio runtime this task manager is associated with.
///
/// See [`Handle`] docs.
handle: Handle,
/// Sender half for sending task events to this type
task_events_tx: UnboundedSender<TaskEvent>,
/// Receiver for task events
task_events_rx: UnboundedReceiver<TaskEvent>,
/// The [Signal] to fire when all tasks should be shutdown.
///
/// This is fired when dropped.
signal: Option<Signal>,
/// Receiver of the shutdown signal.
on_shutdown: Shutdown,
/// How many [`GracefulShutdown`] tasks are currently active
graceful_tasks: Arc<AtomicUsize>,
}
// === impl TaskManager ===
impl TaskManager {
/// Returns a __new__ [`TaskManager`] over the currently running Runtime.
///
/// This must be polled for the duration of the program.
///
/// To obtain the current [`TaskExecutor`] see [`TaskExecutor::current`].
///
/// # Panics
///
/// This will panic if called outside the context of a Tokio runtime.
pub fn current() -> Self {
let handle = Handle::current();
Self::new(handle)
}
/// Create a new instance connected to the given handle's tokio runtime.
///
/// This also sets the global [`TaskExecutor`].
pub fn new(handle: Handle) -> Self {
let (task_events_tx, task_events_rx) = unbounded_channel();
let (signal, on_shutdown) = signal();
let manager = Self {
handle,
task_events_tx,
task_events_rx,
signal: Some(signal),
on_shutdown,
graceful_tasks: Arc::new(AtomicUsize::new(0)),
};
let _ = GLOBAL_EXECUTOR
.set(manager.executor())
.inspect_err(|_| error!("Global executor already set"));
manager
}
/// Returns a new [`TaskExecutor`] that can spawn new tasks onto the tokio runtime this type is
/// connected to.
pub fn executor(&self) -> TaskExecutor {
TaskExecutor {
handle: self.handle.clone(),
on_shutdown: self.on_shutdown.clone(),
task_events_tx: self.task_events_tx.clone(),
metrics: Default::default(),
graceful_tasks: Arc::clone(&self.graceful_tasks),
}
}
/// Fires the shutdown signal and awaits until all tasks are shutdown.
pub fn graceful_shutdown(self) {
let _ = self.do_graceful_shutdown(None);
}
/// Fires the shutdown signal and awaits until all tasks are shutdown.
///
/// Returns true if all tasks were shutdown before the timeout elapsed.
pub fn graceful_shutdown_with_timeout(self, timeout: std::time::Duration) -> bool {
self.do_graceful_shutdown(Some(timeout))
}
fn do_graceful_shutdown(self, timeout: Option<std::time::Duration>) -> bool {
drop(self.signal);
let when = timeout.map(|t| std::time::Instant::now() + t);
while self.graceful_tasks.load(Ordering::Relaxed) > 0 {
if when.map(|when| std::time::Instant::now() > when).unwrap_or(false) {
debug!("graceful shutdown timed out");
return false
}
std::hint::spin_loop();
}
debug!("gracefully shut down");
true
}
}
/// An endless future that resolves if a critical task panicked.
///
/// See [`TaskExecutor::spawn_critical`]
impl Future for TaskManager {
type Output = Result<(), PanickedTaskError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match ready!(self.as_mut().get_mut().task_events_rx.poll_recv(cx)) {
Some(TaskEvent::Panic(err)) => Poll::Ready(Err(err)),
Some(TaskEvent::GracefulShutdown) | None => {
if let Some(signal) = self.get_mut().signal.take() {
signal.fire();
}
Poll::Ready(Ok(()))
}
}
}
}
/// Error with the name of the task that panicked and an error downcasted to string, if possible.
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
pub struct PanickedTaskError {
task_name: &'static str,
error: Option<String>,
}
impl Display for PanickedTaskError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let task_name = self.task_name;
if let Some(error) = &self.error {
write!(f, "Critical task `{task_name}` panicked: `{error}`")
} else {
write!(f, "Critical task `{task_name}` panicked")
}
}
}
impl PanickedTaskError {
fn new(task_name: &'static str, error: Box<dyn Any>) -> Self {
let error = match error.downcast::<String>() {
Ok(value) => Some(*value),
Err(error) => match error.downcast::<&str>() {
Ok(value) => Some(value.to_string()),
Err(_) => None,
},
};
Self { task_name, error }
}
}
/// Represents the events that the `TaskManager`'s main future can receive.
#[derive(Debug)]
enum TaskEvent {
/// Indicates that a critical task has panicked.
Panic(PanickedTaskError),
/// A signal requesting a graceful shutdown of the `TaskManager`.
GracefulShutdown,
}
/// A type that can spawn new tokio tasks
#[derive(Debug, Clone)]
pub struct TaskExecutor {
/// Handle to the tokio runtime this task manager is associated with.
///
/// See [`Handle`] docs.
handle: Handle,
/// Receiver of the shutdown signal.
on_shutdown: Shutdown,
/// Sender half for sending task events to this type
task_events_tx: UnboundedSender<TaskEvent>,
/// Task Executor Metrics
metrics: TaskExecutorMetrics,
/// How many [`GracefulShutdown`] tasks are currently active
graceful_tasks: Arc<AtomicUsize>,
}
// === impl TaskExecutor ===
impl TaskExecutor {
/// Attempts to get the current `TaskExecutor` if one has been initialized.
///
/// Returns an error if no [`TaskExecutor`] has been initialized via [`TaskManager`].
pub fn try_current() -> Result<Self, NoCurrentTaskExecutorError> {
GLOBAL_EXECUTOR.get().cloned().ok_or_else(NoCurrentTaskExecutorError::default)
}
/// Returns the current `TaskExecutor`.
///
/// # Panics
///
/// Panics if no global executor has been initialized. Use [`try_current`](Self::try_current)
/// for a non-panicking version.
pub fn current() -> Self {
Self::try_current().unwrap()
}
/// Returns the [Handle] to the tokio runtime.
pub const fn handle(&self) -> &Handle {
&self.handle
}
/// Returns the receiver of the shutdown signal.
pub const fn on_shutdown_signal(&self) -> &Shutdown {
&self.on_shutdown
}
/// Spawns a future on the tokio runtime depending on the [`TaskKind`]
fn spawn_on_rt<F>(&self, fut: F, task_kind: TaskKind) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
match task_kind {
TaskKind::Default => self.handle.spawn(fut),
TaskKind::Blocking => {
let handle = self.handle.clone();
self.handle.spawn_blocking(move || handle.block_on(fut))
}
}
}
/// Spawns a regular task depending on the given [`TaskKind`]
fn spawn_task_as<F>(&self, fut: F, task_kind: TaskKind) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
let on_shutdown = self.on_shutdown.clone();
// Clone only the specific counter that we need.
let finished_regular_tasks_total_metrics =
self.metrics.finished_regular_tasks_total.clone();
// Wrap the original future to increment the finished tasks counter upon completion
let task = {
async move {
// Create an instance of IncCounterOnDrop with the counter to increment
let _inc_counter_on_drop =
IncCounterOnDrop::new(finished_regular_tasks_total_metrics);
let fut = pin!(fut);
let _ = select(on_shutdown, fut).await;
}
}
.in_current_span();
self.spawn_on_rt(task, task_kind)
}
/// Spawns the task onto the runtime.
/// The given future resolves as soon as the [Shutdown] signal is received.
///
/// See also [`Handle::spawn`].
pub fn spawn<F>(&self, fut: F) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
self.spawn_task_as(fut, TaskKind::Default)
}
/// Spawns a blocking task onto the runtime.
/// The given future resolves as soon as the [Shutdown] signal is received.
///
/// See also [`Handle::spawn_blocking`].
pub fn spawn_blocking<F>(&self, fut: F) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
self.spawn_task_as(fut, TaskKind::Blocking)
}
/// Spawns the task onto the runtime.
/// The given future resolves as soon as the [Shutdown] signal is received.
///
/// See also [`Handle::spawn`].
pub fn spawn_with_signal<F>(&self, f: impl FnOnce(Shutdown) -> F) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
let on_shutdown = self.on_shutdown.clone();
let fut = f(on_shutdown);
let task = fut.in_current_span();
self.handle.spawn(task)
}
/// Spawns a critical task depending on the given [`TaskKind`]
fn spawn_critical_as<F>(
&self,
name: &'static str,
fut: F,
task_kind: TaskKind,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
let panicked_tasks_tx = self.task_events_tx.clone();
let on_shutdown = self.on_shutdown.clone();
// wrap the task in catch unwind
let task = std::panic::AssertUnwindSafe(fut)
.catch_unwind()
.map_err(move |error| {
let task_error = PanickedTaskError::new(name, error);
error!("{task_error}");
let _ = panicked_tasks_tx.send(TaskEvent::Panic(task_error));
})
.in_current_span();
// Clone only the specific counter that we need.
let finished_critical_tasks_total_metrics =
self.metrics.finished_critical_tasks_total.clone();
let task = async move {
// Create an instance of IncCounterOnDrop with the counter to increment
let _inc_counter_on_drop = IncCounterOnDrop::new(finished_critical_tasks_total_metrics);
let task = pin!(task);
let _ = select(on_shutdown, task).await;
};
self.spawn_on_rt(task, task_kind)
}
/// This spawns a critical blocking task onto the runtime.
/// The given future resolves as soon as the [Shutdown] signal is received.
///
/// If this task panics, the [`TaskManager`] is notified.
pub fn spawn_critical_blocking<F>(&self, name: &'static str, fut: F) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
self.spawn_critical_as(name, fut, TaskKind::Blocking)
}
/// This spawns a critical task onto the runtime.
/// The given future resolves as soon as the [Shutdown] signal is received.
///
/// If this task panics, the [`TaskManager`] is notified.
pub fn spawn_critical<F>(&self, name: &'static str, fut: F) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
self.spawn_critical_as(name, fut, TaskKind::Default)
}
/// This spawns a critical task onto the runtime.
///
/// If this task panics, the [`TaskManager`] is notified.
pub fn spawn_critical_with_shutdown_signal<F>(
&self,
name: &'static str,
f: impl FnOnce(Shutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
let panicked_tasks_tx = self.task_events_tx.clone();
let on_shutdown = self.on_shutdown.clone();
let fut = f(on_shutdown);
// wrap the task in catch unwind
let task = std::panic::AssertUnwindSafe(fut)
.catch_unwind()
.map_err(move |error| {
let task_error = PanickedTaskError::new(name, error);
error!("{task_error}");
let _ = panicked_tasks_tx.send(TaskEvent::Panic(task_error));
})
.map(drop)
.in_current_span();
self.handle.spawn(task)
}
/// This spawns a critical task onto the runtime.
///
/// If this task panics, the [`TaskManager`] is notified.
/// The [`TaskManager`] will wait until the given future has completed before shutting down.
///
/// # Example
///
/// ```no_run
/// # async fn t(executor: reth_tasks::TaskExecutor) {
///
/// executor.spawn_critical_with_graceful_shutdown_signal("grace", |shutdown| async move {
/// // await the shutdown signal
/// let guard = shutdown.await;
/// // do work before exiting the program
/// tokio::time::sleep(std::time::Duration::from_secs(1)).await;
/// // allow graceful shutdown
/// drop(guard);
/// });
/// # }
/// ```
pub fn spawn_critical_with_graceful_shutdown_signal<F>(
&self,
name: &'static str,
f: impl FnOnce(GracefulShutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
let panicked_tasks_tx = self.task_events_tx.clone();
let on_shutdown = GracefulShutdown::new(
self.on_shutdown.clone(),
GracefulShutdownGuard::new(Arc::clone(&self.graceful_tasks)),
);
let fut = f(on_shutdown);
// wrap the task in catch unwind
let task = std::panic::AssertUnwindSafe(fut)
.catch_unwind()
.map_err(move |error| {
let task_error = PanickedTaskError::new(name, error);
error!("{task_error}");
let _ = panicked_tasks_tx.send(TaskEvent::Panic(task_error));
})
.map(drop)
.in_current_span();
self.handle.spawn(task)
}
/// This spawns a regular task onto the runtime.
///
/// The [`TaskManager`] will wait until the given future has completed before shutting down.
///
/// # Example
///
/// ```no_run
/// # async fn t(executor: reth_tasks::TaskExecutor) {
///
/// executor.spawn_with_graceful_shutdown_signal(|shutdown| async move {
/// // await the shutdown signal
/// let guard = shutdown.await;
/// // do work before exiting the program
/// tokio::time::sleep(std::time::Duration::from_secs(1)).await;
/// // allow graceful shutdown
/// drop(guard);
/// });
/// # }
/// ```
pub fn spawn_with_graceful_shutdown_signal<F>(
&self,
f: impl FnOnce(GracefulShutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
let on_shutdown = GracefulShutdown::new(
self.on_shutdown.clone(),
GracefulShutdownGuard::new(Arc::clone(&self.graceful_tasks)),
);
let fut = f(on_shutdown);
self.handle.spawn(fut)
}
/// Sends a request to the `TaskManager` to initiate a graceful shutdown.
///
/// Caution: This will terminate the entire program.
///
/// The [`TaskManager`] upon receiving this event, will terminate and initiate the shutdown that
/// can be handled via the returned [`GracefulShutdown`].
pub fn initiate_graceful_shutdown(
&self,
) -> Result<GracefulShutdown, tokio::sync::mpsc::error::SendError<()>> {
self.task_events_tx
.send(TaskEvent::GracefulShutdown)
.map_err(|_send_error_with_task_event| tokio::sync::mpsc::error::SendError(()))?;
Ok(GracefulShutdown::new(
self.on_shutdown.clone(),
GracefulShutdownGuard::new(Arc::clone(&self.graceful_tasks)),
))
}
}
impl TaskSpawner for TaskExecutor {
fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> {
self.metrics.inc_regular_tasks();
self.spawn(fut)
}
fn spawn_critical(&self, name: &'static str, fut: BoxFuture<'static, ()>) -> JoinHandle<()> {
self.metrics.inc_critical_tasks();
Self::spawn_critical(self, name, fut)
}
fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> {
self.metrics.inc_regular_tasks();
self.spawn_blocking(fut)
}
fn spawn_critical_blocking(
&self,
name: &'static str,
fut: BoxFuture<'static, ()>,
) -> JoinHandle<()> {
self.metrics.inc_critical_tasks();
Self::spawn_critical_blocking(self, name, fut)
}
}
/// `TaskSpawner` with extended behaviour
#[auto_impl::auto_impl(&, Arc)]
pub trait TaskSpawnerExt: Send + Sync + Unpin + std::fmt::Debug + DynClone {
/// This spawns a critical task onto the runtime.
///
/// If this task panics, the [`TaskManager`] is notified.
/// The [`TaskManager`] will wait until the given future has completed before shutting down.
fn spawn_critical_with_graceful_shutdown_signal<F>(
&self,
name: &'static str,
f: impl FnOnce(GracefulShutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static;
/// This spawns a regular task onto the runtime.
///
/// The [`TaskManager`] will wait until the given future has completed before shutting down.
fn spawn_with_graceful_shutdown_signal<F>(
&self,
f: impl FnOnce(GracefulShutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static;
}
impl TaskSpawnerExt for TaskExecutor {
fn spawn_critical_with_graceful_shutdown_signal<F>(
&self,
name: &'static str,
f: impl FnOnce(GracefulShutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
Self::spawn_critical_with_graceful_shutdown_signal(self, name, f)
}
fn spawn_with_graceful_shutdown_signal<F>(
&self,
f: impl FnOnce(GracefulShutdown) -> F,
) -> JoinHandle<()>
where
F: Future<Output = ()> + Send + 'static,
{
Self::spawn_with_graceful_shutdown_signal(self, f)
}
}
/// Determines how a task is spawned
enum TaskKind {
/// Spawn the task to the default executor [`Handle::spawn`]
Default,
/// Spawn the task to the blocking executor [`Handle::spawn_blocking`]
Blocking,
}
/// Error returned by `try_current` when no task executor has been configured.
#[derive(Debug, Default, thiserror::Error)]
#[error("No current task executor available.")]
#[non_exhaustive]
pub struct NoCurrentTaskExecutorError;
#[cfg(test)]
mod tests {
use super::*;
use std::{sync::atomic::AtomicBool, time::Duration};
#[test]
fn test_cloneable() {
#[derive(Clone)]
struct ExecutorWrapper {
_e: Box<dyn TaskSpawner>,
}
let executor: Box<dyn TaskSpawner> = Box::<TokioTaskExecutor>::default();
let _e = dyn_clone::clone_box(&*executor);
let e = ExecutorWrapper { _e };
let _e2 = e;
}
#[test]
fn test_critical() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle);
let executor = manager.executor();
executor.spawn_critical("this is a critical task", async { panic!("intentionally panic") });
runtime.block_on(async move {
let err_result = manager.await;
assert!(err_result.is_err(), "Expected TaskManager to return an error due to panic");
let panicked_err = err_result.unwrap_err();
assert_eq!(panicked_err.task_name, "this is a critical task");
assert_eq!(panicked_err.error, Some("intentionally panic".to_string()));
})
}
// Tests that spawned tasks are terminated if the `TaskManager` drops
#[test]
fn test_manager_shutdown_critical() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle.clone());
let executor = manager.executor();
let (signal, shutdown) = signal();
executor.spawn_critical("this is a critical task", async move {
tokio::time::sleep(Duration::from_millis(200)).await;
drop(signal);
});
drop(manager);
handle.block_on(shutdown);
}
// Tests that spawned tasks are terminated if the `TaskManager` drops
#[test]
fn test_manager_shutdown() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle.clone());
let executor = manager.executor();
let (signal, shutdown) = signal();
executor.spawn(Box::pin(async move {
tokio::time::sleep(Duration::from_millis(200)).await;
drop(signal);
}));
drop(manager);
handle.block_on(shutdown);
}
#[test]
fn test_manager_graceful_shutdown() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle);
let executor = manager.executor();
let val = Arc::new(AtomicBool::new(false));
let c = val.clone();
executor.spawn_critical_with_graceful_shutdown_signal("grace", |shutdown| async move {
let _guard = shutdown.await;
tokio::time::sleep(Duration::from_millis(200)).await;
c.store(true, Ordering::Relaxed);
});
manager.graceful_shutdown();
assert!(val.load(Ordering::Relaxed));
}
#[test]
fn test_manager_graceful_shutdown_many() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle);
let executor = manager.executor();
let counter = Arc::new(AtomicUsize::new(0));
let num = 10;
for _ in 0..num {
let c = counter.clone();
executor.spawn_critical_with_graceful_shutdown_signal(
"grace",
move |shutdown| async move {
let _guard = shutdown.await;
tokio::time::sleep(Duration::from_millis(200)).await;
c.fetch_add(1, Ordering::SeqCst);
},
);
}
manager.graceful_shutdown();
assert_eq!(counter.load(Ordering::Relaxed), num);
}
#[test]
fn test_manager_graceful_shutdown_timeout() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let manager = TaskManager::new(handle);
let executor = manager.executor();
let timeout = Duration::from_millis(500);
let val = Arc::new(AtomicBool::new(false));
let val2 = val.clone();
executor.spawn_critical_with_graceful_shutdown_signal("grace", |shutdown| async move {
let _guard = shutdown.await;
tokio::time::sleep(timeout * 3).await;
val2.store(true, Ordering::Relaxed);
unreachable!("should not be reached");
});
manager.graceful_shutdown_with_timeout(timeout);
assert!(!val.load(Ordering::Relaxed));
}
#[test]
fn can_access_global() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let handle = runtime.handle().clone();
let _manager = TaskManager::new(handle);
let _executor = TaskExecutor::try_current().unwrap();
}
#[test]
fn test_graceful_shutdown_triggered_by_executor() {
let runtime = tokio::runtime::Runtime::new().unwrap();
let task_manager = TaskManager::new(runtime.handle().clone());
let executor = task_manager.executor();
let task_did_shutdown_flag = Arc::new(AtomicBool::new(false));
let flag_clone = task_did_shutdown_flag.clone();
let spawned_task_handle = executor.spawn_with_signal(|shutdown_signal| async move {
shutdown_signal.await;
flag_clone.store(true, Ordering::SeqCst);
});
let manager_future_handle = runtime.spawn(task_manager);
let send_result = executor.initiate_graceful_shutdown();
assert!(send_result.is_ok(), "Sending the graceful shutdown signal should succeed and return a GracefulShutdown future");
let manager_final_result = runtime.block_on(manager_future_handle);
assert!(manager_final_result.is_ok(), "TaskManager task should not panic");
assert_eq!(
manager_final_result.unwrap(),
Ok(()),
"TaskManager should resolve cleanly with Ok(()) after graceful shutdown request"
);
let task_join_result = runtime.block_on(spawned_task_handle);
assert!(task_join_result.is_ok(), "Spawned task should complete without panic");
assert!(
task_did_shutdown_flag.load(Ordering::Relaxed),
"Task should have received the shutdown signal and set the flag"
);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tasks/src/shutdown.rs | crates/tasks/src/shutdown.rs | //! Helper for shutdown signals
use futures_util::{
future::{FusedFuture, Shared},
FutureExt,
};
use std::{
future::Future,
pin::Pin,
sync::{atomic::AtomicUsize, Arc},
task::{ready, Context, Poll},
};
use tokio::sync::oneshot;
/// A Future that resolves when the shutdown event has been fired.
#[derive(Debug)]
pub struct GracefulShutdown {
shutdown: Shutdown,
guard: Option<GracefulShutdownGuard>,
}
impl GracefulShutdown {
pub(crate) const fn new(shutdown: Shutdown, guard: GracefulShutdownGuard) -> Self {
Self { shutdown, guard: Some(guard) }
}
/// Returns a new shutdown future that is ignores the returned [`GracefulShutdownGuard`].
///
/// This just maps the return value of the future to `()`, it does not drop the guard.
pub fn ignore_guard(self) -> impl Future<Output = ()> + Send + Sync + Unpin + 'static {
self.map(drop)
}
}
impl Future for GracefulShutdown {
type Output = GracefulShutdownGuard;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
ready!(self.shutdown.poll_unpin(cx));
Poll::Ready(self.get_mut().guard.take().expect("Future polled after completion"))
}
}
impl Clone for GracefulShutdown {
fn clone(&self) -> Self {
Self {
shutdown: self.shutdown.clone(),
guard: self.guard.as_ref().map(|g| GracefulShutdownGuard::new(Arc::clone(&g.0))),
}
}
}
/// A guard that fires once dropped to signal the [`TaskManager`](crate::TaskManager) that the
/// [`GracefulShutdown`] has completed.
#[derive(Debug)]
#[must_use = "if unused the task will not be gracefully shutdown"]
pub struct GracefulShutdownGuard(Arc<AtomicUsize>);
impl GracefulShutdownGuard {
pub(crate) fn new(counter: Arc<AtomicUsize>) -> Self {
counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
Self(counter)
}
}
impl Drop for GracefulShutdownGuard {
fn drop(&mut self) {
self.0.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
}
}
/// A Future that resolves when the shutdown event has been fired.
#[derive(Debug, Clone)]
pub struct Shutdown(Shared<oneshot::Receiver<()>>);
impl Future for Shutdown {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let pin = self.get_mut();
if pin.0.is_terminated() || pin.0.poll_unpin(cx).is_ready() {
Poll::Ready(())
} else {
Poll::Pending
}
}
}
/// Shutdown signal that fires either manually or on drop by closing the channel
#[derive(Debug)]
pub struct Signal(oneshot::Sender<()>);
impl Signal {
/// Fire the signal manually.
pub fn fire(self) {
let _ = self.0.send(());
}
}
/// Create a channel pair that's used to propagate shutdown event
pub fn signal() -> (Signal, Shutdown) {
let (sender, receiver) = oneshot::channel();
(Signal(sender), Shutdown(receiver.shared()))
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::join_all;
use std::time::Duration;
#[tokio::test(flavor = "multi_thread")]
async fn test_shutdown() {
let (_signal, _shutdown) = signal();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_drop_signal() {
let (signal, shutdown) = signal();
tokio::task::spawn(async move {
tokio::time::sleep(Duration::from_millis(500)).await;
drop(signal)
});
shutdown.await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_multi_shutdowns() {
let (signal, shutdown) = signal();
let mut tasks = Vec::with_capacity(100);
for _ in 0..100 {
let shutdown = shutdown.clone();
let task = tokio::task::spawn(async move {
shutdown.await;
});
tasks.push(task);
}
drop(signal);
join_all(tasks).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_drop_signal_from_thread() {
let (signal, shutdown) = signal();
let _thread = std::thread::spawn(|| {
std::thread::sleep(Duration::from_millis(500));
drop(signal)
});
shutdown.await;
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tasks/src/metrics.rs | crates/tasks/src/metrics.rs | //! Task Executor Metrics
use core::fmt;
use reth_metrics::{metrics::Counter, Metrics};
/// Task Executor Metrics
#[derive(Metrics, Clone)]
#[metrics(scope = "executor.spawn")]
pub struct TaskExecutorMetrics {
/// Number of spawned critical tasks
pub(crate) critical_tasks_total: Counter,
/// Number of finished spawned critical tasks
pub(crate) finished_critical_tasks_total: Counter,
/// Number of spawned regular tasks
pub(crate) regular_tasks_total: Counter,
/// Number of finished spawned regular tasks
pub(crate) finished_regular_tasks_total: Counter,
}
impl TaskExecutorMetrics {
/// Increments the counter for spawned critical tasks.
pub(crate) fn inc_critical_tasks(&self) {
self.critical_tasks_total.increment(1);
}
/// Increments the counter for spawned regular tasks.
pub(crate) fn inc_regular_tasks(&self) {
self.regular_tasks_total.increment(1);
}
}
/// Helper type for increasing counters even if a task fails
pub struct IncCounterOnDrop(Counter);
impl fmt::Debug for IncCounterOnDrop {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IncCounterOnDrop").finish()
}
}
impl IncCounterOnDrop {
/// Creates a new instance of `IncCounterOnDrop` with the given counter.
pub const fn new(counter: Counter) -> Self {
Self(counter)
}
}
impl Drop for IncCounterOnDrop {
/// Increment the counter when the instance is dropped.
fn drop(&mut self) {
self.0.increment(1);
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/tasks/src/pool.rs | crates/tasks/src/pool.rs | //! Additional helpers for executing tracing calls
use std::{
future::Future,
panic::{catch_unwind, AssertUnwindSafe},
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
thread,
};
use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit, Semaphore};
/// RPC Tracing call guard semaphore.
///
/// This is used to restrict the number of concurrent RPC requests to tracing methods like
/// `debug_traceTransaction` as well as `eth_getProof` because they can consume a lot of
/// memory and CPU.
///
/// This types serves as an entry guard for the [`BlockingTaskPool`] and is used to rate limit
/// parallel blocking tasks in the pool.
#[derive(Clone, Debug)]
pub struct BlockingTaskGuard(Arc<Semaphore>);
impl BlockingTaskGuard {
/// Create a new `BlockingTaskGuard` with the given maximum number of blocking tasks in
/// parallel.
pub fn new(max_blocking_tasks: usize) -> Self {
Self(Arc::new(Semaphore::new(max_blocking_tasks)))
}
/// See also [`Semaphore::acquire_owned`]
pub async fn acquire_owned(self) -> Result<OwnedSemaphorePermit, AcquireError> {
self.0.acquire_owned().await
}
/// See also [`Semaphore::acquire_many_owned`]
pub async fn acquire_many_owned(self, n: u32) -> Result<OwnedSemaphorePermit, AcquireError> {
self.0.acquire_many_owned(n).await
}
}
/// Used to execute blocking tasks on a rayon threadpool from within a tokio runtime.
///
/// This is a dedicated threadpool for blocking tasks which are CPU bound.
/// RPC calls that perform blocking IO (disk lookups) are not executed on this pool but on the tokio
/// runtime's blocking pool, which performs poorly with CPU bound tasks (see
/// <https://ryhl.io/blog/async-what-is-blocking/>). Once the tokio blocking
/// pool is saturated it is converted into a queue, blocking tasks could then interfere with the
/// queue and block other RPC calls.
///
/// See also [tokio-docs] for more information.
///
/// [tokio-docs]: https://docs.rs/tokio/latest/tokio/index.html#cpu-bound-tasks-and-blocking-code
#[derive(Clone, Debug)]
pub struct BlockingTaskPool {
    // The rayon threadpool tasks are spawned on; wrapped in `Arc` so clones of this
    // handle all submit work to the same pool.
    pool: Arc<rayon::ThreadPool>,
}
impl BlockingTaskPool {
    /// Create a new `BlockingTaskPool` with the given threadpool.
    pub fn new(pool: rayon::ThreadPool) -> Self {
        Self { pool: Arc::new(pool) }
    }

    /// Convenience function to start building a new threadpool.
    pub fn builder() -> rayon::ThreadPoolBuilder {
        rayon::ThreadPoolBuilder::new()
    }

    /// Convenience function to build a new threadpool with the default configuration.
    ///
    /// Uses [`rayon::ThreadPoolBuilder::build`](rayon::ThreadPoolBuilder::build) defaults but
    /// increases the stack size to 8MB.
    pub fn build() -> Result<Self, rayon::ThreadPoolBuildError> {
        Self::builder()
            // Increase the worker stack size as documented above: EVM-heavy RPC calls can
            // recurse deeply and overflow rayon's default stack. Without this call the
            // builder would use rayon's defaults, contradicting the doc comment.
            .stack_size(8 * 1024 * 1024)
            .build()
            .map(Self::new)
    }

    /// Asynchronous wrapper around Rayon's
    /// [`ThreadPool::spawn`](rayon::ThreadPool::spawn).
    ///
    /// Runs a function on the configured threadpool, returning a future that resolves with the
    /// function's return value.
    ///
    /// If the function panics, the future will resolve to an error.
    pub fn spawn<F, R>(&self, func: F) -> BlockingTaskHandle<R>
    where
        F: FnOnce() -> R + Send + 'static,
        R: Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        self.pool.spawn(move || {
            // The receiver may already be dropped if the caller lost interest;
            // ignore the send error in that case.
            let _result = tx.send(catch_unwind(AssertUnwindSafe(func)));
        });
        BlockingTaskHandle { rx }
    }

    /// Asynchronous wrapper around Rayon's
    /// [`ThreadPool::spawn_fifo`](rayon::ThreadPool::spawn_fifo).
    ///
    /// Runs a function on the configured threadpool, returning a future that resolves with the
    /// function's return value.
    ///
    /// If the function panics, the future will resolve to an error.
    pub fn spawn_fifo<F, R>(&self, func: F) -> BlockingTaskHandle<R>
    where
        F: FnOnce() -> R + Send + 'static,
        R: Send + 'static,
    {
        let (tx, rx) = oneshot::channel();
        self.pool.spawn_fifo(move || {
            // The receiver may already be dropped if the caller lost interest;
            // ignore the send error in that case.
            let _result = tx.send(catch_unwind(AssertUnwindSafe(func)));
        });
        BlockingTaskHandle { rx }
    }
}
/// Async handle for a blocking task running in a Rayon thread pool.
///
/// Resolves with the task's return value, or with an error if the task panicked or was
/// dropped before sending a result.
///
/// ## Panics
///
/// If polled from outside a tokio runtime.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
#[pin_project::pin_project]
pub struct BlockingTaskHandle<T> {
    // Receives the `catch_unwind` result from the rayon worker thread.
    #[pin]
    pub(crate) rx: oneshot::Receiver<thread::Result<T>>,
}
impl<T> Future for BlockingTaskHandle<T> {
    type Output = thread::Result<T>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Wait for the rayon worker to deliver its `catch_unwind` result over the channel.
        let outcome = ready!(self.project().rx.poll(cx));
        // A closed channel means the sender was dropped without sending anything;
        // surface that as an error rather than a panic.
        Poll::Ready(outcome.unwrap_or_else(|_| Err(Box::<TokioBlockingTaskError>::default())))
    }
}
/// An error returned when the Tokio channel is dropped while awaiting a result.
///
/// This should only happen if the spawned task's sender side of the channel is dropped
/// before a result was sent back, e.g. if the task was never run.
#[derive(Debug, Default, thiserror::Error)]
#[error("tokio channel dropped while awaiting result")]
#[non_exhaustive]
pub struct TokioBlockingTaskError;
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn blocking_pool() {
        // A successfully completed task resolves with its return value.
        let pool = BlockingTaskPool::build().unwrap();
        let handle = pool.spawn(|| 5);
        assert_eq!(handle.await.unwrap(), 5);
    }

    #[tokio::test]
    async fn blocking_pool_panic() {
        // A panicking task resolves to an error instead of propagating the panic.
        let pool = BlockingTaskPool::build().unwrap();
        let handle = pool.spawn(|| -> i32 { panic!() });
        assert!(handle.await.is_err());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/database_provider.rs | crates/storage/storage-api/src/database_provider.rs | use alloc::vec::Vec;
use core::ops::{Bound, RangeBounds};
use reth_db_api::{
common::KeyValue,
cursor::DbCursorRO,
database::Database,
table::Table,
transaction::{DbTx, DbTxMut},
DatabaseError,
};
use reth_prune_types::PruneModes;
use reth_storage_errors::provider::ProviderResult;
/// Database provider.
///
/// Gives access to the provider's underlying database transaction plus convenience helpers
/// for reading, removing, and taking whole table ranges.
pub trait DBProvider: Sized {
    /// Underlying database transaction held by the provider.
    type Tx: DbTx;
    /// Returns a reference to the underlying transaction.
    fn tx_ref(&self) -> &Self::Tx;
    /// Returns a mutable reference to the underlying transaction.
    fn tx_mut(&mut self) -> &mut Self::Tx;
    /// Consumes the provider and returns the underlying transaction.
    fn into_tx(self) -> Self::Tx;
    /// Disables long-lived read transaction safety guarantees for leak prevention and
    /// observability improvements.
    ///
    /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions
    /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning
    /// that Reth as a node is offline and not progressing.
    fn disable_long_read_transaction_safety(mut self) -> Self {
        self.tx_mut().disable_long_read_transaction_safety();
        self
    }
    /// Commit database transaction.
    ///
    /// The boolean result is passed through unchanged from the underlying transaction's
    /// `commit`.
    fn commit(self) -> ProviderResult<bool> {
        Ok(self.into_tx().commit()?)
    }
    /// Returns a reference to prune modes.
    fn prune_modes_ref(&self) -> &PruneModes;
    /// Return full table as Vec, walking forward from `T::Key::default()`.
    fn table<T: Table>(&self) -> Result<Vec<KeyValue<T>>, DatabaseError>
    where
        T::Key: Default + Ord,
    {
        self.tx_ref()
            .cursor_read::<T>()?
            .walk(Some(T::Key::default()))?
            .collect::<Result<Vec<_>, DatabaseError>>()
    }
    /// Return a list of entries from the table, based on the given range.
    #[inline]
    fn get<T: Table>(
        &self,
        range: impl RangeBounds<T::Key>,
    ) -> Result<Vec<KeyValue<T>>, DatabaseError> {
        self.tx_ref().cursor_read::<T>()?.walk_range(range)?.collect::<Result<Vec<_>, _>>()
    }
    /// Iterates over read only values in the given table and collects them into a vector.
    ///
    /// Early-returns if the range is empty, without opening a cursor transaction.
    /// Note that ranges without an upper bound also short-circuit and return an empty
    /// vector, because no capacity hint can be computed for them.
    fn cursor_read_collect<T: Table<Key = u64>>(
        &self,
        range: impl RangeBounds<T::Key>,
    ) -> ProviderResult<Vec<T::Value>> {
        let capacity = match range_size_hint(&range) {
            Some(0) | None => return Ok(Vec::new()),
            Some(capacity) => capacity,
        };
        let mut cursor = self.tx_ref().cursor_read::<T>()?;
        self.cursor_collect_with_capacity(&mut cursor, range, capacity)
    }
    /// Iterates over read only values in the given table and collects them into a vector.
    fn cursor_collect<T: Table<Key = u64>>(
        &self,
        cursor: &mut impl DbCursorRO<T>,
        range: impl RangeBounds<T::Key>,
    ) -> ProviderResult<Vec<T::Value>> {
        let capacity = range_size_hint(&range).unwrap_or(0);
        self.cursor_collect_with_capacity(cursor, range, capacity)
    }
    /// Iterates over read only values in the given table and collects them into a vector with
    /// capacity.
    fn cursor_collect_with_capacity<T: Table<Key = u64>>(
        &self,
        cursor: &mut impl DbCursorRO<T>,
        range: impl RangeBounds<T::Key>,
        capacity: usize,
    ) -> ProviderResult<Vec<T::Value>> {
        let mut items = Vec::with_capacity(capacity);
        // Only the values are collected; the keys are discarded.
        for entry in cursor.walk_range(range)? {
            items.push(entry?.1);
        }
        Ok(items)
    }
    /// Remove list of entries from the table. Returns the number of entries removed.
    #[inline]
    fn remove<T: Table>(&self, range: impl RangeBounds<T::Key>) -> Result<usize, DatabaseError>
    where
        Self::Tx: DbTxMut,
    {
        let mut entries = 0;
        let mut cursor_write = self.tx_ref().cursor_write::<T>()?;
        let mut walker = cursor_write.walk_range(range)?;
        // Step through the range, deleting the entry under the cursor at each position.
        while walker.next().transpose()?.is_some() {
            walker.delete_current()?;
            entries += 1;
        }
        Ok(entries)
    }
    /// Return a list of entries from the table, and remove them, based on the given range.
    #[inline]
    fn take<T: Table>(
        &self,
        range: impl RangeBounds<T::Key>,
    ) -> Result<Vec<KeyValue<T>>, DatabaseError>
    where
        Self::Tx: DbTxMut,
    {
        let mut cursor_write = self.tx_ref().cursor_write::<T>()?;
        let mut walker = cursor_write.walk_range(range)?;
        let mut items = Vec::new();
        // Like `remove`, but each deleted entry is kept and returned to the caller.
        while let Some(i) = walker.next().transpose()? {
            walker.delete_current()?;
            items.push(i)
        }
        Ok(items)
    }
}
/// Database provider factory.
///
/// Creates read-only and read-write [`DBProvider`] instances backed by the same database.
#[auto_impl::auto_impl(&, Arc)]
pub trait DatabaseProviderFactory: Send + Sync {
    /// Database this factory produces providers for.
    type DB: Database;
    /// Provider type returned by the factory.
    type Provider: DBProvider<Tx = <Self::DB as Database>::TX>;
    /// Read-write provider type returned by the factory.
    type ProviderRW: DBProvider<Tx = <Self::DB as Database>::TXMut>;
    /// Create new read-only database provider.
    fn database_provider_ro(&self) -> ProviderResult<Self::Provider>;
    /// Create new read-write database provider.
    fn database_provider_rw(&self) -> ProviderResult<Self::ProviderRW>;
}

/// Helper type alias to get the associated transaction type from a [`DatabaseProviderFactory`].
pub type FactoryTx<F> = <<F as DatabaseProviderFactory>::DB as Database>::TX;
/// Computes the number of keys covered by `range`, if it can be determined.
///
/// Returns `None` when the range has no upper bound, when the excluded start would
/// overflow `u64`, or when the range is inverted (end before start).
fn range_size_hint(range: &impl RangeBounds<u64>) -> Option<usize> {
    // Normalize the start to an inclusive lower bound.
    let lower = match range.start_bound().cloned() {
        Bound::Unbounded => 0,
        Bound::Included(n) => n,
        Bound::Excluded(n) => n.checked_add(1)?,
    };
    // Normalize the end to an exclusive upper bound; unbounded ends have no hint.
    let upper = match range.end_bound().cloned() {
        Bound::Unbounded => return None,
        Bound::Included(n) => n.saturating_add(1),
        Bound::Excluded(n) => n,
    };
    // An inverted range yields no hint rather than underflowing.
    upper.checked_sub(lower).map(|len| len as usize)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/chain_info.rs | crates/storage/storage-api/src/chain_info.rs | use alloy_rpc_types_engine::ForkchoiceState;
use reth_primitives_traits::SealedHeader;
/// A type that can track updates related to fork choice updates.
///
/// Implementors record the canonical/safe/finalized heads as fork choice updates arrive
/// from the consensus layer.
pub trait CanonChainTracker: Send + Sync {
    /// The header type.
    type Header: Send + Sync;
    /// Notify the tracker about a received fork choice update.
    fn on_forkchoice_update_received(&self, update: &ForkchoiceState);
    /// Returns the last time a fork choice update was received from the CL
    /// ([`CanonChainTracker::on_forkchoice_update_received`])
    #[cfg(feature = "std")]
    fn last_received_update_timestamp(&self) -> Option<std::time::Instant>;
    /// Sets the canonical head of the chain.
    fn set_canonical_head(&self, header: SealedHeader<Self::Header>);
    /// Sets the safe block of the chain.
    fn set_safe(&self, header: SealedHeader<Self::Header>);
    /// Sets the finalized block of the chain.
    fn set_finalized(&self, header: SealedHeader<Self::Header>);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/stats.rs | crates/storage/storage-api/src/stats.rs | use reth_db_api::table::Table;
/// The trait for fetching provider statistics.
#[auto_impl::auto_impl(&, Arc)]
pub trait StatsReader: Send + Sync {
    /// Fetch the number of entries in the corresponding [Table]. Depending on the provider, it may
    /// route to different data sources other than [Table].
    fn count_entries<T: Table>(&self) -> reth_storage_errors::provider::ProviderResult<usize>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/lib.rs | crates/storage/storage-api/src/lib.rs | //! Collection of traits and types for common storage access.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
// Re-export used error types.
pub use reth_storage_errors as errors;
mod account;
pub use account::*;
mod block;
pub use block::*;
mod block_id;
pub use block_id::*;
mod block_hash;
pub use block_hash::*;
#[cfg(feature = "db-api")]
mod chain;
#[cfg(feature = "db-api")]
pub use chain::*;
mod header;
pub use header::*;
mod prune_checkpoint;
pub use prune_checkpoint::*;
mod receipts;
pub use receipts::*;
mod stage_checkpoint;
pub use stage_checkpoint::*;
mod state;
pub use state::*;
mod storage;
pub use storage::*;
mod transactions;
pub use transactions::*;
mod trie;
pub use trie::*;
mod chain_info;
pub use chain_info::*;
#[cfg(feature = "db-api")]
mod database_provider;
#[cfg(feature = "db-api")]
pub use database_provider::*;
pub mod noop;
#[cfg(feature = "db-api")]
mod history;
#[cfg(feature = "db-api")]
pub use history::*;
#[cfg(feature = "db-api")]
mod hashing;
#[cfg(feature = "db-api")]
pub use hashing::*;
#[cfg(feature = "db-api")]
mod stats;
#[cfg(feature = "db-api")]
pub use stats::*;
mod primitives;
pub use primitives::*;
mod block_indices;
pub use block_indices::*;
mod block_writer;
pub use block_writer::*;
mod state_writer;
pub use state_writer::*;
mod header_sync_gap;
pub use header_sync_gap::HeaderSyncGapProvider;
mod full;
pub use full::*;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/noop.rs | crates/storage/storage-api/src/noop.rs | //! Various noop implementations for traits.
use crate::{
AccountReader, BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader,
BlockReader, BlockReaderIdExt, BlockSource, BytecodeReader, ChangeSetReader,
HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, PruneCheckpointReader,
ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProofProvider,
StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider,
StorageRootProvider, TransactionVariant, TransactionsProvider,
};
#[cfg(feature = "db-api")]
use crate::{DBProvider, DatabaseProviderFactory};
use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
use alloy_consensus::transaction::TransactionMeta;
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
use alloy_primitives::{
Address, BlockHash, BlockNumber, Bytes, StorageKey, TxHash, TxNumber, B256, U256,
};
use core::{
fmt::Debug,
marker::PhantomData,
ops::{RangeBounds, RangeInclusive},
};
use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET};
#[cfg(feature = "db-api")]
use reth_db_api::mock::{DatabaseMock, TxMock};
use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices};
use reth_ethereum_primitives::EthPrimitives;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader};
#[cfg(feature = "db-api")]
use reth_prune_types::PruneModes;
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use reth_trie_common::{
updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
MultiProofTargets, StorageMultiProof, StorageProof, TrieInput,
};
use revm_state::FlaggedStorage;
/// Supports various api interfaces for testing purposes.
#[derive(Debug)]
#[non_exhaustive]
pub struct NoopProvider<ChainSpec = reth_chainspec::ChainSpec, N = EthPrimitives> {
    // Chain spec handed back by `ChainSpecProvider::chain_spec`.
    chain_spec: Arc<ChainSpec>,
    // Mock transaction backing the `DBProvider` implementation.
    #[cfg(feature = "db-api")]
    tx: TxMock,
    // Prune configuration returned by `DBProvider::prune_modes_ref`.
    #[cfg(feature = "db-api")]
    prune_modes: PruneModes,
    // Ties the provider to its node primitive types without storing any data.
    _phantom: PhantomData<N>,
}
impl<ChainSpec, N> NoopProvider<ChainSpec, N> {
    /// Create a new instance for specific primitive types.
    ///
    /// The db-api fields start out with a fresh mock transaction and pruning disabled.
    pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
        Self {
            chain_spec,
            #[cfg(feature = "db-api")]
            tx: TxMock::default(),
            #[cfg(feature = "db-api")]
            prune_modes: PruneModes::none(),
            _phantom: Default::default(),
        }
    }
}
impl<ChainSpec> NoopProvider<ChainSpec> {
    /// Create a new instance of the `NoopBlockReader`.
    ///
    /// Uses [`EthPrimitives`] as the node primitive types. Delegates to [`Self::new`] so the
    /// two constructors cannot drift apart.
    pub fn eth(chain_spec: Arc<ChainSpec>) -> Self {
        Self::new(chain_spec)
    }
}
impl NoopProvider {
    /// Create a new instance of the [`NoopProvider`] with the mainnet chain spec.
    pub fn mainnet() -> Self {
        Self::eth(MAINNET.clone())
    }
}

// The default provider is the mainnet one.
impl Default for NoopProvider {
    fn default() -> Self {
        Self::mainnet()
    }
}
// Manual impl: `#[derive(Clone)]` would add `ChainSpec: Clone` and `N: Clone` bounds,
// which are unnecessary since only the `Arc` (and cheap db-api fields) are cloned.
impl<ChainSpec, N> Clone for NoopProvider<ChainSpec, N> {
    fn clone(&self) -> Self {
        Self {
            chain_spec: Arc::clone(&self.chain_spec),
            #[cfg(feature = "db-api")]
            tx: self.tx.clone(),
            #[cfg(feature = "db-api")]
            prune_modes: self.prune_modes.clone(),
            _phantom: Default::default(),
        }
    }
}
/// Noop implementation for testing purposes
// No blocks exist: single lookups yield `None` and range queries yield empty vectors.
impl<ChainSpec: Send + Sync, N: Send + Sync> BlockHashReader for NoopProvider<ChainSpec, N> {
    fn block_hash(&self, _number: u64) -> ProviderResult<Option<B256>> {
        Ok(None)
    }
    fn canonical_hashes_range(
        &self,
        _start: BlockNumber,
        _end: BlockNumber,
    ) -> ProviderResult<Vec<B256>> {
        Ok(Vec::new())
    }
}
// Noop: the chain is always empty, so block numbers are 0 and hash lookups find nothing.
impl<ChainSpec: Send + Sync, N: Send + Sync> BlockNumReader for NoopProvider<ChainSpec, N> {
    fn chain_info(&self) -> ProviderResult<ChainInfo> {
        Ok(ChainInfo::default())
    }
    fn best_block_number(&self) -> ProviderResult<BlockNumber> {
        Ok(0)
    }
    fn last_block_number(&self) -> ProviderResult<BlockNumber> {
        Ok(0)
    }
    fn block_number(&self, _hash: B256) -> ProviderResult<Option<BlockNumber>> {
        Ok(None)
    }
}
// Hands out the chain spec the provider was constructed with.
impl<ChainSpec: EthChainSpec + 'static, N: Debug + Send + Sync + 'static> ChainSpecProvider
    for NoopProvider<ChainSpec, N>
{
    type ChainSpec = ChainSpec;
    fn chain_spec(&self) -> Arc<Self::ChainSpec> {
        self.chain_spec.clone()
    }
}
// Noop: no pending/safe/finalized block is ever known.
impl<C: Send + Sync, N: NodePrimitives> BlockIdReader for NoopProvider<C, N> {
    fn pending_block_num_hash(&self) -> ProviderResult<Option<alloy_eips::BlockNumHash>> {
        Ok(None)
    }
    fn safe_block_num_hash(&self) -> ProviderResult<Option<alloy_eips::BlockNumHash>> {
        Ok(None)
    }
    fn finalized_block_num_hash(&self) -> ProviderResult<Option<alloy_eips::BlockNumHash>> {
        Ok(None)
    }
}
// Noop: id-based block/header lookups never find anything.
impl<C: Send + Sync, N: NodePrimitives> BlockReaderIdExt for NoopProvider<C, N> {
    fn block_by_id(&self, _id: BlockId) -> ProviderResult<Option<N::Block>> {
        Ok(None)
    }
    fn sealed_header_by_id(
        &self,
        _id: BlockId,
    ) -> ProviderResult<Option<SealedHeader<N::BlockHeader>>> {
        Ok(None)
    }
    fn header_by_id(&self, _id: BlockId) -> ProviderResult<Option<N::BlockHeader>> {
        Ok(None)
    }
}
// Noop: every block lookup yields `None` and every range query yields an empty vector.
impl<C: Send + Sync, N: NodePrimitives> BlockReader for NoopProvider<C, N> {
    type Block = N::Block;
    fn find_block_by_hash(
        &self,
        _hash: B256,
        _source: BlockSource,
    ) -> ProviderResult<Option<Self::Block>> {
        Ok(None)
    }
    fn block(&self, _id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>> {
        Ok(None)
    }
    fn pending_block(&self) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        Ok(None)
    }
    fn pending_block_and_receipts(
        &self,
    ) -> ProviderResult<Option<(RecoveredBlock<Self::Block>, Vec<Self::Receipt>)>> {
        Ok(None)
    }
    fn recovered_block(
        &self,
        _id: BlockHashOrNumber,
        _transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        Ok(None)
    }
    fn sealed_block_with_senders(
        &self,
        _id: BlockHashOrNumber,
        _transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        Ok(None)
    }
    fn block_range(&self, _range: RangeInclusive<BlockNumber>) -> ProviderResult<Vec<Self::Block>> {
        Ok(Vec::new())
    }
    fn block_with_senders_range(
        &self,
        _range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
        Ok(Vec::new())
    }
    fn recovered_block_range(
        &self,
        _range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
        Ok(Vec::new())
    }
}
// Noop: no transactions are stored; lookups yield `None`, ranges yield empty vectors.
impl<C: Send + Sync, N: NodePrimitives> TransactionsProvider for NoopProvider<C, N> {
    type Transaction = N::SignedTx;
    fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult<Option<TxNumber>> {
        Ok(None)
    }
    fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult<Option<Self::Transaction>> {
        Ok(None)
    }
    fn transaction_by_id_unhashed(
        &self,
        _id: TxNumber,
    ) -> ProviderResult<Option<Self::Transaction>> {
        Ok(None)
    }
    fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult<Option<Self::Transaction>> {
        Ok(None)
    }
    fn transaction_by_hash_with_meta(
        &self,
        _hash: TxHash,
    ) -> ProviderResult<Option<(Self::Transaction, TransactionMeta)>> {
        Ok(None)
    }
    fn transaction_block(&self, _id: TxNumber) -> ProviderResult<Option<BlockNumber>> {
        Ok(None)
    }
    fn transactions_by_block(
        &self,
        _block_id: BlockHashOrNumber,
    ) -> ProviderResult<Option<Vec<Self::Transaction>>> {
        Ok(None)
    }
    fn transactions_by_block_range(
        &self,
        _range: impl RangeBounds<BlockNumber>,
    ) -> ProviderResult<Vec<Vec<Self::Transaction>>> {
        Ok(Vec::default())
    }
    fn transactions_by_tx_range(
        &self,
        _range: impl RangeBounds<TxNumber>,
    ) -> ProviderResult<Vec<Self::Transaction>> {
        Ok(Vec::default())
    }
    fn senders_by_tx_range(
        &self,
        _range: impl RangeBounds<TxNumber>,
    ) -> ProviderResult<Vec<Address>> {
        Ok(Vec::default())
    }
    fn transaction_sender(&self, _id: TxNumber) -> ProviderResult<Option<Address>> {
        Ok(None)
    }
}
// Noop: no receipts are stored.
impl<C: Send + Sync, N: NodePrimitives> ReceiptProvider for NoopProvider<C, N> {
    type Receipt = N::Receipt;
    fn receipt(&self, _id: TxNumber) -> ProviderResult<Option<Self::Receipt>> {
        Ok(None)
    }
    fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult<Option<Self::Receipt>> {
        Ok(None)
    }
    fn receipts_by_block(
        &self,
        _block: BlockHashOrNumber,
    ) -> ProviderResult<Option<Vec<Self::Receipt>>> {
        Ok(None)
    }
    fn receipts_by_tx_range(
        &self,
        _range: impl RangeBounds<TxNumber>,
    ) -> ProviderResult<Vec<Self::Receipt>> {
        Ok(Vec::new())
    }
    fn receipts_by_block_range(
        &self,
        _block_range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<Vec<Self::Receipt>>> {
        Ok(Vec::new())
    }
}

// The extension trait's default methods are sufficient for the noop provider.
impl<C: Send + Sync, N: NodePrimitives> ReceiptProviderIdExt for NoopProvider<C, N> {}
// Noop: no headers or total-difficulty data are stored.
impl<C: Send + Sync, N: NodePrimitives> HeaderProvider for NoopProvider<C, N> {
    type Header = N::BlockHeader;
    fn header(&self, _block_hash: &BlockHash) -> ProviderResult<Option<Self::Header>> {
        Ok(None)
    }
    fn header_by_number(&self, _num: u64) -> ProviderResult<Option<Self::Header>> {
        Ok(None)
    }
    fn header_td(&self, _hash: &BlockHash) -> ProviderResult<Option<U256>> {
        Ok(None)
    }
    fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult<Option<U256>> {
        Ok(None)
    }
    fn headers_range(
        &self,
        _range: impl RangeBounds<BlockNumber>,
    ) -> ProviderResult<Vec<Self::Header>> {
        Ok(Vec::new())
    }
    fn sealed_header(
        &self,
        _number: BlockNumber,
    ) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
        Ok(None)
    }
    fn sealed_headers_while(
        &self,
        _range: impl RangeBounds<BlockNumber>,
        _predicate: impl FnMut(&SealedHeader<Self::Header>) -> bool,
    ) -> ProviderResult<Vec<SealedHeader<Self::Header>>> {
        Ok(Vec::new())
    }
}
// Noop: no account state exists.
impl<C: Send + Sync, N: NodePrimitives> AccountReader for NoopProvider<C, N> {
    fn basic_account(&self, _address: &Address) -> ProviderResult<Option<Account>> {
        Ok(None)
    }
}

// Noop: no change sets are recorded for any block.
impl<C: Send + Sync, N: NodePrimitives> ChangeSetReader for NoopProvider<C, N> {
    fn account_block_changeset(
        &self,
        _block_number: BlockNumber,
    ) -> ProviderResult<Vec<AccountBeforeTx>> {
        Ok(Vec::default())
    }
}
// Noop: state roots are always the zero hash and trie updates are always empty.
impl<C: Send + Sync, N: NodePrimitives> StateRootProvider for NoopProvider<C, N> {
    fn state_root(&self, _state: HashedPostState) -> ProviderResult<B256> {
        Ok(B256::default())
    }
    fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult<B256> {
        Ok(B256::default())
    }
    fn state_root_with_updates(
        &self,
        _state: HashedPostState,
    ) -> ProviderResult<(B256, TrieUpdates)> {
        Ok((B256::default(), TrieUpdates::default()))
    }
    fn state_root_from_nodes_with_updates(
        &self,
        _input: TrieInput,
    ) -> ProviderResult<(B256, TrieUpdates)> {
        Ok((B256::default(), TrieUpdates::default()))
    }
}
// Noop: storage roots are the zero hash and proofs are empty placeholders for the
// requested slot(s).
impl<C: Send + Sync, N: NodePrimitives> StorageRootProvider for NoopProvider<C, N> {
    fn storage_root(
        &self,
        _address: Address,
        _hashed_storage: HashedStorage,
    ) -> ProviderResult<B256> {
        Ok(B256::default())
    }
    fn storage_proof(
        &self,
        _address: Address,
        slot: B256,
        _hashed_storage: HashedStorage,
    ) -> ProviderResult<StorageProof> {
        Ok(StorageProof::new(slot))
    }
    fn storage_multiproof(
        &self,
        _address: Address,
        _slots: &[B256],
        _hashed_storage: HashedStorage,
    ) -> ProviderResult<StorageMultiProof> {
        Ok(StorageMultiProof::empty())
    }
}
// Noop: proofs are empty placeholders keyed to the requested address.
impl<C: Send + Sync, N: NodePrimitives> StateProofProvider for NoopProvider<C, N> {
    fn proof(
        &self,
        _input: TrieInput,
        address: Address,
        _slots: &[B256],
    ) -> ProviderResult<AccountProof> {
        Ok(AccountProof::new(address))
    }
    fn multiproof(
        &self,
        _input: TrieInput,
        _targets: MultiProofTargets,
    ) -> ProviderResult<MultiProof> {
        Ok(MultiProof::default())
    }
    fn witness(&self, _input: TrieInput, _target: HashedPostState) -> ProviderResult<Vec<Bytes>> {
        Ok(Vec::default())
    }
}

// Noop: the hashed post state is always empty, regardless of the bundle state.
impl<C: Send + Sync, N: NodePrimitives> HashedPostStateProvider for NoopProvider<C, N> {
    fn hashed_post_state(&self, _bundle_state: &revm_database::BundleState) -> HashedPostState {
        HashedPostState::default()
    }
}
// Noop: no execution outcomes are recorded.
impl<C: Send + Sync, N: NodePrimitives> StateReader for NoopProvider<C, N> {
    type Receipt = N::Receipt;
    fn get_state(
        &self,
        _block: BlockNumber,
    ) -> ProviderResult<Option<ExecutionOutcome<Self::Receipt>>> {
        Ok(None)
    }
}

// Noop: no storage slots exist.
impl<C: Send + Sync, N: NodePrimitives> StateProvider for NoopProvider<C, N> {
    fn storage(
        &self,
        _account: Address,
        _storage_key: StorageKey,
    ) -> ProviderResult<Option<FlaggedStorage>> {
        Ok(None)
    }
}

// Noop: no bytecode is stored.
impl<C: Send + Sync, N: NodePrimitives> BytecodeReader for NoopProvider<C, N> {
    fn bytecode_by_hash(&self, _code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
        Ok(None)
    }
}
// Every state provider this factory hands out is just a clone of the noop provider
// itself. Note that `Safe`/`Finalized` tags error (`SafeBlockNotFound` /
// `FinalizedBlockNotFound`) because the noop `BlockIdReader` never knows those hashes.
impl<C: Send + Sync + 'static, N: NodePrimitives> StateProviderFactory for NoopProvider<C, N> {
    fn latest(&self) -> ProviderResult<StateProviderBox> {
        Ok(Box::new(self.clone()))
    }
    fn state_by_block_number_or_tag(
        &self,
        number_or_tag: BlockNumberOrTag,
    ) -> ProviderResult<StateProviderBox> {
        match number_or_tag {
            BlockNumberOrTag::Latest => self.latest(),
            BlockNumberOrTag::Finalized => {
                // we can only get the finalized state by hash, not by num
                let hash =
                    self.finalized_block_hash()?.ok_or(ProviderError::FinalizedBlockNotFound)?;
                // only look at historical state
                self.history_by_block_hash(hash)
            }
            BlockNumberOrTag::Safe => {
                // we can only get the safe state by hash, not by num
                let hash = self.safe_block_hash()?.ok_or(ProviderError::SafeBlockNotFound)?;
                self.history_by_block_hash(hash)
            }
            BlockNumberOrTag::Earliest => {
                self.history_by_block_number(self.earliest_block_number()?)
            }
            BlockNumberOrTag::Pending => self.pending(),
            BlockNumberOrTag::Number(num) => self.history_by_block_number(num),
        }
    }
    fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult<StateProviderBox> {
        Ok(Box::new(self.clone()))
    }
    fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult<StateProviderBox> {
        Ok(Box::new(self.clone()))
    }
    fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult<StateProviderBox> {
        Ok(Box::new(self.clone()))
    }
    fn pending(&self) -> ProviderResult<StateProviderBox> {
        Ok(Box::new(self.clone()))
    }
    fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult<Option<StateProviderBox>> {
        Ok(Some(Box::new(self.clone())))
    }
    fn maybe_pending(&self) -> ProviderResult<Option<StateProviderBox>> {
        Ok(Some(Box::new(self.clone())))
    }
}
// Noop: no stage checkpoints exist.
impl<C: Send + Sync, N: NodePrimitives> StageCheckpointReader for NoopProvider<C, N> {
    fn get_stage_checkpoint(&self, _id: StageId) -> ProviderResult<Option<StageCheckpoint>> {
        Ok(None)
    }
    fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult<Option<Vec<u8>>> {
        Ok(None)
    }
    fn get_all_checkpoints(&self) -> ProviderResult<Vec<(String, StageCheckpoint)>> {
        Ok(Vec::new())
    }
}

// Noop: no prune checkpoints exist.
impl<C: Send + Sync, N: NodePrimitives> PruneCheckpointReader for NoopProvider<C, N> {
    fn get_prune_checkpoint(
        &self,
        _segment: PruneSegment,
    ) -> ProviderResult<Option<PruneCheckpoint>> {
        Ok(None)
    }
    fn get_prune_checkpoints(&self) -> ProviderResult<Vec<(PruneSegment, PruneCheckpoint)>> {
        Ok(Vec::new())
    }
}
// Exposes the provider's node primitive types.
impl<C: Send + Sync, N: NodePrimitives> NodePrimitivesProvider for NoopProvider<C, N> {
    type Primitives = N;
}

// Noop: no block body indices are stored.
impl<C: Send + Sync, N: Send + Sync> BlockBodyIndicesProvider for NoopProvider<C, N> {
    fn block_body_indices(&self, _num: u64) -> ProviderResult<Option<StoredBlockBodyIndices>> {
        Ok(None)
    }
    fn block_body_indices_range(
        &self,
        _range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<StoredBlockBodyIndices>> {
        Ok(Vec::new())
    }
}
// Backed by the mock transaction and the `PruneModes::none()` set up in the constructor.
#[cfg(feature = "db-api")]
impl<ChainSpec: Send + Sync, N: NodePrimitives> DBProvider for NoopProvider<ChainSpec, N> {
    type Tx = TxMock;
    fn tx_ref(&self) -> &Self::Tx {
        &self.tx
    }
    fn tx_mut(&mut self) -> &mut Self::Tx {
        &mut self.tx
    }
    fn into_tx(self) -> Self::Tx {
        self.tx
    }
    fn prune_modes_ref(&self) -> &PruneModes {
        &self.prune_modes
    }
}

// The factory hands out clones of the noop provider for both read-only and read-write use.
#[cfg(feature = "db-api")]
impl<ChainSpec: Send + Sync, N: NodePrimitives> DatabaseProviderFactory
    for NoopProvider<ChainSpec, N>
{
    type DB = DatabaseMock;
    type Provider = Self;
    type ProviderRW = Self;
    fn database_provider_ro(&self) -> ProviderResult<Self::Provider> {
        Ok(self.clone())
    }
    fn database_provider_rw(&self) -> ProviderResult<Self::ProviderRW> {
        Ok(self.clone())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/transactions.rs | crates/storage/storage-api/src/transactions.rs | use crate::{BlockNumReader, BlockReader};
use alloc::vec::Vec;
use alloy_consensus::transaction::TransactionMeta;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber};
use core::ops::{Range, RangeBounds, RangeInclusive};
use reth_primitives_traits::SignedTransaction;
use reth_storage_errors::provider::{ProviderError, ProviderResult};
/// Enum to control transaction hash inclusion.
///
/// This serves as a hint to the provider to include or omit hashes because hashes are
/// stored separately and are not always needed.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub enum TransactionVariant {
/// Indicates that transactions should be processed without including their hashes.
NoHash,
/// Indicates that transactions should be processed along with their hashes.
#[default]
WithHash,
}
/// Client trait for fetching transactions related data.
#[auto_impl::auto_impl(&, Arc)]
pub trait TransactionsProvider: BlockNumReader + Send + Sync {
    /// The transaction type this provider reads.
    type Transaction: Send + Sync + SignedTransaction;
    /// Get internal transaction identifier by transaction hash.
    ///
    /// This is the inverse of [`TransactionsProvider::transaction_by_id`].
    /// Returns None if the transaction is not found.
    fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult<Option<TxNumber>>;
    /// Get transaction by id, computes hash every time so more expensive.
    fn transaction_by_id(&self, id: TxNumber) -> ProviderResult<Option<Self::Transaction>>;
    /// Get transaction by id without computing the hash.
    fn transaction_by_id_unhashed(&self, id: TxNumber)
        -> ProviderResult<Option<Self::Transaction>>;
    /// Get transaction by transaction hash.
    fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult<Option<Self::Transaction>>;
    /// Get transaction by transaction hash and additional metadata of the block the transaction was
    /// mined in
    fn transaction_by_hash_with_meta(
        &self,
        hash: TxHash,
    ) -> ProviderResult<Option<(Self::Transaction, TransactionMeta)>>;
    /// Get transaction block number
    ///
    /// Returns the number of the block containing transaction `id`, `None` if unknown.
    fn transaction_block(&self, id: TxNumber) -> ProviderResult<Option<BlockNumber>>;
    /// Get transactions by block id.
    ///
    /// NOTE(review): `None` presumably means the block itself was not found — confirm against
    /// implementations.
    fn transactions_by_block(
        &self,
        block: BlockHashOrNumber,
    ) -> ProviderResult<Option<Vec<Self::Transaction>>>;
    /// Get transactions by block range.
    ///
    /// Returns one inner `Vec` of transactions per block in the range.
    fn transactions_by_block_range(
        &self,
        range: impl RangeBounds<BlockNumber>,
    ) -> ProviderResult<Vec<Vec<Self::Transaction>>>;
    /// Get transactions by tx range.
    fn transactions_by_tx_range(
        &self,
        range: impl RangeBounds<TxNumber>,
    ) -> ProviderResult<Vec<Self::Transaction>>;
    /// Get senders from a tx range.
    fn senders_by_tx_range(
        &self,
        range: impl RangeBounds<TxNumber>,
    ) -> ProviderResult<Vec<Address>>;
    /// Get transaction sender.
    ///
    /// Returns None if the transaction is not found.
    fn transaction_sender(&self, id: TxNumber) -> ProviderResult<Option<Address>>;
}
/// A helper type alias to access [`TransactionsProvider::Transaction`].
///
/// Shorthand for naming the transaction type of a generic provider `P`.
pub type ProviderTx<P> = <P as TransactionsProvider>::Transaction;
/// Client trait for fetching additional transactions related data.
#[auto_impl::auto_impl(&, Arc)]
pub trait TransactionsProviderExt: BlockReader {
    /// Returns the inclusive transaction-number range covered by the given block range.
    ///
    /// The range spans from the first transaction of the first block to the last transaction
    /// of the last block, as recorded in the stored block body indices. Errors if either
    /// boundary block has no body indices.
    fn transaction_range_by_block_range(
        &self,
        block_range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<RangeInclusive<TxNumber>> {
        let (first_block, last_block) = (*block_range.start(), *block_range.end());

        let start_indices = self
            .block_body_indices(first_block)?
            .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(first_block))?;
        let end_indices = self
            .block_body_indices(last_block)?
            .ok_or_else(|| ProviderError::BlockBodyIndicesNotFound(last_block))?;

        Ok(start_indices.first_tx_num()..=end_indices.last_tx_num())
    }

    /// Get transaction hashes from a transaction range.
    fn transaction_hashes_by_range(
        &self,
        tx_range: Range<TxNumber>,
    ) -> ProviderResult<Vec<(TxHash, TxNumber)>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/block_id.rs | crates/storage/storage-api/src/block_id.rs | use crate::BlockHashReader;
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
use alloy_primitives::{BlockNumber, B256};
use reth_chainspec::ChainInfo;
use reth_storage_errors::provider::{ProviderError, ProviderResult};
/// Client trait for getting important block numbers (such as the latest block number), converting
/// block hashes to numbers, and fetching a block hash from its block number.
///
/// This trait also supports fetching block hashes and block numbers from a [`BlockHashOrNumber`].
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockNumReader: BlockHashReader + Send + Sync {
    /// Returns the current info for the chain.
    fn chain_info(&self) -> ProviderResult<ChainInfo>;
    /// Returns the best block number in the chain.
    fn best_block_number(&self) -> ProviderResult<BlockNumber>;
    /// Returns the last block number associated with the last canonical header in the database.
    fn last_block_number(&self) -> ProviderResult<BlockNumber>;
    /// Returns earliest block number to keep track of the expired block range.
    ///
    /// Defaults to `0` (genesis) for providers that never expire history.
    fn earliest_block_number(&self) -> ProviderResult<BlockNumber> {
        Ok(0)
    }
    /// Gets the `BlockNumber` for the given hash. Returns `None` if no block with this hash exists.
    fn block_number(&self, hash: B256) -> ProviderResult<Option<BlockNumber>>;
    /// Resolves a [`BlockHashOrNumber`] to a block number.
    ///
    /// Numbers are passed through untouched; hashes are looked up via [`Self::block_number`]
    /// and yield `None` when unknown.
    fn convert_hash_or_number(&self, id: BlockHashOrNumber) -> ProviderResult<Option<BlockNumber>> {
        match id {
            BlockHashOrNumber::Number(number) => Ok(Some(number)),
            BlockHashOrNumber::Hash(block_hash) => self.block_number(block_hash),
        }
    }
    /// Resolves a [`BlockHashOrNumber`] to a block hash.
    ///
    /// Hashes are passed through untouched; numbers are looked up via
    /// [`BlockHashReader::block_hash`] and yield `None` when unknown.
    fn convert_number(&self, id: BlockHashOrNumber) -> ProviderResult<Option<B256>> {
        match id {
            BlockHashOrNumber::Number(number) => self.block_hash(number),
            BlockHashOrNumber::Hash(block_hash) => Ok(Some(block_hash)),
        }
    }
}
/// Client trait for transforming [`BlockId`] into block numbers or hashes.
///
/// Types that implement this trait must be able to resolve all variants of [`BlockNumberOrTag`] to
/// block numbers or hashes. Automatic implementations for resolving [`BlockNumberOrTag`] variants
/// are provided if the type implements the `pending_block_num_hash`, `finalized_block_num`, and
/// `safe_block_num` methods.
///
/// The resulting block numbers can be converted to hashes using the underlying [`BlockNumReader`]
/// methods, and vice versa.
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockIdReader: BlockNumReader + Send + Sync {
/// Converts the `BlockNumberOrTag` variants to a block number.
fn convert_block_number(&self, num: BlockNumberOrTag) -> ProviderResult<Option<BlockNumber>> {
let num = match num {
BlockNumberOrTag::Latest => self.best_block_number()?,
BlockNumberOrTag::Earliest => self.earliest_block_number()?,
BlockNumberOrTag::Pending => {
return self
.pending_block_num_hash()
.map(|res_opt| res_opt.map(|num_hash| num_hash.number))
}
BlockNumberOrTag::Number(num) => num,
BlockNumberOrTag::Finalized => {
self.finalized_block_number()?.ok_or(ProviderError::FinalizedBlockNotFound)?
}
BlockNumberOrTag::Safe => {
self.safe_block_number()?.ok_or(ProviderError::SafeBlockNotFound)?
}
};
Ok(Some(num))
}
/// Get the hash of the block by matching the given id.
fn block_hash_for_id(&self, block_id: BlockId) -> ProviderResult<Option<B256>> {
match block_id {
BlockId::Hash(hash) => Ok(Some(hash.into())),
BlockId::Number(num) => match num {
BlockNumberOrTag::Latest => Ok(Some(self.chain_info()?.best_hash)),
BlockNumberOrTag::Pending => self
.pending_block_num_hash()
.map(|res_opt| res_opt.map(|num_hash| num_hash.hash)),
BlockNumberOrTag::Finalized => self.finalized_block_hash(),
BlockNumberOrTag::Safe => self.safe_block_hash(),
BlockNumberOrTag::Earliest => self.block_hash(self.earliest_block_number()?),
BlockNumberOrTag::Number(num) => self.block_hash(num),
},
}
}
/// Get the number of the block by matching the given id.
fn block_number_for_id(&self, block_id: BlockId) -> ProviderResult<Option<BlockNumber>> {
match block_id {
BlockId::Hash(hash) => self.block_number(hash.into()),
BlockId::Number(num) => self.convert_block_number(num),
}
}
/// Get the current pending block number and hash.
fn pending_block_num_hash(&self) -> ProviderResult<Option<alloy_eips::BlockNumHash>>;
/// Get the current safe block number and hash.
fn safe_block_num_hash(&self) -> ProviderResult<Option<alloy_eips::BlockNumHash>>;
/// Get the current finalized block number and hash.
fn finalized_block_num_hash(&self) -> ProviderResult<Option<alloy_eips::BlockNumHash>>;
/// Get the safe block number.
fn safe_block_number(&self) -> ProviderResult<Option<BlockNumber>> {
self.safe_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.number))
}
/// Get the finalized block number.
fn finalized_block_number(&self) -> ProviderResult<Option<BlockNumber>> {
self.finalized_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.number))
}
/// Get the safe block hash.
fn safe_block_hash(&self) -> ProviderResult<Option<B256>> {
self.safe_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.hash))
}
/// Get the finalized block hash.
fn finalized_block_hash(&self) -> ProviderResult<Option<B256>> {
self.finalized_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.hash))
}
}
// Compile-time assertion that `BlockIdReader` remains object-safe (usable as `dyn BlockIdReader`).
#[cfg(test)]
fn _object_safe(_: Box<dyn BlockIdReader>) {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/block_indices.rs | crates/storage/storage-api/src/block_indices.rs | use alloc::vec::Vec;
use alloy_primitives::BlockNumber;
use core::ops::RangeInclusive;
use reth_db_models::StoredBlockBodyIndices;
use reth_storage_errors::provider::ProviderResult;
/// Client trait for fetching block body indices related data.
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockBodyIndicesProvider: Send + Sync {
    /// Returns the block body indices with matching number from database.
    ///
    /// Returns `None` if block is not found.
    fn block_body_indices(&self, num: u64) -> ProviderResult<Option<StoredBlockBodyIndices>>;
    /// Returns the block body indices within the requested range matching number from storage.
    ///
    /// NOTE(review): whether blocks missing from the range are skipped or produce an error is
    /// implementation-defined — confirm against implementations.
    fn block_body_indices_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<StoredBlockBodyIndices>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/state.rs | crates/storage/storage-api/src/state.rs | use super::{
AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider,
StorageRootProvider,
};
use alloc::boxed::Box;
use alloy_consensus::constants::KECCAK_EMPTY;
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, B256, U256};
use auto_impl::auto_impl;
use reth_execution_types::ExecutionOutcome;
use reth_primitives_traits::Bytecode;
use reth_storage_errors::provider::ProviderResult;
use reth_trie_common::HashedPostState;
use revm_database::BundleState;
use revm_state::FlaggedStorage;
/// This just receives state, or [`ExecutionOutcome`], from the provider
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait StateReader: Send + Sync {
    /// Receipt type in [`ExecutionOutcome`].
    type Receipt: Send + Sync;
    /// Get the [`ExecutionOutcome`] for the given block
    ///
    /// Returns `None` when no outcome is available for `block`.
    fn get_state(
        &self,
        block: BlockNumber,
    ) -> ProviderResult<Option<ExecutionOutcome<Self::Receipt>>>;
}
/// Type alias of boxed [`StateProvider`].
///
/// Used wherever a dynamically-dispatched state provider is returned.
pub type StateProviderBox = Box<dyn StateProvider>;
/// An abstraction for a type that provides state data.
#[auto_impl(&, Arc, Box)]
pub trait StateProvider:
    BlockHashReader
    + AccountReader
    + BytecodeReader
    + StateRootProvider
    + StorageRootProvider
    + StateProofProvider
    + HashedPostStateProvider
    + Send
    + Sync
{
    /// Get storage of given account.
    fn storage(
        &self,
        account: Address,
        storage_key: StorageKey,
    ) -> ProviderResult<Option<FlaggedStorage>>;
    /// Get account code by its address.
    ///
    /// Returns `None` if the account doesn't exist or account is not a contract
    fn account_code(&self, addr: &Address) -> ProviderResult<Option<Bytecode>> {
        // A missing account trivially has no code.
        let Some(account) = self.basic_account(addr)? else { return Ok(None) };
        match account.bytecode_hash {
            // A real (non-empty) code hash: resolve the bytecode behind it.
            Some(code_hash) if code_hash != KECCAK_EMPTY => self.bytecode_by_hash(&code_hash),
            // No code hash set, or the well-known empty-code hash: not a contract.
            _ => Ok(None),
        }
    }
    /// Get account balance by its address.
    ///
    /// Returns `None` if the account doesn't exist
    fn account_balance(&self, addr: &Address) -> ProviderResult<Option<U256>> {
        Ok(self.basic_account(addr)?.map(|account| account.balance))
    }
    /// Get account nonce by its address.
    ///
    /// Returns `None` if the account doesn't exist
    fn account_nonce(&self, addr: &Address) -> ProviderResult<Option<u64>> {
        Ok(self.basic_account(addr)?.map(|account| account.nonce))
    }
}
/// Minimal requirements to read a full account, for example, to validate its new transactions
pub trait AccountInfoReader: AccountReader + BytecodeReader {}
// Blanket impl: any type that can read accounts and bytecode qualifies automatically.
impl<T: AccountReader + BytecodeReader> AccountInfoReader for T {}
/// Trait that provides the hashed state from various sources.
#[auto_impl(&, Arc, Box)]
pub trait HashedPostStateProvider: Send + Sync {
    /// Returns the `HashedPostState` of the provided [`BundleState`].
    ///
    /// Infallible by signature; implementations must always be able to produce a result.
    fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState;
}
/// Trait for reading bytecode associated with a given code hash.
#[auto_impl(&, Arc, Box)]
pub trait BytecodeReader: Send + Sync {
    /// Get account code by its hash
    ///
    /// Returns `None` if no bytecode with this hash is known.
    fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>>;
}
/// Trait implemented for database providers that can be converted into a historical state provider.
pub trait TryIntoHistoricalStateProvider {
    /// Returns a historical [`StateProvider`] indexed by the given historic block number.
    ///
    /// Consumes `self`; the returned provider exposes the state as of `block_number`.
    fn try_into_history_at_block(
        self,
        block_number: BlockNumber,
    ) -> ProviderResult<StateProviderBox>;
}
/// Light wrapper that returns `StateProvider` implementations that correspond to the given
/// `BlockNumber`, the latest state, or the pending state.
///
/// This type differentiates states into `historical`, `latest` and `pending`, where the `latest`
/// block determines what is historical or pending: `[historical..latest..pending]`.
///
/// The `latest` state represents the state after the most recent block has been committed to the
/// database, `historical` states are states that have been committed to the database before the
/// `latest` state, and `pending` states are states that have not yet been committed to the
/// database which may or may not become the `latest` state, depending on consensus.
///
/// Note: the `pending` block is considered the block that extends the canonical chain but one and
/// has the `latest` block as its parent.
///
/// All states are _inclusive_, meaning they include _all_ changes made (executed transactions)
/// in their respective blocks. For example [`StateProviderFactory::history_by_block_number`] for
/// block number `n` will return the state after block `n` was executed (transactions, withdrawals).
/// In other words, all states point to the end of the state's respective block, which is equivalent
/// to state at the beginning of the child block.
///
/// This affects tracing, or replaying blocks, which will need to be executed on top of the state of
/// the parent block. For example, in order to trace block `n`, the state after block `n - 1` needs
/// to be used, since block `n` was executed on its parent block's state.
#[auto_impl(&, Arc, Box)]
pub trait StateProviderFactory: BlockIdReader + Send + Sync {
    /// Storage provider for latest block.
    fn latest(&self) -> ProviderResult<StateProviderBox>;
    /// Returns a [`StateProvider`] indexed by the given [`BlockId`].
    ///
    /// Note: if a number or hash is provided this will __only__ look at historical(canonical)
    /// state.
    fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult<StateProviderBox> {
        match block_id {
            BlockId::Hash(block_hash) => self.history_by_block_hash(block_hash.into()),
            BlockId::Number(number_or_tag) => self.state_by_block_number_or_tag(number_or_tag),
        }
    }
    /// Returns a [`StateProvider`] indexed by the given block number or tag.
    ///
    /// Note: if a number is provided this will only look at historical(canonical) state.
    fn state_by_block_number_or_tag(
        &self,
        number_or_tag: BlockNumberOrTag,
    ) -> ProviderResult<StateProviderBox>;
    /// Returns a historical [`StateProvider`] indexed by the given historic block number.
    ///
    /// Note: this only looks at historical blocks, not pending blocks.
    fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult<StateProviderBox>;
    /// Returns a historical [`StateProvider`] indexed by the given block hash.
    ///
    /// Note: this only looks at historical blocks, not pending blocks.
    fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult<StateProviderBox>;
    /// Returns _any_ [`StateProvider`] with matching block hash.
    ///
    /// This will return a [`StateProvider`] for either a historical or pending block.
    fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult<StateProviderBox>;
    /// Storage provider for pending state.
    ///
    /// Represents the state at the block that extends the canonical chain by one.
    /// If there's no `pending` block, then this is equal to [`StateProviderFactory::latest`]
    fn pending(&self) -> ProviderResult<StateProviderBox>;
    /// Storage provider for pending state for the given block hash.
    ///
    /// Represents the state at the block that extends the canonical chain.
    ///
    /// If the block couldn't be found, returns `None`.
    fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult<Option<StateProviderBox>>;
    /// Returns a pending [`StateProvider`] if it exists.
    ///
    /// This will return `None` if there's no pending state.
    fn maybe_pending(&self) -> ProviderResult<Option<StateProviderBox>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/chain.rs | crates/storage/storage-api/src/chain.rs | use crate::{DBProvider, StorageLocation};
use alloc::vec::Vec;
use alloy_consensus::Header;
use alloy_primitives::BlockNumber;
use core::marker::PhantomData;
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW},
models::StoredBlockOmmers,
tables,
transaction::{DbTx, DbTxMut},
DbTxUnwindExt,
};
use reth_db_models::StoredBlockWithdrawals;
use reth_ethereum_primitives::TransactionSigned;
use reth_primitives_traits::{
Block, BlockBody, FullBlockHeader, FullNodePrimitives, SignedTransaction,
};
use reth_storage_errors::provider::ProviderResult;
/// Trait that implements how block bodies are written to the storage.
///
/// Note: Within the current abstraction, this should only write to tables unrelated to
/// transactions. Writing of transactions is handled separately.
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockBodyWriter<Provider, Body: BlockBody> {
    /// Writes a set of block bodies to the storage.
    ///
    /// Each entry pairs a block number with an optional body; `None` bodies carry no data to
    /// persist.
    fn write_block_bodies(
        &self,
        provider: &Provider,
        bodies: Vec<(BlockNumber, Option<Body>)>,
        write_to: StorageLocation,
    ) -> ProviderResult<()>;
    /// Removes all block bodies above the given block number from the database.
    fn remove_block_bodies_above(
        &self,
        provider: &Provider,
        block: BlockNumber,
        remove_from: StorageLocation,
    ) -> ProviderResult<()>;
}
/// Trait that implements how chain-specific types are written to the storage.
pub trait ChainStorageWriter<Provider, Primitives: FullNodePrimitives>:
    BlockBodyWriter<Provider, <Primitives::Block as Block>::Body>
{
}
// Blanket impl: implementing `BlockBodyWriter` for the primitives' block body is sufficient.
impl<T, Provider, Primitives: FullNodePrimitives> ChainStorageWriter<Provider, Primitives> for T where
    T: BlockBodyWriter<Provider, <Primitives::Block as Block>::Body>
{
}
/// Input for reading a block body. Contains a header of block being read and a list of pre-fetched
/// transactions.
///
/// The lifetime `'a` ties the borrowed header to the caller.
pub type ReadBodyInput<'a, B> =
    (&'a <B as Block>::Header, Vec<<<B as Block>::Body as BlockBody>::Transaction>);
/// Trait that implements how block bodies are read from the storage.
///
/// Note: Within the current abstraction, transactions persistence is handled separately, thus this
/// trait is provided with transactions read beforehand and is expected to construct the block body
/// from those transactions and additional data read from elsewhere.
#[auto_impl::auto_impl(&, Arc)]
pub trait BlockBodyReader<Provider> {
    /// The block type.
    type Block: Block;
    /// Receives a list of block headers along with block transactions and returns the block bodies.
    ///
    /// The output is expected to contain one body per input, in the same order as `inputs`.
    fn read_block_bodies(
        &self,
        provider: &Provider,
        inputs: Vec<ReadBodyInput<'_, Self::Block>>,
    ) -> ProviderResult<Vec<<Self::Block as Block>::Body>>;
}
/// Trait that implements how chain-specific types are read from storage.
pub trait ChainStorageReader<Provider, Primitives: FullNodePrimitives>:
    BlockBodyReader<Provider, Block = Primitives::Block>
{
}
// Blanket impl: implementing `BlockBodyReader` for the primitives' block type is sufficient.
impl<T, Provider, Primitives: FullNodePrimitives> ChainStorageReader<Provider, Primitives> for T where
    T: BlockBodyReader<Provider, Block = Primitives::Block>
{
}
/// Ethereum storage implementation.
///
/// A zero-sized handle, generic over the transaction (`T`) and header (`H`) types it reads and
/// writes; the `PhantomData` only records those type parameters.
#[derive(Debug, Clone, Copy)]
pub struct EthStorage<T = TransactionSigned, H = Header>(PhantomData<(T, H)>);
impl<T, H> Default for EthStorage<T, H> {
fn default() -> Self {
Self(Default::default())
}
}
impl<Provider, T, H> BlockBodyWriter<Provider, alloy_consensus::BlockBody<T, H>>
    for EthStorage<T, H>
where
    Provider: DBProvider<Tx: DbTxMut>,
    T: SignedTransaction,
    H: FullBlockHeader,
{
    /// Persists the non-transaction parts of the given bodies (ommers and withdrawals).
    fn write_block_bodies(
        &self,
        provider: &Provider,
        bodies: Vec<(u64, Option<alloy_consensus::BlockBody<T, H>>)>,
        _write_to: StorageLocation,
    ) -> ProviderResult<()> {
        let tx = provider.tx_ref();
        let mut ommers_cursor = tx.cursor_write::<tables::BlockOmmers<H>>()?;
        let mut withdrawals_cursor = tx.cursor_write::<tables::BlockWithdrawals>()?;
        // NOTE(review): `append` presumably requires ascending keys — input order is kept.
        for (block_number, body) in bodies.into_iter().filter_map(|(num, b)| Some((num, b?))) {
            // Empty ommer lists are not persisted.
            if !body.ommers.is_empty() {
                ommers_cursor.append(block_number, &StoredBlockOmmers { ommers: body.ommers })?;
            }
            // Absent or empty withdrawal lists are not persisted either.
            match body.withdrawals {
                Some(withdrawals) if !withdrawals.is_empty() => {
                    withdrawals_cursor
                        .append(block_number, &StoredBlockWithdrawals { withdrawals })?;
                }
                _ => {}
            }
        }
        Ok(())
    }

    /// Unwinds stored withdrawals and ommers for every block above `block`.
    fn remove_block_bodies_above(
        &self,
        provider: &Provider,
        block: BlockNumber,
        _remove_from: StorageLocation,
    ) -> ProviderResult<()> {
        let tx = provider.tx_ref();
        tx.unwind_table_by_num::<tables::BlockWithdrawals>(block)?;
        tx.unwind_table_by_num::<tables::BlockOmmers>(block)?;
        Ok(())
    }
}
impl<Provider, T, H> BlockBodyReader<Provider> for EthStorage<T, H>
where
    Provider: DBProvider + ChainSpecProvider<ChainSpec: EthereumHardforks>,
    T: SignedTransaction,
    H: FullBlockHeader,
{
    type Block = alloy_consensus::Block<T, H>;
    /// Assembles block bodies from the pre-fetched transactions plus the withdrawals and
    /// ommers stored in the database, one body per input header (same order).
    fn read_block_bodies(
        &self,
        provider: &Provider,
        inputs: Vec<ReadBodyInput<'_, Self::Block>>,
    ) -> ProviderResult<Vec<<Self::Block as Block>::Body>> {
        // TODO: Ideally storage should hold its own copy of chain spec
        let chain_spec = provider.chain_spec();
        // Open both cursors once up front: cursor creation is loop-invariant, and the ommers
        // cursor was previously rebuilt for every pre-merge header inside the loop.
        let mut withdrawals_cursor = provider.tx_ref().cursor_read::<tables::BlockWithdrawals>()?;
        let mut ommers_cursor = provider.tx_ref().cursor_read::<tables::BlockOmmers<H>>()?;
        let mut bodies = Vec::with_capacity(inputs.len());
        for (header, transactions) in inputs {
            // If we are past shanghai, then all blocks should have a withdrawal list,
            // even if empty
            let withdrawals =
                if chain_spec.is_shanghai_active_at_timestamp(header.timestamp_seconds()) {
                    withdrawals_cursor
                        .seek_exact(header.number())?
                        .map(|(_, w)| w.withdrawals)
                        .unwrap_or_default()
                        .into()
                } else {
                    None
                };
            // Post-merge blocks never have ommers; only pre-merge blocks hit the database.
            let ommers = if chain_spec.is_paris_active_at_block(header.number()) {
                Vec::new()
            } else {
                ommers_cursor
                    .seek_exact(header.number())?
                    .map(|(_, stored_ommers)| stored_ommers.ommers)
                    .unwrap_or_default()
            };
            bodies.push(alloy_consensus::BlockBody { transactions, ommers, withdrawals });
        }
        Ok(bodies)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/stage_checkpoint.rs | crates/storage/storage-api/src/stage_checkpoint.rs | use alloc::{string::String, vec::Vec};
use alloy_primitives::BlockNumber;
use reth_stages_types::{StageCheckpoint, StageId};
use reth_storage_errors::provider::ProviderResult;
/// The trait for fetching stage checkpoint related data.
#[auto_impl::auto_impl(&, Arc)]
pub trait StageCheckpointReader: Send + Sync {
    /// Fetch the checkpoint for the given stage.
    ///
    /// `None` presumably means no checkpoint has been recorded for the stage yet — confirm
    /// against implementations.
    fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult<Option<StageCheckpoint>>;
    /// Get stage checkpoint progress.
    ///
    /// The progress is an opaque byte blob; its encoding is presumably stage-specific.
    fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult<Option<Vec<u8>>>;
    /// Reads all stage checkpoints and returns a list with the name of the stage and the checkpoint
    /// data.
    fn get_all_checkpoints(&self) -> ProviderResult<Vec<(String, StageCheckpoint)>>;
}
/// The trait for updating stage checkpoint related data.
#[auto_impl::auto_impl(&, Arc)]
pub trait StageCheckpointWriter: Send + Sync {
    /// Save stage checkpoint.
    fn save_stage_checkpoint(&self, id: StageId, checkpoint: StageCheckpoint)
        -> ProviderResult<()>;
    /// Save stage checkpoint progress.
    ///
    /// The progress is an opaque byte blob, matching
    /// [`StageCheckpointReader::get_stage_checkpoint_progress`].
    fn save_stage_checkpoint_progress(
        &self,
        id: StageId,
        checkpoint: Vec<u8>,
    ) -> ProviderResult<()>;
    /// Update all pipeline sync stage progress.
    ///
    /// NOTE(review): semantics of `drop_stage_checkpoint` are implementation-defined — confirm.
    fn update_pipeline_stages(
        &self,
        block_number: BlockNumber,
        drop_stage_checkpoint: bool,
    ) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/block.rs | crates/storage/storage-api/src/block.rs | use crate::{
BlockBodyIndicesProvider, BlockNumReader, HeaderProvider, ReceiptProvider,
ReceiptProviderIdExt, TransactionVariant, TransactionsProvider,
};
use alloc::{sync::Arc, vec::Vec};
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
use alloy_primitives::{BlockNumber, B256};
use core::ops::RangeInclusive;
use reth_primitives_traits::{RecoveredBlock, SealedHeader};
use reth_storage_errors::provider::ProviderResult;
/// A helper enum that represents the origin of the requested block.
///
/// This helper type's sole purpose is to give the caller more control over from where blocks can be
/// fetched.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)]
pub enum BlockSource {
    /// Check all available sources.
    ///
    /// Note: it's expected that looking up pending blocks is faster than looking up blocks in the
    /// database so this prioritizes Pending > Database.
    ///
    /// This is the [`Default`] variant.
    #[default]
    Any,
    /// The block was fetched from the pending block source, the blockchain tree that buffers
    /// blocks that are not yet part of the canonical chain.
    Pending,
    /// The block must be part of the canonical chain.
    Canonical,
}
impl BlockSource {
    /// Returns `true` if the block source is `Pending` or `Any`.
    pub const fn is_pending(&self) -> bool {
        match self {
            Self::Pending | Self::Any => true,
            Self::Canonical => false,
        }
    }

    /// Returns `true` if the block source is `Canonical` or `Any`.
    pub const fn is_canonical(&self) -> bool {
        match self {
            Self::Canonical | Self::Any => true,
            Self::Pending => false,
        }
    }
}
/// A helper type alias to access [`BlockReader::Block`].
///
/// Shorthand for naming the block type of a generic provider `P`.
pub type ProviderBlock<P> = <P as BlockReader>::Block;
/// Api trait for fetching `Block` related data.
///
/// If not requested otherwise, implementers of this trait should prioritize fetching blocks from
/// the database.
pub trait BlockReader:
    BlockNumReader
    + HeaderProvider
    + BlockBodyIndicesProvider
    + TransactionsProvider
    + ReceiptProvider
    + Send
    + Sync
{
    /// The block type this provider reads.
    ///
    /// Its body's transaction type and its header type are constrained to match the
    /// associated types of the `TransactionsProvider` and `HeaderProvider` supertraits.
    type Block: reth_primitives_traits::Block<
        Body: reth_primitives_traits::BlockBody<Transaction = Self::Transaction>,
        Header = Self::Header,
    >;
    /// Tries to find in the given block source.
    ///
    /// Note: this only operates on the hash because the number might be ambiguous.
    ///
    /// Returns `None` if block is not found.
    fn find_block_by_hash(
        &self,
        hash: B256,
        source: BlockSource,
    ) -> ProviderResult<Option<Self::Block>>;
    /// Returns the block with given id from the database.
    ///
    /// Returns `None` if block is not found.
    fn block(&self, id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>>;
    /// Returns the pending block if available
    ///
    /// Note: This returns a [`RecoveredBlock`] because it's expected that this is sealed by
    /// the provider and the caller does not know the hash.
    fn pending_block(&self) -> ProviderResult<Option<RecoveredBlock<Self::Block>>>;
    /// Returns the pending block and receipts if available.
    #[expect(clippy::type_complexity)]
    fn pending_block_and_receipts(
        &self,
    ) -> ProviderResult<Option<(RecoveredBlock<Self::Block>, Vec<Self::Receipt>)>>;
    /// Returns the block with matching hash from the database.
    ///
    /// Returns `None` if block is not found.
    fn block_by_hash(&self, hash: B256) -> ProviderResult<Option<Self::Block>> {
        self.block(hash.into())
    }
    /// Returns the block with matching number from database.
    ///
    /// Returns `None` if block is not found.
    fn block_by_number(&self, num: u64) -> ProviderResult<Option<Self::Block>> {
        self.block(num.into())
    }
    /// Returns the block with senders with matching number or hash from database.
    ///
    /// Returns the block's transactions in the requested variant.
    ///
    /// Returns `None` if block is not found.
    fn recovered_block(
        &self,
        id: BlockHashOrNumber,
        transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>>;
    /// Returns the sealed block with senders with matching number or hash from database.
    ///
    /// Returns the block's transactions in the requested variant.
    ///
    /// Returns `None` if block is not found.
    ///
    /// NOTE(review): has the same signature as [`Self::recovered_block`]; any behavioral
    /// difference is implementation-defined — confirm against implementations.
    fn sealed_block_with_senders(
        &self,
        id: BlockHashOrNumber,
        transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>>;
    /// Returns all blocks in the given inclusive range.
    ///
    /// Note: returns only available blocks
    fn block_range(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<Vec<Self::Block>>;
    /// Returns a range of blocks from the database, along with the senders of each
    /// transaction in the blocks.
    fn block_with_senders_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>>;
    /// Returns a range of sealed blocks from the database, along with the senders of each
    /// transaction in the blocks.
    fn recovered_block_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>>;
}
impl<T: BlockReader> BlockReader for Arc<T> {
    type Block = T::Block;

    // Every method forwards to the wrapped `T` through deref; `Arc` adds no behavior.
    fn find_block_by_hash(
        &self,
        hash: B256,
        source: BlockSource,
    ) -> ProviderResult<Option<Self::Block>> {
        (**self).find_block_by_hash(hash, source)
    }
    fn block(&self, id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>> {
        (**self).block(id)
    }
    fn pending_block(&self) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        (**self).pending_block()
    }
    fn pending_block_and_receipts(
        &self,
    ) -> ProviderResult<Option<(RecoveredBlock<Self::Block>, Vec<Self::Receipt>)>> {
        (**self).pending_block_and_receipts()
    }
    fn block_by_hash(&self, hash: B256) -> ProviderResult<Option<Self::Block>> {
        (**self).block_by_hash(hash)
    }
    fn block_by_number(&self, num: u64) -> ProviderResult<Option<Self::Block>> {
        (**self).block_by_number(num)
    }
    fn recovered_block(
        &self,
        id: BlockHashOrNumber,
        transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        (**self).recovered_block(id, transaction_kind)
    }
    fn sealed_block_with_senders(
        &self,
        id: BlockHashOrNumber,
        transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        (**self).sealed_block_with_senders(id, transaction_kind)
    }
    fn block_range(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<Vec<Self::Block>> {
        (**self).block_range(range)
    }
    fn block_with_senders_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
        (**self).block_with_senders_range(range)
    }
    fn recovered_block_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
        (**self).recovered_block_range(range)
    }
}
impl<T: BlockReader> BlockReader for &T {
    type Block = T::Block;

    // Every method forwards to the referenced `T`; a shared reference adds no behavior.
    fn find_block_by_hash(
        &self,
        hash: B256,
        source: BlockSource,
    ) -> ProviderResult<Option<Self::Block>> {
        (**self).find_block_by_hash(hash, source)
    }
    fn block(&self, id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>> {
        (**self).block(id)
    }
    fn pending_block(&self) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        (**self).pending_block()
    }
    fn pending_block_and_receipts(
        &self,
    ) -> ProviderResult<Option<(RecoveredBlock<Self::Block>, Vec<Self::Receipt>)>> {
        (**self).pending_block_and_receipts()
    }
    fn block_by_hash(&self, hash: B256) -> ProviderResult<Option<Self::Block>> {
        (**self).block_by_hash(hash)
    }
    fn block_by_number(&self, num: u64) -> ProviderResult<Option<Self::Block>> {
        (**self).block_by_number(num)
    }
    fn recovered_block(
        &self,
        id: BlockHashOrNumber,
        transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        (**self).recovered_block(id, transaction_kind)
    }
    fn sealed_block_with_senders(
        &self,
        id: BlockHashOrNumber,
        transaction_kind: TransactionVariant,
    ) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
        (**self).sealed_block_with_senders(id, transaction_kind)
    }
    fn block_range(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<Vec<Self::Block>> {
        (**self).block_range(range)
    }
    fn block_with_senders_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
        (**self).block_with_senders_range(range)
    }
    fn recovered_block_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<Vec<RecoveredBlock<Self::Block>>> {
        (**self).recovered_block_range(range)
    }
}
/// Trait extension for `BlockReader`, for types that implement `BlockId` conversion.
///
/// The `BlockReader` trait should be implemented on types that can retrieve a block from either
/// a block number or hash. However, it might be desirable to fetch a block from a `BlockId` type,
/// which can be a number, hash, or tag such as `BlockNumberOrTag::Safe`.
///
/// Resolving tags requires keeping track of block hashes or block numbers associated with the tag,
/// so this trait can only be implemented for types that implement `BlockIdReader`. The
/// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and
/// retrieving the block should be done using the type's `BlockReader` methods.
pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt {
/// Returns the block with matching tag from the database
///
/// Returns `None` if block is not found.
fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult<Option<Self::Block>> {
self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into()))
}
/// Returns the pending block header if available
///
/// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the
/// provider and the caller does not know the hash.
fn pending_header(&self) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
self.sealed_header_by_id(BlockNumberOrTag::Pending.into())
}
/// Returns the latest block header if available
///
/// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the
/// provider and the caller does not know the hash.
fn latest_header(&self) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
self.sealed_header_by_id(BlockNumberOrTag::Latest.into())
}
/// Returns the safe block header if available
///
/// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the
/// provider and the caller does not know the hash.
fn safe_header(&self) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
self.sealed_header_by_id(BlockNumberOrTag::Safe.into())
}
/// Returns the finalized block header if available
///
/// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the
/// provider and the caller does not know the hash.
fn finalized_header(&self) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
self.sealed_header_by_id(BlockNumberOrTag::Finalized.into())
}
/// Returns the block with the matching [`BlockId`] from the database.
///
/// Returns `None` if block is not found.
fn block_by_id(&self, id: BlockId) -> ProviderResult<Option<Self::Block>>;
/// Returns the block with senders with matching [`BlockId`].
///
/// Returns the block's transactions in the requested variant.
///
/// Returns `None` if block is not found.
fn block_with_senders_by_id(
&self,
id: BlockId,
transaction_kind: TransactionVariant,
) -> ProviderResult<Option<RecoveredBlock<Self::Block>>> {
match id {
BlockId::Hash(hash) => self.recovered_block(hash.block_hash.into(), transaction_kind),
BlockId::Number(num) => self
.convert_block_number(num)?
.map_or_else(|| Ok(None), |num| self.recovered_block(num.into(), transaction_kind)),
}
}
/// Returns the header with matching tag from the database
///
/// Returns `None` if header is not found.
fn header_by_number_or_tag(
&self,
id: BlockNumberOrTag,
) -> ProviderResult<Option<Self::Header>> {
self.convert_block_number(id)?
.map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))
}
/// Returns the header with matching tag from the database
///
/// Returns `None` if header is not found.
fn sealed_header_by_number_or_tag(
&self,
id: BlockNumberOrTag,
) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
self.convert_block_number(id)?
.map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))?
.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal_slow(h))))
}
/// Returns the sealed header with the matching `BlockId` from the database.
///
/// Returns `None` if header is not found.
fn sealed_header_by_id(
&self,
id: BlockId,
) -> ProviderResult<Option<SealedHeader<Self::Header>>>;
/// Returns the header with the matching `BlockId` from the database.
///
/// Returns `None` if header is not found.
fn header_by_id(&self, id: BlockId) -> ProviderResult<Option<Self::Header>>;
}
/// Functionality to read the last known chain blocks from the database.
///
/// Read-side counterpart of [`ChainStateBlockWriter`].
pub trait ChainStateBlockReader: Send + Sync {
/// Returns the last finalized block number.
///
/// If no finalized block has been written yet, this returns `None`.
fn last_finalized_block_number(&self) -> ProviderResult<Option<BlockNumber>>;
/// Returns the last safe block number.
///
/// If no safe block has been written yet, this returns `None`.
fn last_safe_block_number(&self) -> ProviderResult<Option<BlockNumber>>;
}
/// Functionality to write the last known chain blocks to the database.
///
/// Write-side counterpart of [`ChainStateBlockReader`].
pub trait ChainStateBlockWriter: Send + Sync {
/// Saves the given finalized block number in the DB.
fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>;
/// Saves the given safe block number in the DB.
fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/primitives.rs | crates/storage/storage-api/src/primitives.rs | use reth_primitives_traits::NodePrimitives;
/// Provider implementation that knows configured [`NodePrimitives`].
///
/// Implementations for `&T`, `Arc<T>` and `Box<T>` are generated by the `auto_impl`
/// attribute below.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait NodePrimitivesProvider {
/// The node primitive types.
type Primitives: NodePrimitives;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/header.rs | crates/storage/storage-api/src/header.rs | use alloc::vec::Vec;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{BlockHash, BlockNumber, U256};
use core::ops::RangeBounds;
use reth_primitives_traits::{BlockHeader, SealedHeader};
use reth_storage_errors::provider::ProviderResult;
/// A helper type alias to access [`HeaderProvider::Header`].
///
/// Useful for naming the header type of a generic provider `P` in bounds and signatures.
pub type ProviderHeader<P> = <P as HeaderProvider>::Header;
/// Client trait for fetching `Header` related data.
#[auto_impl::auto_impl(&, Arc)]
pub trait HeaderProvider: Send + Sync {
/// The header type this provider supports.
type Header: BlockHeader;
/// Check if block is known
fn is_known(&self, block_hash: &BlockHash) -> ProviderResult<bool> {
self.header(block_hash).map(|header| header.is_some())
}
/// Get header by block hash
fn header(&self, block_hash: &BlockHash) -> ProviderResult<Option<Self::Header>>;
/// Retrieves the header sealed by the given block hash.
fn sealed_header_by_hash(
&self,
block_hash: BlockHash,
) -> ProviderResult<Option<SealedHeader<Self::Header>>> {
Ok(self.header(&block_hash)?.map(|header| SealedHeader::new(header, block_hash)))
}
/// Get header by block number
fn header_by_number(&self, num: u64) -> ProviderResult<Option<Self::Header>>;
/// Get header by block number or hash
fn header_by_hash_or_number(
&self,
hash_or_num: BlockHashOrNumber,
) -> ProviderResult<Option<Self::Header>> {
match hash_or_num {
BlockHashOrNumber::Hash(hash) => self.header(&hash),
BlockHashOrNumber::Number(num) => self.header_by_number(num),
}
}
/// Get total difficulty by block hash.
fn header_td(&self, hash: &BlockHash) -> ProviderResult<Option<U256>>;
/// Get total difficulty by block number.
fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult<Option<U256>>;
/// Get headers in range of block numbers
fn headers_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<Self::Header>>;
/// Get a single sealed header by block number.
fn sealed_header(
&self,
number: BlockNumber,
) -> ProviderResult<Option<SealedHeader<Self::Header>>>;
/// Get headers in range of block numbers.
fn sealed_headers_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<Vec<SealedHeader<Self::Header>>> {
self.sealed_headers_while(range, |_| true)
}
/// Get sealed headers while `predicate` returns `true` or the range is exhausted.
fn sealed_headers_while(
&self,
range: impl RangeBounds<BlockNumber>,
predicate: impl FnMut(&SealedHeader<Self::Header>) -> bool,
) -> ProviderResult<Vec<SealedHeader<Self::Header>>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/storage.rs | crates/storage/storage-api/src/storage.rs | use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use alloy_primitives::{Address, BlockNumber, B256};
use core::ops::RangeInclusive;
use reth_primitives_traits::StorageEntry;
use reth_storage_errors::provider::ProviderResult;
/// Storage reader
///
/// Read-only access to plain-state storage slots and to the storage changesets recorded
/// per block.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait StorageReader: Send + Sync {
/// Get plainstate storages for addresses and storage keys.
///
/// Returns one entry per requested address, each paired with the resolved storage entries.
fn plain_state_storages(
&self,
addresses_with_keys: impl IntoIterator<Item = (Address, impl IntoIterator<Item = B256>)>,
) -> ProviderResult<Vec<(Address, Vec<StorageEntry>)>>;
/// Iterate over storage changesets and return all storage slots that were changed.
fn changed_storages_with_range(
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<BTreeMap<Address, BTreeSet<B256>>>;
/// Iterate over storage changesets and return all storage slots that were changed alongside
/// each specific set of blocks.
///
/// NOTE: Get inclusive range of blocks.
fn changed_storages_and_blocks_with_range(
&self,
range: RangeInclusive<BlockNumber>,
) -> ProviderResult<BTreeMap<(Address, B256), Vec<u64>>>;
}
/// Storage `ChangeSet` reader
///
/// Only available when the `db-api` feature is enabled (see the `cfg` attribute below).
#[cfg(feature = "db-api")]
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait StorageChangeSetReader: Send + Sync {
/// Iterate over storage changesets and return the storage state from before this block.
fn storage_changeset(
&self,
block_number: BlockNumber,
) -> ProviderResult<Vec<(reth_db_api::models::BlockNumberAddress, StorageEntry)>>;
}
/// Where a given piece of data should be written: static files, the database, or both.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum StorageLocation {
    /// Write only to static files.
    StaticFiles,
    /// Write only to the database.
    Database,
    /// Write to both the database and static files.
    Both,
}

impl StorageLocation {
    /// Returns true if the storage location includes static files.
    pub const fn static_files(&self) -> bool {
        // Exhaustive match: adding a variant forces this to be revisited.
        match self {
            Self::StaticFiles | Self::Both => true,
            Self::Database => false,
        }
    }

    /// Returns true if the storage location includes the database.
    pub const fn database(&self) -> bool {
        match self {
            Self::Database | Self::Both => true,
            Self::StaticFiles => false,
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/history.rs | crates/storage/storage-api/src/history.rs | use alloy_primitives::{Address, BlockNumber, B256};
use auto_impl::auto_impl;
use core::ops::{RangeBounds, RangeInclusive};
use reth_db_api::models::BlockNumberAddress;
use reth_db_models::AccountBeforeTx;
use reth_primitives_traits::StorageEntry;
use reth_storage_errors::provider::ProviderResult;
/// History Writer
///
/// Maintains the account- and storage-history indices, which map an address (or an
/// address/slot pair) to the block numbers at which that entry changed.
#[auto_impl(&, Arc, Box)]
pub trait HistoryWriter: Send + Sync {
/// Unwind and clear account history indices.
///
/// Returns number of changesets walked.
fn unwind_account_history_indices<'a>(
&self,
changesets: impl Iterator<Item = &'a (BlockNumber, AccountBeforeTx)>,
) -> ProviderResult<usize>;
/// Unwind and clear account history indices in a given block range.
///
/// Returns number of changesets walked.
fn unwind_account_history_indices_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<usize>;
/// Insert account change index to database. Used inside `AccountHistoryIndex` stage
fn insert_account_history_index(
&self,
index_updates: impl IntoIterator<Item = (Address, impl IntoIterator<Item = u64>)>,
) -> ProviderResult<()>;
/// Unwind and clear storage history indices.
///
/// Returns number of changesets walked.
fn unwind_storage_history_indices(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<usize>;
/// Unwind and clear storage history indices in a given block range.
///
/// Returns number of changesets walked.
fn unwind_storage_history_indices_range(
&self,
range: impl RangeBounds<BlockNumberAddress>,
) -> ProviderResult<usize>;
/// Insert storage change index to database. Used inside `StorageHistoryIndex` stage
fn insert_storage_history_index(
&self,
storage_transitions: impl IntoIterator<Item = ((Address, B256), impl IntoIterator<Item = u64>)>,
) -> ProviderResult<()>;
/// Read account/storage changesets and update account/storage history indices.
fn update_history_indices(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/receipts.rs | crates/storage/storage-api/src/receipts.rs | use crate::BlockIdReader;
use alloc::vec::Vec;
use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag};
use alloy_primitives::{BlockNumber, TxHash, TxNumber};
use core::ops::{RangeBounds, RangeInclusive};
use reth_primitives_traits::Receipt;
use reth_storage_errors::provider::ProviderResult;
/// A helper type alias to access [`ReceiptProvider::Receipt`].
///
/// Useful for naming the receipt type of a generic provider `P` in bounds and signatures.
pub type ProviderReceipt<P> = <P as ReceiptProvider>::Receipt;
/// Client trait for fetching receipt data.
///
/// Receipts can be looked up by transaction number ([`TxNumber`]), by transaction hash, by
/// block, or in bulk over transaction/block ranges.
#[auto_impl::auto_impl(&, Arc)]
pub trait ReceiptProvider: Send + Sync {
/// The receipt type.
type Receipt: Receipt;
/// Get receipt by transaction number
///
/// Returns `None` if the transaction is not found.
fn receipt(&self, id: TxNumber) -> ProviderResult<Option<Self::Receipt>>;
/// Get receipt by transaction hash.
///
/// Returns `None` if the transaction is not found.
fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult<Option<Self::Receipt>>;
/// Get receipts by block num or hash.
///
/// Returns `None` if the block is not found.
fn receipts_by_block(
&self,
block: BlockHashOrNumber,
) -> ProviderResult<Option<Vec<Self::Receipt>>>;
/// Get receipts by tx range.
fn receipts_by_tx_range(
&self,
range: impl RangeBounds<TxNumber>,
) -> ProviderResult<Vec<Self::Receipt>>;
/// Get receipts by block range.
///
/// Returns a vector where each element contains all receipts for a block in the range.
/// The outer vector index corresponds to blocks in the range (`block_range.start()` + index).
/// Empty blocks will have empty inner vectors.
///
/// This is more efficient than calling `receipts_by_block` multiple times for contiguous ranges
/// because it can leverage the underlying `receipts_by_tx_range` for the entire transaction
/// span.
fn receipts_by_block_range(
&self,
block_range: RangeInclusive<BlockNumber>,
) -> ProviderResult<Vec<Vec<Self::Receipt>>>;
}
/// Extension of [`ReceiptProvider`] for providers that can resolve a [`BlockId`].
///
/// [`ReceiptProvider`] fetches receipts by number or hash. A [`BlockId`] may additionally be a
/// tag such as `BlockNumberOrTag::Safe`; resolving tags requires the bookkeeping that only
/// [`BlockIdReader`] implementers have, which is why this trait carries that supertrait. The
/// default methods resolve the id first and then fall back to the plain `ReceiptProvider`
/// accessors.
pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader {
    /// Returns all receipts for the block identified by `block`.
    ///
    /// Returns `None` if the id cannot be resolved or the block is unknown.
    fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult<Option<Vec<Self::Receipt>>> {
        match block {
            // A hash needs no resolution step.
            BlockId::Hash(hash) => self.receipts_by_block(BlockHashOrNumber::Hash(hash.block_hash)),
            // Numbers and tags go through the tag-resolution machinery first.
            BlockId::Number(num_tag) => match self.convert_block_number(num_tag)? {
                Some(num) => self.receipts_by_block(BlockHashOrNumber::Number(num)),
                None => Ok(None),
            },
        }
    }

    /// Convenience wrapper around [`Self::receipts_by_block_id`] for a number-or-tag input.
    ///
    /// Returns `None` if the block is not found.
    fn receipts_by_number_or_tag(
        &self,
        number_or_tag: BlockNumberOrTag,
    ) -> ProviderResult<Option<Vec<Self::Receipt>>> {
        self.receipts_by_block_id(number_or_tag.into())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/prune_checkpoint.rs | crates/storage/storage-api/src/prune_checkpoint.rs | use alloc::vec::Vec;
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_storage_errors::provider::ProviderResult;
/// The trait for fetching prune checkpoint related data.
///
/// A [`PruneCheckpoint`] records pruning progress for a given [`PruneSegment`].
#[auto_impl::auto_impl(&, Arc)]
pub trait PruneCheckpointReader: Send + Sync {
/// Fetch the prune checkpoint for the given segment.
///
/// Returns `None` if the segment has no recorded checkpoint yet.
fn get_prune_checkpoint(
&self,
segment: PruneSegment,
) -> ProviderResult<Option<PruneCheckpoint>>;
/// Fetch all the prune checkpoints.
fn get_prune_checkpoints(&self) -> ProviderResult<Vec<(PruneSegment, PruneCheckpoint)>>;
}
/// The trait for updating prune checkpoint related data.
///
/// Write-side counterpart of [`PruneCheckpointReader`].
#[auto_impl::auto_impl(&, Arc)]
pub trait PruneCheckpointWriter: Send + Sync {
/// Save prune checkpoint.
fn save_prune_checkpoint(
&self,
segment: PruneSegment,
checkpoint: PruneCheckpoint,
) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/state_writer.rs | crates/storage/storage-api/src/state_writer.rs | use alloy_primitives::BlockNumber;
use reth_execution_types::ExecutionOutcome;
use reth_storage_errors::provider::ProviderResult;
use reth_trie_common::HashedPostStateSorted;
use revm_database::{
states::{PlainStateReverts, StateChangeset},
OriginalValuesKnown,
};
use super::StorageLocation;
/// A trait specifically for writing state changes or reverts
pub trait StateWriter {
/// Receipt type included into [`ExecutionOutcome`].
type Receipt;
/// Write the state and receipts to the database and/or static files.
///
/// `write_receipts_to` selects the [`StorageLocation`] receipts are written to (database,
/// static files, or both).
///
/// NOTE(review): an earlier version of this doc referenced a `static_file_producer`
/// argument that no longer exists in the signature; receipt routing is controlled by
/// `write_receipts_to` now.
fn write_state(
&self,
execution_outcome: &ExecutionOutcome<Self::Receipt>,
is_value_known: OriginalValuesKnown,
write_receipts_to: StorageLocation,
) -> ProviderResult<()>;
/// Write state reverts to the database.
///
/// NOTE: Reverts will delete all wiped storage from plain state.
fn write_state_reverts(
&self,
reverts: PlainStateReverts,
first_block: BlockNumber,
) -> ProviderResult<()>;
/// Write state changes to the database.
fn write_state_changes(&self, changes: StateChangeset) -> ProviderResult<()>;
/// Writes the hashed state changes to the database
fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()>;
/// Remove the block range of state above the given block. The state of the passed block is not
/// removed.
fn remove_state_above(
&self,
block: BlockNumber,
remove_receipts_from: StorageLocation,
) -> ProviderResult<()>;
/// Take the block range of state, recreating the [`ExecutionOutcome`]. The state of the passed
/// block is not removed.
fn take_state_above(
&self,
block: BlockNumber,
remove_receipts_from: StorageLocation,
) -> ProviderResult<ExecutionOutcome<Self::Receipt>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/header_sync_gap.rs | crates/storage/storage-api/src/header_sync_gap.rs | use alloy_primitives::BlockNumber;
use reth_primitives_traits::{BlockHeader, SealedHeader};
use reth_storage_errors::provider::ProviderResult;
/// Provider for getting the local tip header for sync gap calculation.
pub trait HeaderSyncGapProvider: Send + Sync {
/// The header type.
type Header: BlockHeader;
/// Returns the local tip header for the given highest uninterrupted block.
///
/// The header is returned sealed, i.e. paired with its block hash.
fn local_tip_header(
&self,
highest_uninterrupted_block: BlockNumber,
) -> ProviderResult<SealedHeader<Self::Header>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/block_writer.rs | crates/storage/storage-api/src/block_writer.rs | use crate::{NodePrimitivesProvider, StorageLocation};
use alloc::vec::Vec;
use alloy_primitives::BlockNumber;
use reth_db_models::StoredBlockBodyIndices;
use reth_execution_types::{Chain, ExecutionOutcome};
use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock};
use reth_storage_errors::provider::ProviderResult;
use reth_trie_common::{updates::TrieUpdates, HashedPostStateSorted};
/// `BlockExecution` Writer
///
/// Extends [`BlockWriter`] with the ability to unwind blocks together with their execution
/// result, for providers whose primitives match the written block type.
pub trait BlockExecutionWriter:
NodePrimitivesProvider<Primitives: NodePrimitives<Block = Self::Block>> + BlockWriter + Send + Sync
{
/// Take all of the blocks above the provided number and their execution result
///
/// The passed block number will stay in the database.
///
/// Accepts [`StorageLocation`] specifying from where should transactions and receipts be
/// removed.
fn take_block_and_execution_above(
&self,
block: BlockNumber,
remove_from: StorageLocation,
) -> ProviderResult<Chain<Self::Primitives>>;
/// Remove all of the blocks above the provided number and their execution result
///
/// The passed block number will stay in the database.
///
/// Accepts [`StorageLocation`] specifying from where should transactions and receipts be
/// removed.
fn remove_block_and_execution_above(
&self,
block: BlockNumber,
remove_from: StorageLocation,
) -> ProviderResult<()>;
}
// A shared reference to a writer is itself a writer: forward both methods to the
// referenced `T` via UFCS (`self: &&T` coerces to `&T` at the argument position),
// matching the delegation style used by the by-ref `BlockReader` impl.
impl<T: BlockExecutionWriter> BlockExecutionWriter for &T {
    fn take_block_and_execution_above(
        &self,
        block: BlockNumber,
        remove_from: StorageLocation,
    ) -> ProviderResult<Chain<Self::Primitives>> {
        T::take_block_and_execution_above(self, block, remove_from)
    }

    fn remove_block_and_execution_above(
        &self,
        block: BlockNumber,
        remove_from: StorageLocation,
    ) -> ProviderResult<()> {
        T::remove_block_and_execution_above(self, block, remove_from)
    }
}
/// Block Writer
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait BlockWriter: Send + Sync {
/// The body this writer can write.
type Block: Block;
/// The receipt type for [`ExecutionOutcome`].
type Receipt: Send + Sync;
/// Insert full block and make it canonical. Parent tx num and transition id is taken from
/// parent block in database.
///
/// Return [`StoredBlockBodyIndices`] that contains indices of the first and last transactions
/// and transition in the block.
///
/// Accepts [`StorageLocation`] value which specifies where transactions and headers should be
/// written.
fn insert_block(
&self,
block: RecoveredBlock<Self::Block>,
write_to: StorageLocation,
) -> ProviderResult<StoredBlockBodyIndices>;
/// Appends a batch of block bodies extending the canonical chain. This is invoked during
/// `Bodies` stage and does not write to `TransactionHashNumbers` and `TransactionSenders`
/// tables which are populated on later stages.
///
/// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty.
fn append_block_bodies(
&self,
bodies: Vec<(BlockNumber, Option<<Self::Block as Block>::Body>)>,
write_to: StorageLocation,
) -> ProviderResult<()>;
/// Removes all blocks above the given block number from the database.
///
/// Note: This does not remove state or execution data.
fn remove_blocks_above(
&self,
block: BlockNumber,
remove_from: StorageLocation,
) -> ProviderResult<()>;
/// Removes all block bodies above the given block number from the database.
fn remove_bodies_above(
&self,
block: BlockNumber,
remove_from: StorageLocation,
) -> ProviderResult<()>;
/// Appends a batch of sealed blocks to the blockchain, including sender information, and
/// updates the post-state.
///
/// Inserts the blocks into the database and updates the state with
/// provided `BundleState`.
///
/// # Parameters
///
/// - `blocks`: Vector of `RecoveredBlock` instances to append.
/// - `execution_outcome`: Post-state information to update after appending.
/// - `hashed_state`: Sorted hashed post-state to persist alongside the blocks.
/// - `trie_updates`: Trie updates to persist alongside the state.
///
/// # Returns
///
/// Returns `Ok(())` on success, or an error if any operation fails.
fn append_blocks_with_state(
&self,
blocks: Vec<RecoveredBlock<Self::Block>>,
execution_outcome: &ExecutionOutcome<Self::Receipt>,
hashed_state: HashedPostStateSorted,
trie_updates: TrieUpdates,
) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/trie.rs | crates/storage/storage-api/src/trie.rs | use alloc::vec::Vec;
use alloy_primitives::{map::B256Map, Address, Bytes, B256};
use reth_storage_errors::provider::ProviderResult;
use reth_trie_common::{
updates::{StorageTrieUpdates, TrieUpdates},
AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof,
StorageProof, TrieInput,
};
/// A type that can compute the state root of a given post state.
///
/// All computations are performed on top of the provider's current state.
#[auto_impl::auto_impl(&, Box, Arc)]
pub trait StateRootProvider: Send + Sync {
/// Returns the state root of the `BundleState` on top of the current state.
///
/// # Note
///
/// It is recommended to provide a different implementation from
/// `state_root_with_updates` since it affects the memory usage during state root
/// computation.
fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult<B256>;
/// Returns the state root of the `HashedPostState` on top of the current state but reuses the
/// intermediate nodes to speed up the computation. It's up to the caller to construct the
/// prefix sets and inform the provider of the trie paths that have changes.
fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult<B256>;
/// Returns the state root of the `HashedPostState` on top of the current state with trie
/// updates to be committed to the database.
fn state_root_with_updates(
&self,
hashed_state: HashedPostState,
) -> ProviderResult<(B256, TrieUpdates)>;
/// Returns state root and trie updates.
/// See [`StateRootProvider::state_root_from_nodes`] for more info.
fn state_root_from_nodes_with_updates(
&self,
input: TrieInput,
) -> ProviderResult<(B256, TrieUpdates)>;
}
/// A type that can compute the storage root for a given account.
///
/// Storage-trie analogue of [`StateRootProvider`]: computes roots and proofs for a single
/// account's storage slots.
#[auto_impl::auto_impl(&, Box, Arc)]
pub trait StorageRootProvider: Send + Sync {
/// Returns the storage root of the `HashedStorage` for target address on top of the current
/// state.
fn storage_root(&self, address: Address, hashed_storage: HashedStorage)
-> ProviderResult<B256>;
/// Returns the storage proof of the `HashedStorage` for target slot on top of the current
/// state.
fn storage_proof(
&self,
address: Address,
slot: B256,
hashed_storage: HashedStorage,
) -> ProviderResult<StorageProof>;
/// Returns the storage multiproof for target slots.
fn storage_multiproof(
&self,
address: Address,
slots: &[B256],
hashed_storage: HashedStorage,
) -> ProviderResult<StorageMultiProof>;
}
/// A type that can generate state proof on top of a given post state.
#[auto_impl::auto_impl(&, Box, Arc)]
pub trait StateProofProvider: Send + Sync {
/// Get account and storage proofs of target keys in the `HashedPostState`
/// on top of the current state.
fn proof(
&self,
input: TrieInput,
address: Address,
slots: &[B256],
) -> ProviderResult<AccountProof>;
/// Generate [`MultiProof`] for target hashed account and corresponding
/// hashed storage slot keys.
fn multiproof(
&self,
input: TrieInput,
targets: MultiProofTargets,
) -> ProviderResult<MultiProof>;
/// Get trie witness for provided state.
///
/// Returns the trie nodes as raw [`Bytes`]; the exact encoding is implementation-defined
/// (NOTE(review): confirm with the implementation).
fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult<Vec<Bytes>>;
}
/// Trie Writer
///
/// Persists [`TrieUpdates`], e.g. those returned by
/// [`StateRootProvider::state_root_with_updates`].
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait TrieWriter: Send + Sync {
/// Writes trie updates to the database.
///
/// Returns the number of entries modified.
fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult<usize>;
}
/// Storage Trie Writer
///
/// Counterpart of [`TrieWriter`] for per-account storage tries.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait StorageTrieWriter: Send + Sync {
/// Writes storage trie updates from the given storage trie map.
///
/// First sorts the storage trie updates by the hashed address key, writing in sorted order.
///
/// Returns the number of entries modified.
fn write_storage_trie_updates(
&self,
storage_tries: &B256Map<StorageTrieUpdates>,
) -> ProviderResult<usize>;
/// Writes storage trie updates for the given hashed address.
fn write_individual_storage_trie_updates(
&self,
hashed_address: B256,
updates: &StorageTrieUpdates,
) -> ProviderResult<usize>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/full.rs | crates/storage/storage-api/src/full.rs | //! Helper trait for full rpc provider
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use crate::{
BlockReaderIdExt, HeaderProvider, StageCheckpointReader, StateProviderFactory,
TransactionsProvider,
};
/// Helper trait to unify all provider traits required to support `eth` RPC server behaviour, for
/// simplicity.
///
/// Do not implement this manually: a blanket implementation covers every type that satisfies
/// the bounds.
pub trait FullRpcProvider:
StateProviderFactory
+ ChainSpecProvider<ChainSpec: EthereumHardforks>
+ BlockReaderIdExt
+ HeaderProvider
+ TransactionsProvider
+ StageCheckpointReader
+ Clone
+ Unpin
+ 'static
{
}
// Blanket implementation: any type satisfying all the listed bounds is a `FullRpcProvider`.
impl<T> FullRpcProvider for T where
T: StateProviderFactory
+ ChainSpecProvider<ChainSpec: EthereumHardforks>
+ BlockReaderIdExt
+ HeaderProvider
+ TransactionsProvider
+ StageCheckpointReader
+ Clone
+ Unpin
+ 'static
{
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/hashing.rs | crates/storage/storage-api/src/hashing.rs | use alloc::collections::{BTreeMap, BTreeSet};
use alloy_primitives::{map::HashMap, Address, BlockNumber, B256};
use auto_impl::auto_impl;
use core::ops::{RangeBounds, RangeInclusive};
use reth_db_api::models::BlockNumberAddress;
use reth_db_models::AccountBeforeTx;
use reth_primitives_traits::{Account, StorageEntry};
use reth_storage_errors::provider::ProviderResult;
/// Hashing Writer
///
/// Maintains the hashed (key-hashed) copies of account and storage state that back state
/// root computation.
#[auto_impl(&, Arc, Box)]
pub trait HashingWriter: Send + Sync {
/// Unwind and clear account hashing.
///
/// # Returns
///
/// Set of hashed keys of updated accounts.
fn unwind_account_hashing<'a>(
&self,
changesets: impl Iterator<Item = &'a (BlockNumber, AccountBeforeTx)>,
) -> ProviderResult<BTreeMap<B256, Option<Account>>>;
/// Unwind and clear account hashing in a given block range.
///
/// # Returns
///
/// Set of hashed keys of updated accounts.
fn unwind_account_hashing_range(
&self,
range: impl RangeBounds<BlockNumber>,
) -> ProviderResult<BTreeMap<B256, Option<Account>>>;
/// Inserts the given accounts into the hashed account state, keyed by hashed address.
///
/// NOTE(review): this previously said the accounts are inserted into the `AccountsHistory`
/// table, which appears to be a copy-paste error (history indexing is a separate concern) —
/// confirm the exact destination table with the implementation.
///
/// # Returns
///
/// Set of hashed keys of updated accounts.
fn insert_account_for_hashing(
&self,
accounts: impl IntoIterator<Item = (Address, Option<Account>)>,
) -> ProviderResult<BTreeMap<B256, Option<Account>>>;
/// Unwind and clear storage hashing.
///
/// # Returns
///
/// Mapping of hashed keys of updated accounts to their respective updated hashed slots.
fn unwind_storage_hashing(
&self,
changesets: impl Iterator<Item = (BlockNumberAddress, StorageEntry)>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>>;
/// Unwind and clear storage hashing in a given block range.
///
/// # Returns
///
/// Mapping of hashed keys of updated accounts to their respective updated hashed slots.
fn unwind_storage_hashing_range(
&self,
range: impl RangeBounds<BlockNumberAddress>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>>;
/// Iterates over storages and inserts them to hashing table.
///
/// # Returns
///
/// Mapping of hashed keys of updated accounts to their respective updated hashed slots.
fn insert_storage_for_hashing(
&self,
storages: impl IntoIterator<Item = (Address, impl IntoIterator<Item = StorageEntry>)>,
) -> ProviderResult<HashMap<B256, BTreeSet<B256>>>;
/// Calculate the hashes of all changed accounts and storages, and finally calculate the state
/// root.
///
/// The hashes are calculated from `fork_block_number + 1` to `current_block_number`.
///
/// The resulting state root is compared with `expected_state_root`.
fn insert_hashes(
&self,
range: RangeInclusive<BlockNumber>,
end_block_hash: B256,
expected_state_root: B256,
) -> ProviderResult<()>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/block_hash.rs | crates/storage/storage-api/src/block_hash.rs | use alloc::vec::Vec;
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::{BlockNumber, B256};
use reth_storage_errors::provider::ProviderResult;
/// Client trait for fetching block hashes by number.
#[auto_impl::auto_impl(&, Arc, Box)]
pub trait BlockHashReader: Send + Sync {
    /// Get the hash of the block with the given number. Returns `None` if no block with this number
    /// exists.
    fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>>;

    /// Converts a [`BlockHashOrNumber`] into a block hash.
    ///
    /// A `Hash` is returned as-is; a `Number` is resolved via [`Self::block_hash`], so `None`
    /// is returned when no block with that number exists.
    fn convert_block_hash(
        &self,
        hash_or_number: BlockHashOrNumber,
    ) -> ProviderResult<Option<B256>> {
        match hash_or_number {
            BlockHashOrNumber::Hash(hash) => Ok(Some(hash)),
            BlockHashOrNumber::Number(num) => self.block_hash(num),
        }
    }

    /// Get canonical block hashes for the given block number range.
    ///
    /// Returns the available hashes of that range.
    ///
    /// Note: The range is `start..end`, so the expected result is `[start..end)`
    fn canonical_hashes_range(
        &self,
        start: BlockNumber,
        end: BlockNumber,
    ) -> ProviderResult<Vec<B256>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/storage-api/src/account.rs | crates/storage/storage-api/src/account.rs | use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use alloy_primitives::{Address, BlockNumber};
use auto_impl::auto_impl;
use core::ops::{RangeBounds, RangeInclusive};
use reth_db_models::AccountBeforeTx;
use reth_primitives_traits::Account;
use reth_storage_errors::provider::ProviderResult;
/// Account reader
#[auto_impl(&, Arc, Box)]
pub trait AccountReader {
    /// Get basic account information.
    ///
    /// Returns `None` if the account doesn't exist.
    fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>>;
}
/// Extended account reader with changeset-aware and batch lookups.
#[auto_impl(&, Arc, Box)]
pub trait AccountExtReader {
    /// Iterate over account changesets and return all account address that were changed.
    fn changed_accounts_with_range(
        &self,
        _range: impl RangeBounds<BlockNumber>,
    ) -> ProviderResult<BTreeSet<Address>>;

    /// Get basic account information for multiple accounts. A more efficient version than calling
    /// [`AccountReader::basic_account`] repeatedly.
    ///
    /// Each returned entry pairs the queried address with its account info, where the info is
    /// `None` if the account doesn't exist.
    fn basic_accounts(
        &self,
        _iter: impl IntoIterator<Item = Address>,
    ) -> ProviderResult<Vec<(Address, Option<Account>)>>;

    /// Iterate over account changesets and return all account addresses that were changed alongside
    /// each specific set of blocks.
    ///
    /// NOTE: Get inclusive range of blocks.
    fn changed_accounts_and_blocks_with_range(
        &self,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<BTreeMap<Address, Vec<BlockNumber>>>;
}
/// `AccountChange` reader
#[auto_impl(&, Arc, Box)]
pub trait ChangeSetReader {
    /// Iterate over account changesets and return the account state from before this block.
    fn account_block_changeset(
        &self,
        block_number: BlockNumber,
    ) -> ProviderResult<Vec<AccountBeforeTx>>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/mdbx-sys/build.rs | crates/storage/libmdbx-rs/mdbx-sys/build.rs | use std::{
env,
path::{Path, PathBuf},
};
/// Build script: generates Rust bindings for the vendored `libmdbx` C sources and
/// compiles them into a static library (`libmdbx.a`).
fn main() {
    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
    // Vendored libmdbx C sources live next to this build script.
    let mdbx = manifest_dir.join("libmdbx");
    // Rebuild whenever any vendored source file changes.
    println!("cargo:rerun-if-changed={}", mdbx.display());
    let bindings = PathBuf::from(std::env::var("OUT_DIR").unwrap()).join("bindings.rs");
    generate_bindings(&mdbx, &bindings);
    let mut cc = cc::Build::new();
    cc.flag_if_supported("-Wno-unused-parameter").flag_if_supported("-Wuninitialized");
    if env::var("CARGO_CFG_TARGET_OS").unwrap() != "linux" {
        cc.flag_if_supported("-Wbad-function-cast");
    }
    // Embed the effective compiler flags into the library (libmdbx reports them via
    // MDBX_BUILD_FLAGS) and disable transaction-owner checking.
    // NOTE(review): MDBX_TXN_CHECKOWNER=0 means mdbx will not verify that transactions are
    // used from their owning thread — presumably the Rust wrapper enforces this; confirm.
    let flags = format!("{:?}", cc.get_compiler().cflags_env());
    cc.define("MDBX_BUILD_FLAGS", flags.as_str()).define("MDBX_TXN_CHECKOWNER", "0");
    // Enable debugging on debug builds
    #[cfg(debug_assertions)]
    cc.define("MDBX_DEBUG", "1").define("MDBX_ENABLE_PROFGC", "1");
    // Disables debug logging on optimized builds
    #[cfg(not(debug_assertions))]
    cc.define("MDBX_DEBUG", "0").define("NDEBUG", None);
    // Propagate `-C target-cpu=native` to the C compiler (not supported by MSVC).
    let rustflags = env::var("CARGO_ENCODED_RUSTFLAGS").unwrap();
    if rustflags.contains("target-cpu=native") &&
        env::var("CARGO_CFG_TARGET_ENV").unwrap() != "msvc"
    {
        cc.flag("-march=native");
    }
    cc.file(mdbx.join("mdbx.c")).compile("libmdbx.a");
}
/// Runs `bindgen` over `mdbx.h` and writes the generated bindings to `out_file`.
///
/// Only `MDBX_`/`mdbx_`-prefixed items are exposed. A custom [`ParseCallbacks`] forces the
/// listed status/error-code macros to be generated as signed `int` (these codes can be
/// negative), while every other integer macro defaults to unsigned.
fn generate_bindings(mdbx: &Path, out_file: &Path) {
    use bindgen::{
        callbacks::{IntKind, ParseCallbacks},
        Formatter,
    };

    #[derive(Debug)]
    struct Callbacks;

    impl ParseCallbacks for Callbacks {
        fn int_macro(&self, name: &str, _value: i64) -> Option<IntKind> {
            match name {
                // Status and error codes: must be signed, since mdbx error codes are
                // negative integers.
                "MDBX_SUCCESS" |
                "MDBX_KEYEXIST" |
                "MDBX_NOTFOUND" |
                "MDBX_PAGE_NOTFOUND" |
                "MDBX_CORRUPTED" |
                "MDBX_PANIC" |
                "MDBX_VERSION_MISMATCH" |
                "MDBX_INVALID" |
                "MDBX_MAP_FULL" |
                "MDBX_DBS_FULL" |
                "MDBX_READERS_FULL" |
                "MDBX_TLS_FULL" |
                "MDBX_TXN_FULL" |
                "MDBX_CURSOR_FULL" |
                "MDBX_PAGE_FULL" |
                "MDBX_MAP_RESIZED" |
                "MDBX_INCOMPATIBLE" |
                "MDBX_BAD_RSLOT" |
                "MDBX_BAD_TXN" |
                "MDBX_BAD_VALSIZE" |
                "MDBX_BAD_DBI" |
                "MDBX_LOG_DONTCHANGE" |
                "MDBX_DBG_DONTCHANGE" |
                "MDBX_RESULT_TRUE" |
                "MDBX_UNABLE_EXTEND_MAPSIZE" |
                "MDBX_PROBLEM" |
                "MDBX_LAST_LMDB_ERRCODE" |
                "MDBX_BUSY" |
                "MDBX_EMULTIVAL" |
                "MDBX_EBADSIGN" |
                "MDBX_WANNA_RECOVERY" |
                "MDBX_EKEYMISMATCH" |
                "MDBX_TOO_LARGE" |
                "MDBX_THREAD_MISMATCH" |
                "MDBX_TXN_OVERLAPPING" |
                "MDBX_LAST_ERRCODE" => Some(IntKind::Int),
                // Everything else (flags, limits, ...) stays unsigned.
                _ => Some(IntKind::UInt),
            }
        }
    }

    let bindings = bindgen::Builder::default()
        .header(mdbx.join("mdbx.h").to_string_lossy())
        .allowlist_var("^(MDBX|mdbx)_.*")
        .allowlist_type("^(MDBX|mdbx)_.*")
        .allowlist_function("^(MDBX|mdbx)_.*")
        .size_t_is_usize(true)
        .merge_extern_blocks(true)
        .parse_callbacks(Box::new(Callbacks))
        .layout_tests(false)
        .prepend_enum_name(false)
        .generate_comments(false)
        .formatter(Formatter::Rustfmt)
        .generate()
        .expect("Unable to generate bindings");

    bindings.write_to_file(out_file).expect("Couldn't write bindings!");
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/mdbx-sys/src/lib.rs | crates/storage/libmdbx-rs/mdbx-sys/src/lib.rs | //! [`libmdbx`](https://github.com/erthink/libmdbx) bindings.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![allow(non_upper_case_globals, non_camel_case_types, non_snake_case, clippy::all)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/cursor.rs | crates/storage/libmdbx-rs/src/cursor.rs | use crate::{
error::{mdbx_result, Error, Result},
flags::*,
mdbx_try_optional,
transaction::{TransactionKind, RW},
TableObject, Transaction,
};
use ffi::{
MDBX_cursor_op, MDBX_FIRST, MDBX_FIRST_DUP, MDBX_GET_BOTH, MDBX_GET_BOTH_RANGE,
MDBX_GET_CURRENT, MDBX_GET_MULTIPLE, MDBX_LAST, MDBX_LAST_DUP, MDBX_NEXT, MDBX_NEXT_DUP,
MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE,
MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE,
};
use std::{borrow::Cow, ffi::c_void, fmt, marker::PhantomData, mem, ptr};
/// A cursor for navigating the items within a database.
pub struct Cursor<K>
where
    K: TransactionKind,
{
    // The transaction the cursor was opened in. Holding a handle ties the raw cursor's
    // validity to the transaction (all raw operations go through `txn.txn_execute`).
    txn: Transaction<K>,
    // Owned raw MDBX cursor; closed in `Drop`.
    cursor: *mut ffi::MDBX_cursor,
}
impl<K> Cursor<K>
where
    K: TransactionKind,
{
    /// Opens a new raw cursor on database `dbi` within the given transaction.
    pub(crate) fn new(txn: Transaction<K>, dbi: ffi::MDBX_dbi) -> Result<Self> {
        let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut();
        unsafe {
            // Double `?`: outer for the txn-lock result, inner for the mdbx return code.
            txn.txn_execute(|txn_ptr| {
                mdbx_result(ffi::mdbx_cursor_open(txn_ptr, dbi, &mut cursor))
            })??;
        }
        Ok(Self { txn, cursor })
    }

    /// Creates a new cursor positioned at the same place as `other` (used by `Clone`).
    fn new_at_position(other: &Self) -> Result<Self> {
        unsafe {
            let cursor = ffi::mdbx_cursor_create(ptr::null_mut());
            let res = ffi::mdbx_cursor_copy(other.cursor(), cursor);
            // Wrap the raw cursor *before* checking `res`, so that if the copy failed the
            // new handle is still released by `Drop` on early return.
            let s = Self { txn: other.txn.clone(), cursor };
            mdbx_result(res)?;
            Ok(s)
        }
    }

    /// Returns a raw pointer to the underlying MDBX cursor.
    ///
    /// The caller **must** ensure that the pointer is not used after the
    /// lifetime of the cursor.
    pub const fn cursor(&self) -> *mut ffi::MDBX_cursor {
        self.cursor
    }

    /// Returns an iterator over the raw key value slices.
    pub fn iter_slices<'a>(self) -> IntoIter<K, Cow<'a, [u8]>, Cow<'a, [u8]>> {
        self.into_iter()
    }

    /// Returns an iterator over database items.
    #[expect(clippy::should_implement_trait)]
    pub fn into_iter<Key, Value>(self) -> IntoIter<K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        IntoIter::new(self, MDBX_NEXT, MDBX_NEXT)
    }

    /// Retrieves a key/data pair from the cursor. Depending on the cursor op,
    /// the current key may be returned.
    ///
    /// Returns `(key, value, flag)`, where `key` is `None` when MDBX did not write a new
    /// key back (i.e. the op only returned data), and `flag` is the boolean result of the
    /// underlying `mdbx_cursor_get` call (e.g. the "found greater" bit for lowerbound ops).
    fn get<Key, Value>(
        &self,
        key: Option<&[u8]>,
        data: Option<&[u8]>,
        op: MDBX_cursor_op,
    ) -> Result<(Option<Key>, Value, bool)>
    where
        Key: TableObject,
        Value: TableObject,
    {
        unsafe {
            let mut key_val = slice_to_val(key);
            let mut data_val = slice_to_val(data);
            // Remember the original buffer pointers so we can detect whether MDBX replaced
            // them with pointers into the database map.
            let key_ptr = key_val.iov_base;
            let data_ptr = data_val.iov_base;
            self.txn.txn_execute(|txn| {
                let v = mdbx_result(ffi::mdbx_cursor_get(
                    self.cursor,
                    &mut key_val,
                    &mut data_val,
                    op,
                ))?;
                // On success MDBX must have pointed `data_val` at database memory.
                assert_ne!(data_ptr, data_val.iov_base);
                let key_out = {
                    // If the key pointer is unchanged, MDBX did not write a new key back.
                    if ptr::eq(key_ptr, key_val.iov_base) {
                        None
                    } else {
                        Some(Key::decode_val::<K>(txn, key_val)?)
                    }
                };
                let data_out = Value::decode_val::<K>(txn, data_val)?;
                Ok((key_out, data_out, v))
            })?
        }
    }

    /// Like [`Self::get`] but discards the key, mapping `MDBX_NOTFOUND` to `None`.
    fn get_value<Value>(
        &mut self,
        key: Option<&[u8]>,
        data: Option<&[u8]>,
        op: MDBX_cursor_op,
    ) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        let (_, v, _) = mdbx_try_optional!(self.get::<(), Value>(key, data, op));
        Ok(Some(v))
    }

    /// Like [`Self::get`] but expects a key to be returned, mapping `MDBX_NOTFOUND` to `None`.
    fn get_full<Key, Value>(
        &mut self,
        key: Option<&[u8]>,
        data: Option<&[u8]>,
        op: MDBX_cursor_op,
    ) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        let (k, v, _) = mdbx_try_optional!(self.get(key, data, op));
        Ok(Some((k.unwrap(), v)))
    }

    /// Position at first key/data item.
    pub fn first<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_FIRST)
    }

    /// [`DatabaseFlags::DUP_SORT`]-only: Position at first data item of current key.
    pub fn first_dup<Value>(&mut self) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        self.get_value(None, None, MDBX_FIRST_DUP)
    }

    /// [`DatabaseFlags::DUP_SORT`]-only: Position at key/data pair.
    pub fn get_both<Value>(&mut self, k: &[u8], v: &[u8]) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        self.get_value(Some(k), Some(v), MDBX_GET_BOTH)
    }

    /// [`DatabaseFlags::DUP_SORT`]-only: Position at given key and at first data greater than or
    /// equal to specified data.
    pub fn get_both_range<Value>(&mut self, k: &[u8], v: &[u8]) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        self.get_value(Some(k), Some(v), MDBX_GET_BOTH_RANGE)
    }

    /// Return key/data at current cursor position.
    pub fn get_current<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_GET_CURRENT)
    }

    /// DupFixed-only: Return up to a page of duplicate data items from current cursor position.
    /// Move cursor to prepare for [`Self::next_multiple()`].
    pub fn get_multiple<Value>(&mut self) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        self.get_value(None, None, MDBX_GET_MULTIPLE)
    }

    /// Position at last key/data item.
    pub fn last<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_LAST)
    }

    /// DupSort-only: Position at last data item of current key.
    pub fn last_dup<Value>(&mut self) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        self.get_value(None, None, MDBX_LAST_DUP)
    }

    /// Position at next data item
    #[expect(clippy::should_implement_trait)]
    pub fn next<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_NEXT)
    }

    /// [`DatabaseFlags::DUP_SORT`]-only: Position at next data item of current key.
    pub fn next_dup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_NEXT_DUP)
    }

    /// [`DatabaseFlags::DUP_FIXED`]-only: Return up to a page of duplicate data items from next
    /// cursor position. Move cursor to prepare for `MDBX_NEXT_MULTIPLE`.
    pub fn next_multiple<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_NEXT_MULTIPLE)
    }

    /// Position at first data item of next key.
    pub fn next_nodup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_NEXT_NODUP)
    }

    /// Position at previous data item.
    pub fn prev<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_PREV)
    }

    /// [`DatabaseFlags::DUP_SORT`]-only: Position at previous data item of current key.
    pub fn prev_dup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_PREV_DUP)
    }

    /// Position at last data item of previous key.
    pub fn prev_nodup<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_PREV_NODUP)
    }

    /// Position at specified key.
    pub fn set<Value>(&mut self, key: &[u8]) -> Result<Option<Value>>
    where
        Value: TableObject,
    {
        self.get_value(Some(key), None, MDBX_SET)
    }

    /// Position at specified key, return both key and data.
    pub fn set_key<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(Some(key), None, MDBX_SET_KEY)
    }

    /// Position at first key greater than or equal to specified key.
    pub fn set_range<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(Some(key), None, MDBX_SET_RANGE)
    }

    /// [`DatabaseFlags::DUP_FIXED`]-only: Position at previous page and return up to a page of
    /// duplicate data items.
    pub fn prev_multiple<Key, Value>(&mut self) -> Result<Option<(Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        self.get_full(None, None, MDBX_PREV_MULTIPLE)
    }

    /// Position at first key-value pair greater than or equal to specified, return both key and
    /// data, and the return code depends on a exact match.
    ///
    /// For non DupSort-ed collections this works the same as [`Self::set_range()`], but returns
    /// [false] if key found exactly and [true] if greater key was found.
    ///
    /// For DupSort-ed a data value is taken into account for duplicates, i.e. for a pairs/tuples of
    /// a key and an each data value of duplicates. Returns [false] if key-value pair found
    /// exactly and [true] if the next pair was returned.
    pub fn set_lowerbound<Key, Value>(&mut self, key: &[u8]) -> Result<Option<(bool, Key, Value)>>
    where
        Key: TableObject,
        Value: TableObject,
    {
        let (k, v, found) = mdbx_try_optional!(self.get(Some(key), None, MDBX_SET_LOWERBOUND));
        Ok(Some((found, k.unwrap(), v)))
    }

    /// Returns an iterator over database items.
    ///
    /// The iterator will begin with item next after the cursor, and continue until the end of the
    /// database. For new cursors, the iterator will begin with the first item in the database.
    ///
    /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), the
    /// duplicate data items of each key will be returned before moving on to
    /// the next key.
    pub fn iter<Key, Value>(&mut self) -> Iter<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT)
    }

    /// Iterate over database items starting from the beginning of the database.
    ///
    /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), the
    /// duplicate data items of each key will be returned before moving on to
    /// the next key.
    pub fn iter_start<Key, Value>(&mut self) -> Iter<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        Iter::new(self, ffi::MDBX_FIRST, ffi::MDBX_NEXT)
    }

    /// Iterate over database items starting from the given key.
    ///
    /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), the
    /// duplicate data items of each key will be returned before moving on to
    /// the next key.
    pub fn iter_from<Key, Value>(&mut self, key: &[u8]) -> Iter<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        // Seek first; a failed seek yields an iterator that returns the error once.
        let res: Result<Option<((), ())>> = self.set_range(key);
        if let Err(error) = res {
            return Iter::Err(Some(error))
        };
        Iter::new(self, ffi::MDBX_GET_CURRENT, ffi::MDBX_NEXT)
    }

    /// Iterate over duplicate database items. The iterator will begin with the
    /// item next after the cursor, and continue until the end of the database.
    /// Each item will be returned as an iterator of its duplicates.
    pub fn iter_dup<Key, Value>(&mut self) -> IterDup<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        IterDup::new(self, ffi::MDBX_NEXT)
    }

    /// Iterate over duplicate database items starting from the beginning of the
    /// database. Each item will be returned as an iterator of its duplicates.
    pub fn iter_dup_start<Key, Value>(&mut self) -> IterDup<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        IterDup::new(self, ffi::MDBX_FIRST)
    }

    /// Iterate over duplicate items in the database starting from the given
    /// key. Each item will be returned as an iterator of its duplicates.
    pub fn iter_dup_from<Key, Value>(&mut self, key: &[u8]) -> IterDup<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        // Seek first; a failed seek yields an iterator that returns the error once.
        let res: Result<Option<((), ())>> = self.set_range(key);
        if let Err(error) = res {
            return IterDup::Err(Some(error))
        };
        IterDup::new(self, ffi::MDBX_GET_CURRENT)
    }

    /// Iterate over the duplicates of the item in the database with the given key.
    pub fn iter_dup_of<Key, Value>(&mut self, key: &[u8]) -> Iter<'_, K, Key, Value>
    where
        Key: TableObject,
        Value: TableObject,
    {
        let res: Result<Option<()>> = self.set(key);
        match res {
            Ok(Some(_)) => (),
            Ok(None) => {
                // Key not present: park the cursor at the end so the iterator is empty.
                let _: Result<Option<((), ())>> = self.last();
                return Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT)
            }
            Err(error) => return Iter::Err(Some(error)),
        };
        Iter::new(self, ffi::MDBX_GET_CURRENT, ffi::MDBX_NEXT_DUP)
    }
}
impl Cursor<RW> {
    /// Puts a key/data pair into the database. The cursor will be positioned at
    /// the new data item, or on failure usually near it.
    pub fn put(&mut self, key: &[u8], data: &[u8], flags: WriteFlags) -> Result<()> {
        let key_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
        // NOTE(review): `data_val` is passed as `&mut` because `mdbx_cursor_put` may write
        // back into it for some flag combinations — confirm against the mdbx docs.
        let mut data_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void };
        mdbx_result(unsafe {
            self.txn.txn_execute(|_| {
                ffi::mdbx_cursor_put(self.cursor, &key_val, &mut data_val, flags.bits())
            })?
        })?;
        Ok(())
    }

    /// Deletes the current key/data pair.
    ///
    /// ### Flags
    ///
    /// [`WriteFlags::NO_DUP_DATA`] may be used to delete all data items for the
    /// current key, if the database was opened with [`DatabaseFlags::DUP_SORT`].
    pub fn del(&mut self, flags: WriteFlags) -> Result<()> {
        mdbx_result(unsafe {
            self.txn.txn_execute(|_| ffi::mdbx_cursor_del(self.cursor, flags.bits()))?
        })?;
        Ok(())
    }
}
impl<K> Clone for Cursor<K>
where
    K: TransactionKind,
{
    /// Clones by opening a fresh raw cursor at the same position (see `new_at_position`).
    ///
    /// NOTE: panics if acquiring the transaction or copying the cursor fails, because
    /// `Clone` cannot report errors.
    fn clone(&self) -> Self {
        self.txn.txn_execute(|_| Self::new_at_position(self).unwrap()).unwrap()
    }
}
impl<K> fmt::Debug for Cursor<K>
where
    K: TransactionKind,
{
    /// Renders as `Cursor { .. }` — the raw pointer and transaction internals are elided.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Cursor").finish_non_exhaustive()
    }
}
impl<K> Drop for Cursor<K>
where
    K: TransactionKind,
{
    fn drop(&mut self) {
        // To be able to close a cursor of a timed out transaction, we need to renew it first.
        // Hence the usage of `txn_execute_renew_on_timeout` here.
        // Any failure is deliberately ignored: `drop` has no way to report errors.
        let _ = self
            .txn
            .txn_execute_renew_on_timeout(|_| unsafe { ffi::mdbx_cursor_close(self.cursor) });
    }
}
/// Converts an optional byte slice into an `MDBX_val` pointing at the slice's bytes,
/// or a null/empty value for `None`.
///
/// The returned value borrows the slice's memory; the caller must not use it beyond the
/// slice's lifetime. The `*mut` cast exists only to satisfy the FFI signature.
const unsafe fn slice_to_val(slice: Option<&[u8]>) -> ffi::MDBX_val {
    match slice {
        Some(slice) => {
            ffi::MDBX_val { iov_len: slice.len(), iov_base: slice.as_ptr() as *mut c_void }
        }
        None => ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() },
    }
}
// SAFETY(review): `Cursor` holds a raw `*mut MDBX_cursor`, which is neither `Send` nor
// `Sync` by default. All raw-cursor operations in this module go through
// `txn.txn_execute(..)`, which appears to serialize access through the transaction —
// confirm against the transaction manager before relying on these impls.
unsafe impl<K> Send for Cursor<K> where K: TransactionKind {}
unsafe impl<K> Sync for Cursor<K> where K: TransactionKind {}
/// An owning iterator over the key/value pairs in an MDBX database.
#[derive(Debug)]
pub enum IntoIter<K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    /// An iterator that returns an error on every call to `IntoIter::next()`.
    /// Cursor.iter*() creates an Iter of this type when MDBX returns an error
    /// on retrieval of a cursor. Using this variant instead of returning
    /// an error makes `Cursor.iter()`* methods infallible, so consumers only
    /// need to check the result of `IntoIter::next()`.
    Err(Option<Error>),
    /// An iterator that returns an Item on calls to `IntoIter::next()`.
    /// The Item is a [Result], so this variant
    /// might still return an error, if retrieval of the key/value pair
    /// fails for some reason.
    Ok {
        /// The MDBX cursor with which to iterate.
        cursor: Cursor<K>,
        /// The first operation to perform when the consumer calls `IntoIter::next()`.
        op: ffi::MDBX_cursor_op,
        /// The next and subsequent operations to perform.
        next_op: ffi::MDBX_cursor_op,
        _marker: PhantomData<(Key, Value)>,
    },
}
impl<K, Key, Value> IntoIter<K, Key, Value>
where
K: TransactionKind,
Key: TableObject,
Value: TableObject,
{
/// Creates a new iterator backed by the given cursor.
fn new(cursor: Cursor<K>, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self {
Self::Ok { cursor, op, next_op, _marker: Default::default() }
}
}
impl<K, Key, Value> Iterator for IntoIter<K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    type Item = Result<(Key, Value)>;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            Self::Ok { cursor, op, next_op, .. } => {
                let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                // Use the initial `op` for this call, then switch to `next_op` for the rest.
                let op = mem::replace(op, *next_op);
                unsafe {
                    let result = cursor.txn.txn_execute(|txn| {
                        match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
                            ffi::MDBX_SUCCESS => {
                                let key = match Key::decode_val::<K>(txn, key) {
                                    Ok(v) => v,
                                    Err(e) => return Some(Err(e)),
                                };
                                let data = match Value::decode_val::<K>(txn, data) {
                                    Ok(v) => v,
                                    Err(e) => return Some(Err(e)),
                                };
                                Some(Ok((key, data)))
                            }
                            // MDBX_ENODATA can occur when the cursor was previously sought to a
                            // non-existent value, e.g. iter_from with a
                            // key greater than all values in the database.
                            ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None,
                            error => Some(Err(Error::from_err_code(error))),
                        }
                    });
                    // A txn-lock failure surfaces as a single `Err` item.
                    match result {
                        Ok(result) => result,
                        Err(err) => Some(Err(err)),
                    }
                }
            }
            // The stored error is yielded exactly once, then the iterator is exhausted.
            Self::Err(err) => err.take().map(Err),
        }
    }
}
/// A borrowing iterator over the key/value pairs in an MDBX database.
#[derive(Debug)]
pub enum Iter<'cur, K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    /// An iterator that returns an error on every call to [`Iter::next()`].
    /// Cursor.iter*() creates an Iter of this type when MDBX returns an error
    /// on retrieval of a cursor. Using this variant instead of returning
    /// an error makes `Cursor.iter()`* methods infallible, so consumers only
    /// need to check the result of `Iter.next()`.
    Err(Option<Error>),
    /// An iterator that returns an Item on calls to [`Iter::next()`].
    /// The Item is a [Result], so this variant
    /// might still return an error, if retrieval of the key/value pair
    /// fails for some reason.
    Ok {
        /// The MDBX cursor with which to iterate.
        cursor: &'cur mut Cursor<K>,
        /// The first operation to perform when the consumer calls [`Iter::next()`].
        op: ffi::MDBX_cursor_op,
        /// The next and subsequent operations to perform.
        next_op: ffi::MDBX_cursor_op,
        _marker: PhantomData<fn(&'cur (), K, Key, Value)>,
    },
}
impl<'cur, K, Key, Value> Iter<'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject,
Value: TableObject,
{
/// Creates a new iterator backed by the given cursor.
fn new(
cursor: &'cur mut Cursor<K>,
op: ffi::MDBX_cursor_op,
next_op: ffi::MDBX_cursor_op,
) -> Self {
Iter::Ok { cursor, op, next_op, _marker: Default::default() }
}
}
impl<K, Key, Value> Iterator for Iter<'_, K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    type Item = Result<(Key, Value)>;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            Iter::Ok { cursor, op, next_op, .. } => {
                let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                // Use the initial `op` for this call, then switch to `next_op` for the rest.
                let op = mem::replace(op, *next_op);
                unsafe {
                    let result = cursor.txn.txn_execute(|txn| {
                        match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
                            ffi::MDBX_SUCCESS => {
                                let key = match Key::decode_val::<K>(txn, key) {
                                    Ok(v) => v,
                                    Err(e) => return Some(Err(e)),
                                };
                                let data = match Value::decode_val::<K>(txn, data) {
                                    Ok(v) => v,
                                    Err(e) => return Some(Err(e)),
                                };
                                Some(Ok((key, data)))
                            }
                            // MDBX_ENODATA can occur when the cursor was previously sought to a
                            // non-existent value, e.g. iter_from with a
                            // key greater than all values in the database.
                            ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA => None,
                            error => Some(Err(Error::from_err_code(error))),
                        }
                    });
                    // A txn-lock failure surfaces as a single `Err` item.
                    match result {
                        Ok(result) => result,
                        Err(err) => Some(Err(err)),
                    }
                }
            }
            // The stored error is yielded exactly once, then the iterator is exhausted.
            Iter::Err(err) => err.take().map(Err),
        }
    }
}
/// An iterator over the keys and duplicate values in an MDBX database.
///
/// The yielded items of the iterator are themselves iterators over the duplicate values for a
/// specific key.
pub enum IterDup<'cur, K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    /// An iterator that returns an error on every call to `Iter.next()`.
    /// Cursor.iter*() creates an Iter of this type when MDBX returns an error
    /// on retrieval of a cursor. Using this variant instead of returning
    /// an error makes `Cursor.iter()`* methods infallible, so consumers only
    /// need to check the result of `Iter.next()`.
    Err(Option<Error>),
    /// An iterator that returns an Item on calls to `Iter.next()`.
    /// The Item is a Result<(&'txn [u8], &'txn [u8])>, so this variant
    /// might still return an error, if retrieval of the key/value pair
    /// fails for some reason.
    Ok {
        /// The MDBX cursor with which to iterate.
        cursor: &'cur mut Cursor<K>,
        /// The first operation to perform when the consumer calls `Iter.next()`.
        op: MDBX_cursor_op,
        _marker: PhantomData<fn(&'cur (Key, Value))>,
    },
}
impl<'cur, K, Key, Value> IterDup<'cur, K, Key, Value>
where
K: TransactionKind,
Key: TableObject,
Value: TableObject,
{
/// Creates a new iterator backed by the given cursor.
fn new(cursor: &'cur mut Cursor<K>, op: MDBX_cursor_op) -> Self {
IterDup::Ok { cursor, op, _marker: Default::default() }
}
}
impl<K, Key, Value> fmt::Debug for IterDup<'_, K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    /// Renders as `IterDup { .. }` — consistent with the `Debug` impl for [`Cursor`],
    /// which also elides its non-`Debug` internals via `finish_non_exhaustive`.
    /// (Previously this used `finish()`, which printed a bare `IterDup` and hid the fact
    /// that fields were omitted.)
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IterDup").finish_non_exhaustive()
    }
}
impl<K, Key, Value> Iterator for IterDup<'_, K, Key, Value>
where
    K: TransactionKind,
    Key: TableObject,
    Value: TableObject,
{
    // Each yielded item is itself an iterator over the duplicates of one key.
    type Item = IntoIter<K, Key, Value>;

    fn next(&mut self) -> Option<Self::Item> {
        match self {
            IterDup::Ok { cursor, op, .. } => {
                let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
                // First call uses the configured `op`; afterwards always jump to the next key.
                let op = mem::replace(op, ffi::MDBX_NEXT_NODUP);
                let result = cursor.txn.txn_execute(|_| {
                    let err_code =
                        unsafe { ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) };
                    // On success, hand out an owning sub-iterator positioned at this key that
                    // walks its duplicates (MDBX_GET_CURRENT then MDBX_NEXT_DUP).
                    (err_code == ffi::MDBX_SUCCESS).then(|| {
                        IntoIter::new(
                            Cursor::new_at_position(&**cursor).unwrap(),
                            ffi::MDBX_GET_CURRENT,
                            ffi::MDBX_NEXT_DUP,
                        )
                    })
                });
                // A txn-lock failure surfaces as a sub-iterator that yields the error once.
                match result {
                    Ok(result) => result,
                    Err(err) => Some(IntoIter::Err(Some(err))),
                }
            }
            // The stored error is forwarded once, wrapped in an error sub-iterator.
            IterDup::Err(err) => err.take().map(|e| IntoIter::Err(Some(e))),
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/lib.rs | crates/storage/libmdbx-rs/src/lib.rs | #![doc = include_str!("../README.md")]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![allow(missing_docs, clippy::needless_pass_by_ref_mut)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![allow(clippy::borrow_as_ptr)]
pub extern crate reth_mdbx_sys as ffi;
pub use crate::{
codec::*,
cursor::{Cursor, Iter, IterDup},
database::Database,
environment::{
Environment, EnvironmentBuilder, EnvironmentKind, Geometry, HandleSlowReadersCallback,
HandleSlowReadersReturnCode, Info, PageSize, Stat,
},
error::{Error, Result},
flags::*,
transaction::{CommitLatency, Transaction, TransactionKind, RO, RW},
};
#[cfg(feature = "read-tx-timeouts")]
pub use crate::environment::read_transactions::MaxReadTransactionDuration;
mod codec;
mod cursor;
mod database;
mod environment;
mod error;
mod flags;
mod transaction;
mod txn_manager;
#[cfg(test)]
mod test_utils {
    use super::*;
    use byteorder::{ByteOrder, LittleEndian};
    use tempfile::tempdir;

    /// Regression test for <https://github.com/danburkert/lmdb-rs/issues/21>.
    /// This test reliably segfaults when run against lmdb compiled with opt level -O3 and newer
    /// GCC compilers.
    #[test]
    fn issue_21_regression() {
        const HEIGHT_KEY: [u8; 1] = [0];
        let dir = tempdir().unwrap();
        let env = {
            let mut builder = Environment::builder();
            builder.set_max_dbs(2);
            // Fixed-size map (min == max) to exercise the map-full/resize paths.
            builder
                .set_geometry(Geometry { size: Some(1_000_000..1_000_000), ..Default::default() });
            builder.open(dir.path()).expect("open mdbx env")
        };
        // Repeatedly write dup-sorted values under the same key across many transactions.
        for height in 0..1000 {
            let mut value = [0u8; 8];
            LittleEndian::write_u64(&mut value, height);
            let tx = env.begin_rw_txn().expect("begin_rw_txn");
            let index = tx.create_db(None, DatabaseFlags::DUP_SORT).expect("open index db");
            tx.put(index.dbi(), HEIGHT_KEY, value, WriteFlags::empty()).expect("tx.put");
            tx.commit().expect("tx.commit");
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/environment.rs | crates/storage/libmdbx-rs/src/environment.rs | use crate::{
database::Database,
error::{mdbx_result, Error, Result},
flags::EnvironmentFlags,
transaction::{RO, RW},
txn_manager::{TxnManager, TxnManagerMessage, TxnPtr},
Mode, SyncMode, Transaction, TransactionKind,
};
use byteorder::{ByteOrder, NativeEndian};
use mem::size_of;
use std::{
ffi::CString,
fmt::{self, Debug},
mem,
ops::{Bound, RangeBounds},
path::Path,
ptr,
sync::{mpsc::sync_channel, Arc},
thread::sleep,
time::Duration,
};
use tracing::warn;
/// The default maximum duration of a read transaction.
#[cfg(feature = "read-tx-timeouts")]
const DEFAULT_MAX_READ_TRANSACTION_DURATION: Duration = Duration::from_secs(5 * 60);
/// An environment supports multiple databases, all residing in the same shared-memory map.
///
/// Accessing the environment is thread-safe.
/// The environment will be closed when the last instance of this type is dropped.
#[derive(Clone)]
pub struct Environment {
    /// Shared, reference-counted internals; cloning an [`Environment`] is cheap, and the last
    /// clone to drop closes the underlying MDBX environment (see `EnvironmentInner::drop`).
    inner: Arc<EnvironmentInner>,
}
impl Environment {
    /// Creates a new builder for specifying options for opening an MDBX environment.
    pub fn builder() -> EnvironmentBuilder {
        // Every option starts as `None`, meaning MDBX's own defaults apply unless the caller
        // configures it via the builder's `set_*` methods.
        EnvironmentBuilder {
            flags: EnvironmentFlags::default(),
            max_readers: None,
            max_dbs: None,
            sync_bytes: None,
            sync_period: None,
            rp_augment_limit: None,
            loose_limit: None,
            dp_reserve_limit: None,
            txn_dp_limit: None,
            spill_max_denominator: None,
            spill_min_denominator: None,
            geometry: None,
            log_level: None,
            kind: Default::default(),
            handle_slow_readers: None,
            #[cfg(feature = "read-tx-timeouts")]
            max_read_transaction_duration: None,
        }
    }
    /// Returns true if the environment was opened as WRITEMAP.
    #[inline]
    pub fn is_write_map(&self) -> bool {
        self.inner.env_kind.is_write_map()
    }
    /// Returns the kind of the environment.
    #[inline]
    pub fn env_kind(&self) -> EnvironmentKind {
        self.inner.env_kind
    }
    /// Returns true if the environment was opened in [`crate::Mode::ReadWrite`] mode.
    #[inline]
    pub fn is_read_write(&self) -> Result<bool> {
        Ok(!self.is_read_only()?)
    }
    /// Returns true if the environment was opened in [`crate::Mode::ReadOnly`] mode.
    #[inline]
    pub fn is_read_only(&self) -> Result<bool> {
        // Queries the live environment info rather than caching the open flags.
        Ok(matches!(self.info()?.mode(), Mode::ReadOnly))
    }
    /// Returns the transaction manager.
    #[inline]
    pub(crate) fn txn_manager(&self) -> &TxnManager {
        &self.inner.txn_manager
    }
    /// Returns the number of timed out transactions that were not aborted by the user yet.
    #[cfg(feature = "read-tx-timeouts")]
    pub fn timed_out_not_aborted_transactions(&self) -> usize {
        // Falls back to 0 when the manager has no read-transaction tracking available.
        self.inner.txn_manager.timed_out_not_aborted_read_transactions().unwrap_or(0)
    }
    /// Create a read-only transaction for use with the environment.
    #[inline]
    pub fn begin_ro_txn(&self) -> Result<Transaction<RO>> {
        Transaction::new(self.clone())
    }
    /// Create a read-write transaction for use with the environment. This method will block while
    /// there are any other read-write transactions open on the environment.
    pub fn begin_rw_txn(&self) -> Result<Transaction<RW>> {
        let mut warned = false;
        let txn = loop {
            // Writer creation is serialized through the txn manager; a rendezvous channel
            // (capacity 0) delivers the result of the Begin request.
            let (tx, rx) = sync_channel(0);
            self.txn_manager().send_message(TxnManagerMessage::Begin {
                parent: TxnPtr(ptr::null_mut()),
                flags: RW::OPEN_FLAGS,
                sender: tx,
            });
            let res = rx.recv().unwrap();
            if matches!(&res, Err(Error::Busy)) {
                // The writer lock is held elsewhere: warn once, then poll every 250ms until
                // the lock becomes available. All other errors propagate via `?` below.
                if !warned {
                    warned = true;
                    warn!(target: "libmdbx", "Process stalled, awaiting read-write transaction lock.");
                }
                sleep(Duration::from_millis(250));
                continue
            }
            break res
        }?;
        Ok(Transaction::new_from_ptr(self.clone(), txn.0))
    }
    /// Returns a raw pointer to the underlying MDBX environment.
    ///
    /// The caller **must** ensure that the pointer is never dereferenced after the environment has
    /// been dropped.
    #[inline]
    pub(crate) fn env_ptr(&self) -> *mut ffi::MDBX_env {
        self.inner.env
    }
    /// Executes the given closure once
    ///
    /// This is only intended to be used when accessing mdbx ffi functions directly is required.
    ///
    /// The caller **must** ensure that the pointer is only used within the closure.
    #[inline]
    #[doc(hidden)]
    pub fn with_raw_env_ptr<F, T>(&self, f: F) -> T
    where
        F: FnOnce(*mut ffi::MDBX_env) -> T,
    {
        f(self.env_ptr())
    }
    /// Flush the environment data buffers to disk.
    pub fn sync(&self, force: bool) -> Result<bool> {
        mdbx_result(unsafe { ffi::mdbx_env_sync_ex(self.env_ptr(), force, false) })
    }
    /// Retrieves statistics about this environment.
    pub fn stat(&self) -> Result<Stat> {
        unsafe {
            // `stat` starts zeroed and is fully populated by `mdbx_env_stat_ex`; the size
            // argument hands MDBX the expected struct size.
            let mut stat = Stat::new();
            mdbx_result(ffi::mdbx_env_stat_ex(
                self.env_ptr(),
                ptr::null(),
                stat.mdb_stat(),
                size_of::<Stat>(),
            ))?;
            Ok(stat)
        }
    }
    /// Retrieves info about this environment.
    pub fn info(&self) -> Result<Info> {
        unsafe {
            // Same pattern as `stat()`: zero-initialize, let MDBX fill the struct in.
            let mut info = Info(mem::zeroed());
            mdbx_result(ffi::mdbx_env_info_ex(
                self.env_ptr(),
                ptr::null(),
                &mut info.0,
                size_of::<Info>(),
            ))?;
            Ok(info)
        }
    }
    /// Retrieves the total number of pages on the freelist.
    ///
    /// Along with [`Environment::info()`], this can be used to calculate the exact number
    /// of used pages as well as free pages in this environment.
    ///
    /// ```
    /// # use reth_libmdbx::Environment;
    /// let dir = tempfile::tempdir().unwrap();
    /// let env = Environment::builder().open(dir.path()).unwrap();
    /// let info = env.info().unwrap();
    /// let stat = env.stat().unwrap();
    /// let freelist = env.freelist().unwrap();
    /// let last_pgno = info.last_pgno() + 1; // pgno is 0 based.
    /// let total_pgs = info.map_size() / stat.page_size() as usize;
    /// let pgs_in_use = last_pgno - freelist;
    /// let pgs_free = total_pgs - pgs_in_use;
    /// ```
    ///
    /// Note:
    ///
    /// * MDBX stores all the freelists in the designated database 0 in each environment, and the
    ///   freelist count is stored at the beginning of the value as `uint32_t` in the native byte
    ///   order.
    ///
    /// * It will create a read transaction to traverse the freelist database.
    pub fn freelist(&self) -> Result<usize> {
        let mut freelist: usize = 0;
        let txn = self.begin_ro_txn()?;
        let db = Database::freelist_db();
        let cursor = txn.cursor(&db)?;
        for result in cursor.iter_slices() {
            let (_key, value) = result?;
            // NOTE(review): only the first 4 bytes are read below (`read_u32`), yet the length
            // check requires `size_of::<usize>()` bytes (8 on 64-bit targets), so values of
            // 4..8 bytes would be reported as `Corrupted`. Confirm whether `size_of::<u32>()`
            // was intended here.
            if value.len() < size_of::<usize>() {
                return Err(Error::Corrupted)
            }
            let s = &value[..size_of::<usize>()];
            freelist += NativeEndian::read_u32(s) as usize;
        }
        Ok(freelist)
    }
}
/// Container type for Environment internals.
///
/// This holds the raw pointer to the MDBX environment and the transaction manager.
/// The env is opened via [`mdbx_env_create`](ffi::mdbx_env_create) and closed when this type drops.
struct EnvironmentInner {
    /// The raw pointer to the MDBX environment.
    ///
    /// Accessing the environment is thread-safe as long as this type exists.
    env: *mut ffi::MDBX_env,
    /// Whether the environment was opened as WRITEMAP.
    env_kind: EnvironmentKind,
    /// Transaction manager
    txn_manager: TxnManager,
}
impl Drop for EnvironmentInner {
    fn drop(&mut self) {
        // Close open mdbx environment on drop
        // NOTE(review): the second argument (`false`) mirrors the cleanup call in
        // `EnvironmentBuilder::open_with_permissions`; confirm against the mdbx docs that it
        // means "don't skip the sync" rather than the other way around.
        unsafe {
            ffi::mdbx_env_close_ex(self.env, false);
        }
    }
}
// SAFETY: internal type, only used inside [Environment]. Accessing the environment pointer is
// thread-safe
unsafe impl Send for EnvironmentInner {}
unsafe impl Sync for EnvironmentInner {}
/// Determines how data is mapped into memory
///
/// It only takes effect when the environment is opened.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum EnvironmentKind {
/// Open the environment in default mode, without WRITEMAP.
#[default]
Default,
/// Open the environment as mdbx-WRITEMAP.
/// Use a writeable memory map unless the environment is opened as `MDBX_RDONLY`
/// ([`crate::Mode::ReadOnly`]).
///
/// All data will be mapped into memory in the read-write mode [`crate::Mode::ReadWrite`]. This
/// offers a significant performance benefit, since the data will be modified directly in
/// mapped memory and then flushed to disk by single system call, without any memory
/// management nor copying.
///
/// This mode is incompatible with nested transactions.
WriteMap,
}
impl EnvironmentKind {
    /// Whether this kind maps the database with a writeable memory map (`MDBX_WRITEMAP`).
    #[inline]
    pub const fn is_write_map(&self) -> bool {
        match self {
            Self::WriteMap => true,
            Self::Default => false,
        }
    }
    /// The environment flags that must be OR-ed in when opening an environment of this kind.
    pub(crate) const fn extra_flags(&self) -> ffi::MDBX_env_flags_t {
        match self {
            Self::WriteMap => ffi::MDBX_WRITEMAP,
            Self::Default => ffi::MDBX_ENV_DEFAULTS,
        }
    }
}
/// Thin copyable wrapper used to move the raw environment pointer across threads
/// (e.g. into the transaction manager).
#[derive(Copy, Clone, Debug)]
pub(crate) struct EnvPtr(pub(crate) *mut ffi::MDBX_env);
// SAFETY: this wrapper only carries the pointer value; all dereferencing happens through MDBX
// FFI calls, and (as documented on `EnvironmentInner`) accessing the environment is thread-safe
// while it is alive.
unsafe impl Send for EnvPtr {}
unsafe impl Sync for EnvPtr {}
/// Environment statistics.
///
/// Contains information about the size and layout of an MDBX environment or database.
#[derive(Debug)]
#[repr(transparent)]
pub struct Stat(ffi::MDBX_stat);
impl Stat {
    /// Create a new Stat with zero'd inner struct `ffi::MDB_stat`.
    pub(crate) const fn new() -> Self {
        // SAFETY: `MDBX_stat` is a plain-old-data FFI struct, so the all-zero bit pattern is a
        // valid value; it is always overwritten by MDBX (e.g. `mdbx_env_stat_ex`) before use.
        unsafe { Self(mem::zeroed()) }
    }
    /// Returns a mut pointer to `ffi::MDB_stat`.
    ///
    /// Used as the out-parameter for the MDBX stat FFI calls.
    pub(crate) const fn mdb_stat(&mut self) -> *mut ffi::MDBX_stat {
        &mut self.0
    }
}
impl Stat {
    /// Size of a database page. This is the same for all databases in the environment.
    #[inline]
    pub const fn page_size(&self) -> u32 {
        self.0.ms_psize
    }
    /// Depth (height) of the B-tree.
    #[inline]
    pub const fn depth(&self) -> u32 {
        self.0.ms_depth
    }
    /// Number of internal (non-leaf) pages.
    #[inline]
    pub const fn branch_pages(&self) -> usize {
        self.0.ms_branch_pages as usize
    }
    /// Number of leaf pages.
    #[inline]
    pub const fn leaf_pages(&self) -> usize {
        self.0.ms_leaf_pages as usize
    }
    /// Number of overflow pages.
    #[inline]
    pub const fn overflow_pages(&self) -> usize {
        self.0.ms_overflow_pages as usize
    }
    /// Number of data items.
    #[inline]
    pub const fn entries(&self) -> usize {
        self.0.ms_entries as usize
    }
}
/// Geometry information reported by MDBX as part of the environment [`Info`].
#[derive(Debug)]
#[repr(transparent)]
pub struct GeometryInfo(ffi::MDBX_envinfo__bindgen_ty_1);
impl GeometryInfo {
    /// Lower (minimum) bound of the environment geometry.
    pub const fn min(&self) -> u64 {
        self.0.lower
    }
}
/// Environment information.
///
/// Contains environment information about the map size, readers, last txn id etc.
#[derive(Debug)]
#[repr(transparent)]
pub struct Info(ffi::MDBX_envinfo);
impl Info {
    /// Returns the geometry settings (`mi_geo`) of the environment.
    pub const fn geometry(&self) -> GeometryInfo {
        GeometryInfo(self.0.mi_geo)
    }
    /// Size of memory map.
    #[inline]
    pub const fn map_size(&self) -> usize {
        self.0.mi_mapsize as usize
    }
    /// Last used page number
    #[inline]
    pub const fn last_pgno(&self) -> usize {
        self.0.mi_last_pgno as usize
    }
    /// Last transaction ID
    #[inline]
    pub const fn last_txnid(&self) -> usize {
        self.0.mi_recent_txnid as usize
    }
    /// Max reader slots in the environment
    #[inline]
    pub const fn max_readers(&self) -> usize {
        self.0.mi_maxreaders as usize
    }
    /// Max reader slots used in the environment
    #[inline]
    pub const fn num_readers(&self) -> usize {
        self.0.mi_numreaders as usize
    }
    /// Return the internal page ops metrics
    #[inline]
    pub const fn page_ops(&self) -> PageOps {
        PageOps {
            newly: self.0.mi_pgop_stat.newly,
            cow: self.0.mi_pgop_stat.cow,
            clone: self.0.mi_pgop_stat.clone,
            split: self.0.mi_pgop_stat.split,
            merge: self.0.mi_pgop_stat.merge,
            spill: self.0.mi_pgop_stat.spill,
            unspill: self.0.mi_pgop_stat.unspill,
            wops: self.0.mi_pgop_stat.wops,
            prefault: self.0.mi_pgop_stat.prefault,
            mincore: self.0.mi_pgop_stat.mincore,
            msync: self.0.mi_pgop_stat.msync,
            fsync: self.0.mi_pgop_stat.fsync,
        }
    }
    /// Return the mode of the database
    #[inline]
    pub const fn mode(&self) -> Mode {
        let mode = self.0.mi_mode as ffi::MDBX_env_flags_t;
        // Flag checks are ordered: read-only short-circuits everything, then sync modes are
        // tested from weakest (`UTTERLY_NOSYNC`) to strongest, falling back to `Durable`.
        if (mode & ffi::MDBX_RDONLY) != 0 {
            Mode::ReadOnly
        } else if (mode & ffi::MDBX_UTTERLY_NOSYNC) != 0 {
            Mode::ReadWrite { sync_mode: SyncMode::UtterlyNoSync }
        } else if (mode & ffi::MDBX_NOMETASYNC) != 0 {
            Mode::ReadWrite { sync_mode: SyncMode::NoMetaSync }
        } else if (mode & ffi::MDBX_SAFE_NOSYNC) != 0 {
            Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync }
        } else {
            Mode::ReadWrite { sync_mode: SyncMode::Durable }
        }
    }
}
impl fmt::Debug for Environment {
    // Manual impl: `EnvironmentInner` holds a raw pointer and does not derive `Debug`, so only
    // the environment kind is rendered; `finish_non_exhaustive` signals the omitted fields.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Environment").field("kind", &self.inner.env_kind).finish_non_exhaustive()
    }
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Environment Builder
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Database page size choice used in [`Geometry`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum PageSize {
    /// Use the minimal page size MDBX accepts (passed to `mdbx_env_set_geometry` as `0`).
    MinimalAcceptable,
    /// Use an explicit page size, in bytes.
    Set(usize),
}
/// Statistics of page operations overall of all (running, completed and aborted) transactions
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PageOps {
/// Quantity of a new pages added
pub newly: u64,
/// Quantity of pages copied for update
pub cow: u64,
/// Quantity of parent's dirty pages clones for nested transactions
pub clone: u64,
/// Page splits
pub split: u64,
/// Page merges
pub merge: u64,
/// Quantity of spilled dirty pages
pub spill: u64,
/// Quantity of unspilled/reloaded pages
pub unspill: u64,
/// Number of explicit write operations (not a pages) to a disk
pub wops: u64,
/// Number of explicit msync/flush-to-disk operations
pub msync: u64,
/// Number of explicit fsync/flush-to-disk operations
pub fsync: u64,
/// Number of prefault write operations
pub prefault: u64,
/// Number of `mincore()` calls
pub mincore: u64,
}
/// Represents the geometry settings for the database environment
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Geometry<R> {
    /// The size range in bytes.
    pub size: Option<R>,
    /// Growth step passed to `mdbx_env_set_geometry`; `None` keeps the MDBX default (`-1`).
    pub growth_step: Option<isize>,
    /// Shrink threshold passed to `mdbx_env_set_geometry`; `None` keeps the MDBX default (`-1`).
    pub shrink_threshold: Option<isize>,
    /// Database page size; `None` keeps the MDBX default.
    pub page_size: Option<PageSize>,
}
impl<R> Default for Geometry<R> {
    // Manual impl: deriving `Default` would require `R: Default`, which is unnecessary since
    // every field simply defaults to `None`.
    fn default() -> Self {
        Self { size: None, growth_step: None, shrink_threshold: None, page_size: None }
    }
}
/// Handle-Slow-Readers callback function to resolve database full/overflow issue due to a reader(s)
/// which prevents the old data from being recycled.
///
/// Read transactions prevent reuse of pages freed by newer write transactions, thus the database
/// can grow quickly. This callback will be called when there is not enough space in the database
/// (i.e. before increasing the database size or before `MDBX_MAP_FULL` error) and thus can be
/// used to resolve issues with a "long-lived" read transactions.
///
/// Depending on the arguments and needs, your implementation may wait,
/// terminate a process or thread that is performing a long read, or perform
/// some other action. In doing so it is important that the returned code always
/// corresponds to the performed action.
///
/// # Arguments
///
/// * `process_id` – A process id of the reader process.
/// * `thread_id` – A thread id of the reader thread.
/// * `read_txn_id` – An oldest read transaction number on which stalled.
/// * `gap` – A lag from the last committed txn.
/// * `space` – A space that actually become available for reuse after this reader finished. The
/// callback function can take this value into account to evaluate the impact that a long-running
/// transaction has.
/// * `retry` – A retry number starting from 0. If callback has returned 0 at least once, then at
/// end of current handling loop the callback function will be called additionally with negative
/// `retry` value to notify about the end of loop. The callback function can use this fact to
/// implement timeout reset logic while waiting for a readers.
///
/// # Returns
///
/// A return code that determines the further actions for MDBX and must match the action which
/// was executed by the callback:
/// * `-2` or less – An error condition and the reader was not killed.
/// * `-1` – The callback was unable to solve the problem and agreed on `MDBX_MAP_FULL` error; MDBX
/// should increase the database size or return `MDBX_MAP_FULL` error.
/// * `0` – The callback solved the problem or just waited for a while, libmdbx should rescan the
/// reader lock table and retry. This also includes a situation when corresponding transaction
/// terminated in normal way by `mdbx_txn_abort()` or `mdbx_txn_reset()`, and may be restarted.
/// I.e. reader slot isn't needed to be cleaned from transaction.
/// * `1` – Transaction aborted asynchronous and reader slot should be cleared immediately, i.e.
/// read transaction will not continue but `mdbx_txn_abort()` nor `mdbx_txn_reset()` will be
/// called later.
/// * `2` or greater – The reader process was terminated or killed, and MDBX should entirely reset
/// reader registration.
pub type HandleSlowReadersCallback = extern "C" fn(
env: *const ffi::MDBX_env,
txn: *const ffi::MDBX_txn,
pid: ffi::mdbx_pid_t,
tid: ffi::mdbx_tid_t,
laggard: u64,
gap: std::ffi::c_uint,
space: usize,
retry: std::ffi::c_int,
) -> HandleSlowReadersReturnCode;
#[derive(Debug)]
#[repr(i32)]
pub enum HandleSlowReadersReturnCode {
/// An error condition and the reader was not killed.
Error = -2,
/// The callback was unable to solve the problem and agreed on `MDBX_MAP_FULL` error;
/// MDBX should increase the database size or return `MDBX_MAP_FULL` error.
ProceedWithoutKillingReader = -1,
/// The callback solved the problem or just waited for a while, libmdbx should rescan the
/// reader lock table and retry. This also includes a situation when corresponding transaction
/// terminated in normal way by `mdbx_txn_abort()` or `mdbx_txn_reset()`, and may be restarted.
/// I.e. reader slot isn't needed to be cleaned from transaction.
Success = 0,
/// Transaction aborted asynchronous and reader slot should be cleared immediately, i.e. read
/// transaction will not continue but `mdbx_txn_abort()` nor `mdbx_txn_reset()` will be called
/// later.
ClearReaderSlot = 1,
/// The reader process was terminated or killed, and MDBX should entirely reset reader
/// registration.
ReaderProcessTerminated = 2,
}
/// Options for opening or creating an environment.
#[derive(Debug, Clone)]
pub struct EnvironmentBuilder {
    /// Environment open flags (mode, sync behavior, etc.).
    flags: EnvironmentFlags,
    // Each `Option<u64>` below maps to an MDBX runtime option applied via
    // `mdbx_env_set_option` during `open_with_permissions`; `None` leaves the MDBX default.
    max_readers: Option<u64>,
    max_dbs: Option<u64>,
    sync_bytes: Option<u64>,
    sync_period: Option<u64>,
    rp_augment_limit: Option<u64>,
    loose_limit: Option<u64>,
    dp_reserve_limit: Option<u64>,
    txn_dp_limit: Option<u64>,
    spill_max_denominator: Option<u64>,
    spill_min_denominator: Option<u64>,
    // Size bounds already lowered from `RangeBounds` to `(min, max)` by `set_geometry`.
    geometry: Option<Geometry<(Option<usize>, Option<usize>)>>,
    log_level: Option<ffi::MDBX_log_level_t>,
    // Whether the environment is opened with `MDBX_WRITEMAP`.
    kind: EnvironmentKind,
    handle_slow_readers: Option<HandleSlowReadersCallback>,
    #[cfg(feature = "read-tx-timeouts")]
    /// The maximum duration of a read transaction. If [None], but the `read-tx-timeout` feature is
    /// enabled, the default value of [`DEFAULT_MAX_READ_TRANSACTION_DURATION`] is used.
    max_read_transaction_duration: Option<read_transactions::MaxReadTransactionDuration>,
}
impl EnvironmentBuilder {
    /// Open an environment.
    ///
    /// Database files will be opened with 644 permissions.
    pub fn open(&self, path: &Path) -> Result<Environment> {
        self.open_with_permissions(path, 0o644)
    }
    /// Open an environment with the provided UNIX permissions.
    ///
    /// The path may not contain the null character.
    pub fn open_with_permissions(
        &self,
        path: &Path,
        mode: ffi::mdbx_mode_t,
    ) -> Result<Environment> {
        let mut env: *mut ffi::MDBX_env = ptr::null_mut();
        unsafe {
            if let Some(log_level) = self.log_level {
                // Returns the previously debug_flags in the 0-15 bits and log_level in the
                // 16-31 bits, no need to use `mdbx_result`.
                ffi::mdbx_setup_debug(log_level, ffi::MDBX_DBG_DONTCHANGE, None);
            }
            mdbx_result(ffi::mdbx_env_create(&mut env))?;
            // The configuration steps below run inside an immediately-invoked closure so that
            // any failure after `mdbx_env_create` falls through to a single cleanup point that
            // closes the half-initialized environment before returning the error.
            if let Err(e) = (|| {
                if let Some(geometry) = &self.geometry {
                    // `-1` is the MDBX sentinel for "keep the current/default value".
                    let mut min_size = -1;
                    let mut max_size = -1;
                    if let Some(size) = geometry.size {
                        if let Some(size) = size.0 {
                            min_size = size as isize;
                        }
                        if let Some(size) = size.1 {
                            max_size = size as isize;
                        }
                    }
                    mdbx_result(ffi::mdbx_env_set_geometry(
                        env,
                        min_size,
                        -1,
                        max_size,
                        geometry.growth_step.unwrap_or(-1),
                        geometry.shrink_threshold.unwrap_or(-1),
                        match geometry.page_size {
                            None => -1,
                            Some(PageSize::MinimalAcceptable) => 0,
                            Some(PageSize::Set(size)) => size as isize,
                        },
                    ))?;
                }
                // Options that must be applied before `mdbx_env_open`.
                for (opt, v) in [
                    (ffi::MDBX_opt_max_db, self.max_dbs),
                    (ffi::MDBX_opt_rp_augment_limit, self.rp_augment_limit),
                    (ffi::MDBX_opt_loose_limit, self.loose_limit),
                    (ffi::MDBX_opt_dp_reserve_limit, self.dp_reserve_limit),
                    (ffi::MDBX_opt_txn_dp_limit, self.txn_dp_limit),
                    (ffi::MDBX_opt_spill_max_denominator, self.spill_max_denominator),
                    (ffi::MDBX_opt_spill_min_denominator, self.spill_min_denominator),
                ] {
                    if let Some(v) = v {
                        mdbx_result(ffi::mdbx_env_set_option(env, opt, v))?;
                    }
                }
                // set max readers if specified
                if let Some(max_readers) = self.max_readers {
                    mdbx_result(ffi::mdbx_env_set_option(
                        env,
                        ffi::MDBX_opt_max_readers,
                        max_readers,
                    ))?;
                }
                if let Some(handle_slow_readers) = self.handle_slow_readers {
                    mdbx_result(ffi::mdbx_env_set_hsr(
                        env,
                        convert_hsr_fn(Some(handle_slow_readers)),
                    ))?;
                }
                // Convert the OS path into the byte form MDBX expects; the `CString`
                // construction below rejects interior NUL bytes with `Error::Invalid`.
                #[cfg(unix)]
                fn path_to_bytes<P: AsRef<Path>>(path: P) -> Vec<u8> {
                    use std::os::unix::ffi::OsStrExt;
                    path.as_ref().as_os_str().as_bytes().to_vec()
                }
                #[cfg(windows)]
                fn path_to_bytes<P: AsRef<Path>>(path: P) -> Vec<u8> {
                    // On Windows, could use std::os::windows::ffi::OsStrExt to encode_wide(),
                    // but we end up with a Vec<u16> instead of a Vec<u8>, so that doesn't
                    // really help.
                    path.as_ref().to_string_lossy().to_string().into_bytes()
                }
                let path = match CString::new(path_to_bytes(path)) {
                    Ok(path) => path,
                    Err(_) => return Err(Error::Invalid),
                };
                mdbx_result(ffi::mdbx_env_open(
                    env,
                    path.as_ptr(),
                    self.flags.make_flags() | self.kind.extra_flags(),
                    mode,
                ))?;
                // Sync-related options are applied after the environment is opened.
                for (opt, v) in [
                    (ffi::MDBX_opt_sync_bytes, self.sync_bytes),
                    (ffi::MDBX_opt_sync_period, self.sync_period),
                ] {
                    if let Some(v) = v {
                        mdbx_result(ffi::mdbx_env_set_option(env, opt, v))?;
                    }
                }
                Ok(())
            })() {
                ffi::mdbx_env_close_ex(env, false);
                return Err(e)
            }
        }
        let env_ptr = EnvPtr(env);
        #[cfg(not(feature = "read-tx-timeouts"))]
        let txn_manager = TxnManager::new(env_ptr);
        // With read timeouts enabled, an unset duration falls back to the 5-minute default;
        // an explicit `Unbounded` opts out of timeout tracking entirely.
        #[cfg(feature = "read-tx-timeouts")]
        let txn_manager = {
            if let crate::MaxReadTransactionDuration::Set(duration) = self
                .max_read_transaction_duration
                .unwrap_or(read_transactions::MaxReadTransactionDuration::Set(
                    DEFAULT_MAX_READ_TRANSACTION_DURATION,
                ))
            {
                TxnManager::new_with_max_read_transaction_duration(env_ptr, duration)
            } else {
                TxnManager::new(env_ptr)
            }
        };
        let env = EnvironmentInner { env, txn_manager, env_kind: self.kind };
        Ok(Environment { inner: Arc::new(env) })
    }
    /// Configures how this environment will be opened.
    pub const fn set_kind(&mut self, kind: EnvironmentKind) -> &mut Self {
        self.kind = kind;
        self
    }
    /// Opens the environment with mdbx WRITEMAP
    ///
    /// See also [`EnvironmentKind`]
    pub const fn write_map(&mut self) -> &mut Self {
        self.set_kind(EnvironmentKind::WriteMap)
    }
    /// Sets the provided options in the environment.
    pub const fn set_flags(&mut self, flags: EnvironmentFlags) -> &mut Self {
        self.flags = flags;
        self
    }
    /// Sets the maximum number of threads or reader slots for the environment.
    ///
    /// This defines the number of slots in the lock table that is used to track readers in the
    /// environment. The default is 126. Starting a read-only transaction normally ties a lock
    /// table slot to the [Transaction] object until it or the [Environment] object is destroyed.
    pub const fn set_max_readers(&mut self, max_readers: u64) -> &mut Self {
        self.max_readers = Some(max_readers);
        self
    }
    /// Sets the maximum number of named databases for the environment.
    ///
    /// This function is only needed if multiple databases will be used in the
    /// environment. Simpler applications that use the environment as a single
    /// unnamed database can ignore this option.
    ///
    /// Currently a moderate number of slots are cheap but a huge number gets
    /// expensive: 7-120 words per transaction, and every [`Transaction::open_db()`]
    /// does a linear search of the opened slots.
    pub const fn set_max_dbs(&mut self, v: usize) -> &mut Self {
        self.max_dbs = Some(v as u64);
        self
    }
    /// Sets the interprocess/shared threshold to force flush the data buffers to disk, if
    /// [`SyncMode::SafeNoSync`] is used.
    pub const fn set_sync_bytes(&mut self, v: usize) -> &mut Self {
        self.sync_bytes = Some(v as u64);
        self
    }
    /// Sets the interprocess/shared relative period since the last unsteady commit to force flush
    /// the data buffers to disk, if [`SyncMode::SafeNoSync`] is used.
    pub fn set_sync_period(&mut self, v: Duration) -> &mut Self {
        // For this option, mdbx uses units of 1/65536 of a second.
        let as_mdbx_units = (v.as_secs_f64() * 65536f64) as u64;
        self.sync_period = Some(as_mdbx_units);
        self
    }
    /// Sets the `MDBX_opt_rp_augment_limit` option.
    pub const fn set_rp_augment_limit(&mut self, v: u64) -> &mut Self {
        self.rp_augment_limit = Some(v);
        self
    }
    /// Sets the `MDBX_opt_loose_limit` option.
    pub const fn set_loose_limit(&mut self, v: u64) -> &mut Self {
        self.loose_limit = Some(v);
        self
    }
    /// Sets the `MDBX_opt_dp_reserve_limit` option.
    pub const fn set_dp_reserve_limit(&mut self, v: u64) -> &mut Self {
        self.dp_reserve_limit = Some(v);
        self
    }
    /// Sets the `MDBX_opt_txn_dp_limit` option.
    pub const fn set_txn_dp_limit(&mut self, v: u64) -> &mut Self {
        self.txn_dp_limit = Some(v);
        self
    }
    /// Sets the `MDBX_opt_spill_max_denominator` option.
    pub fn set_spill_max_denominator(&mut self, v: u8) -> &mut Self {
        self.spill_max_denominator = Some(v.into());
        self
    }
    /// Sets the `MDBX_opt_spill_min_denominator` option.
    pub fn set_spill_min_denominator(&mut self, v: u8) -> &mut Self {
        self.spill_min_denominator = Some(v.into());
        self
    }
    /// Set all size-related parameters of environment, including page size and the min/max size of
    /// the memory map.
    pub fn set_geometry<R: RangeBounds<usize>>(&mut self, geometry: Geometry<R>) -> &mut Self {
        // NOTE(review): `Bound::Excluded(v)` is treated the same as `Included(v)` here, so an
        // exclusive end bound is effectively widened by one byte — confirm this is intentional.
        let convert_bound = |bound: Bound<&usize>| match bound {
            Bound::Included(v) | Bound::Excluded(v) => Some(*v),
            _ => None,
        };
        self.geometry = Some(Geometry {
            size: geometry.size.map(|range| {
                (convert_bound(range.start_bound()), convert_bound(range.end_bound()))
            }),
            growth_step: geometry.growth_step,
            shrink_threshold: geometry.shrink_threshold,
            page_size: geometry.page_size,
        });
        self
    }
    /// Sets the MDBX log level applied via `mdbx_setup_debug` when opening.
    pub const fn set_log_level(&mut self, log_level: ffi::MDBX_log_level_t) -> &mut Self {
        self.log_level = Some(log_level);
        self
    }
    /// Set the Handle-Slow-Readers callback. See [`HandleSlowReadersCallback`] for more
    /// information.
    pub fn set_handle_slow_readers(&mut self, hsr: HandleSlowReadersCallback) -> &mut Self {
        self.handle_slow_readers = Some(hsr);
        self
    }
}
#[cfg(feature = "read-tx-timeouts")]
pub(crate) mod read_transactions {
    use crate::EnvironmentBuilder;
    use std::time::Duration;
    /// The maximum duration of a read transaction.
    #[derive(Debug, Clone, Copy)]
    // NOTE(review): this inner `#[cfg]` is redundant — the whole module is already gated on
    // `read-tx-timeouts`.
    #[cfg(feature = "read-tx-timeouts")]
    pub enum MaxReadTransactionDuration {
        /// The maximum duration of a read transaction is unbounded.
        Unbounded,
        /// The maximum duration of a read transaction is set to the given duration.
        Set(Duration),
    }
    #[cfg(feature = "read-tx-timeouts")]
    impl MaxReadTransactionDuration {
        /// Returns the configured duration, or `None` when unbounded.
        pub const fn as_duration(&self) -> Option<Duration> {
            match self {
                Self::Unbounded => None,
                Self::Set(duration) => Some(*duration),
            }
        }
    }
    impl EnvironmentBuilder {
        /// Set the maximum time a read-only transaction can be open.
        pub const fn set_max_read_transaction_duration(
            &mut self,
            max_read_transaction_duration: MaxReadTransactionDuration,
        ) -> &mut Self {
            self.max_read_transaction_duration = Some(max_read_transaction_duration);
            self
        }
    }
}
/// Converts a [`HandleSlowReadersCallback`] to the actual FFI function pointer.
fn convert_hsr_fn(callback: Option<HandleSlowReadersCallback>) -> ffi::MDBX_hsr_func {
    // SAFETY: `HandleSlowReadersReturnCode` is `#[repr(i32)]`, so the callback's ABI matches the
    // `int`-returning signature MDBX expects, and `Option<extern "C" fn>` is guaranteed to have
    // the layout of a nullable C function pointer.
    unsafe { std::mem::transmute(callback) }
}
#[cfg(test)]
mod tests {
use crate::{Environment, Error, Geometry, HandleSlowReadersReturnCode, PageSize, WriteFlags};
use std::{
ops::RangeInclusive,
sync::atomic::{AtomicBool, Ordering},
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | true |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/codec.rs | crates/storage/libmdbx-rs/src/codec.rs | use crate::{Error, TransactionKind};
use derive_more::{Debug, Deref, DerefMut};
use std::{borrow::Cow, slice};
/// Implement this to be able to decode data values
pub trait TableObject: Sized {
    /// Decodes the object from the given bytes.
    fn decode(data_val: &[u8]) -> Result<Self, Error>;
    /// Decodes the value directly from the given MDBX_val pointer.
    ///
    /// # Safety
    ///
    /// This should only be used in the context of an MDBX transaction.
    #[doc(hidden)]
    unsafe fn decode_val<K: TransactionKind>(
        _: *const ffi::MDBX_txn,
        data_val: ffi::MDBX_val,
    ) -> Result<Self, Error> {
        // Default implementation: view the MDBX-owned buffer as a byte slice and delegate to
        // `decode`, which copies whatever it needs before the transaction memory goes away.
        let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
        Self::decode(s)
    }
}
impl TableObject for Cow<'_, [u8]> {
    // Never called for this impl: `decode_val` below is overridden and does not delegate here.
    fn decode(_: &[u8]) -> Result<Self, Error> {
        unreachable!()
    }
    #[doc(hidden)]
    unsafe fn decode_val<K: TransactionKind>(
        _txn: *const ffi::MDBX_txn,
        data_val: ffi::MDBX_val,
    ) -> Result<Self, Error> {
        let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
        // With `return-borrowed` enabled the slice is always borrowed directly from the
        // transaction's memory, skipping the dirty-page check below.
        #[cfg(feature = "return-borrowed")]
        {
            Ok(Cow::Borrowed(s))
        }
        #[cfg(not(feature = "return-borrowed"))]
        {
            // In a read-write transaction the value may live in a dirty page
            // (`mdbx_is_dirty`), whose memory can be invalidated by later writes, so the
            // bytes are copied out; read-only transactions can always borrow.
            let is_dirty = (!K::IS_READ_ONLY) &&
                crate::error::mdbx_result(ffi::mdbx_is_dirty(_txn, data_val.iov_base))?;
            Ok(if is_dirty { Cow::Owned(s.to_vec()) } else { Cow::Borrowed(s) })
        }
    }
}
impl TableObject for Vec<u8> {
    // Always copies the value out of the transaction memory.
    fn decode(data_val: &[u8]) -> Result<Self, Error> {
        Ok(data_val.to_vec())
    }
}
impl TableObject for () {
    fn decode(_: &[u8]) -> Result<Self, Error> {
        Ok(())
    }
    // Overridden to avoid even constructing the byte slice when the value is ignored.
    unsafe fn decode_val<K: TransactionKind>(
        _: *const ffi::MDBX_txn,
        _: ffi::MDBX_val,
    ) -> Result<Self, Error> {
        Ok(())
    }
}
/// If you don't need the data itself, just its length.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deref, DerefMut)]
pub struct ObjectLength(pub usize);
impl TableObject for ObjectLength {
    // Records only the value's byte length, discarding the contents.
    fn decode(data_val: &[u8]) -> Result<Self, Error> {
        Ok(Self(data_val.len()))
    }
}
impl<const LEN: usize> TableObject for [u8; LEN] {
fn decode(data_val: &[u8]) -> Result<Self, Error> {
if data_val.len() != LEN {
return Err(Error::DecodeErrorLenDiff)
}
let mut a = [0; LEN];
a[..].copy_from_slice(data_val);
Ok(a)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/database.rs | crates/storage/libmdbx-rs/src/database.rs | use crate::{
error::{mdbx_result, Result},
transaction::TransactionKind,
Environment, Transaction,
};
use ffi::MDBX_db_flags_t;
use std::{ffi::CStr, ptr};
/// A handle to an individual database in an environment.
///
/// A database handle denotes the name and parameters of a database in an environment.
#[derive(Debug)]
pub struct Database {
    // The raw MDBX database index (DBI) handle.
    dbi: ffi::MDBX_dbi,
    /// The environment that this database belongs to keeps it alive as long as the database
    /// instance exists.
    ///
    /// `None` only for the special freelist handle (see [`Database::freelist_db`]).
    _env: Option<Environment>,
}
impl Database {
    /// Opens a new database handle in the given transaction.
    ///
    /// Prefer using `Environment::open_db`, `Environment::create_db`, `TransactionExt::open_db`,
    /// or `RwTransaction::create_db`.
    pub(crate) fn new<K: TransactionKind>(
        txn: &Transaction<K>,
        name: Option<&str>,
        flags: MDBX_db_flags_t,
    ) -> Result<Self> {
        // Build a NUL-terminated copy of the name on the stack: the SmallVec keeps names of up
        // to 31 bytes (plus NUL) heap-allocation free.
        let mut c_name_buf = smallvec::SmallVec::<[u8; 32]>::new();
        // NOTE(review): a name containing an interior NUL byte makes `from_bytes_with_nul`
        // fail and this `unwrap` panic — confirm callers validate database names.
        let c_name = name.map(|n| {
            c_name_buf.extend_from_slice(n.as_bytes());
            c_name_buf.push(0);
            CStr::from_bytes_with_nul(&c_name_buf).unwrap()
        });
        // `None` selects the unnamed (default) database via a null pointer.
        let name_ptr = if let Some(c_name) = c_name { c_name.as_ptr() } else { ptr::null() };
        let mut dbi: ffi::MDBX_dbi = 0;
        // Double `?`: the outer one propagates `txn_execute` failures, the inner one the
        // `mdbx_dbi_open` result.
        txn.txn_execute(|txn_ptr| {
            mdbx_result(unsafe { ffi::mdbx_dbi_open(txn_ptr, name_ptr, flags, &mut dbi) })
        })??;
        Ok(Self::new_from_ptr(dbi, txn.env().clone()))
    }
    /// Wraps an already-opened DBI handle, tying its lifetime to the given environment.
    pub(crate) const fn new_from_ptr(dbi: ffi::MDBX_dbi, env: Environment) -> Self {
        Self { dbi, _env: Some(env) }
    }
    /// Opens the freelist database with DBI `0`.
    pub const fn freelist_db() -> Self {
        Self { dbi: 0, _env: None }
    }
    /// Returns the underlying MDBX database handle.
    ///
    /// The caller **must** ensure that the handle is not used after the lifetime of the
    /// environment, or after the database has been closed.
    pub const fn dbi(&self) -> ffi::MDBX_dbi {
        self.dbi
    }
}
// SAFETY: `Database` stores only a plain DBI index plus an owned `Environment` clone; the raw
// MDBX handle is managed by the environment itself. NOTE(review): soundness additionally
// relies on MDBX's DBI-handle usage rules being upheld by callers (see `Database::dbi`) —
// confirm against the environment's lifetime management.
unsafe impl Send for Database {}
unsafe impl Sync for Database {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/flags.rs | crates/storage/libmdbx-rs/src/flags.rs | use bitflags::bitflags;
use ffi::*;
/// MDBX sync mode
#[derive(Clone, Copy, Debug, Default)]
pub enum SyncMode {
/// Default robust and durable sync mode.
/// Metadata is written and flushed to disk after a data is written and flushed, which
/// guarantees the integrity of the database in the event of a crash at any time.
#[default]
Durable,
/// Don't sync the meta-page after commit.
///
/// Flush system buffers to disk only once per transaction commit, omit the metadata flush.
/// Defer that until the system flushes files to disk, or next non-read-only commit or
/// [`Environment::sync()`](crate::Environment::sync). Depending on the platform and
/// hardware, with [`SyncMode::NoMetaSync`] you may get a doubling of write performance.
///
/// This trade-off maintains database integrity, but a system crash may undo the last committed
    /// transaction. I.e. it preserves the ACI (atomicity, consistency, isolation) properties but
    /// not the D (durability) database property.
NoMetaSync,
/// Don't sync anything but keep previous steady commits.
///
/// [`SyncMode::UtterlyNoSync`] the [`SyncMode::SafeNoSync`] flag disable similarly flush
/// system buffers to disk when committing a transaction. But there is a huge difference in
/// how are recycled the MVCC snapshots corresponding to previous "steady" transactions
/// (see below).
///
/// With [`crate::EnvironmentKind::WriteMap`] the [`SyncMode::SafeNoSync`] instructs MDBX to
/// use asynchronous mmap-flushes to disk. Asynchronous mmap-flushes means that actually
/// all writes will scheduled and performed by operation system on it own manner, i.e.
/// unordered. MDBX itself just notify operating system that it would be nice to write data
/// to disk, but no more.
///
/// Depending on the platform and hardware, with [`SyncMode::SafeNoSync`] you may get a
/// multiple increase of write performance, even 10 times or more.
///
/// In contrast to [`SyncMode::UtterlyNoSync`] mode, with [`SyncMode::SafeNoSync`] flag MDBX
/// will keeps untouched pages within B-tree of the last transaction "steady" which was
/// synced to disk completely. This has big implications for both data durability and
/// (unfortunately) performance:
/// - A system crash can't corrupt the database, but you will lose the last transactions;
/// because MDBX will rollback to last steady commit since it kept explicitly.
/// - The last steady transaction makes an effect similar to "long-lived" read transaction
/// since prevents reuse of pages freed by newer write transactions, thus the any data
/// changes will be placed in newly allocated pages.
/// - To avoid rapid database growth, the system will sync data and issue a steady commit-point
/// to resume reuse pages, each time there is insufficient space and before increasing the
/// size of the file on disk.
///
/// In other words, with
/// [`SyncMode::SafeNoSync`] flag MDBX protects you from the whole database corruption, at the
/// cost increasing database size and/or number of disk IOPs. So, [`SyncMode::SafeNoSync`]
/// flag could be used with [`Environment::sync()`](crate::Environment::sync) as alternatively
/// for batch committing or nested transaction (in some cases).
///
/// The number and volume of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the
/// as without any no-sync flags. However, you should expect a larger process's work set
/// and significantly worse a locality of reference, due to the more intensive allocation
/// of previously unused pages and increase the size of the database.
SafeNoSync,
/// Don't sync anything and wipe previous steady commits.
///
/// Don't flush system buffers to disk when committing a transaction.
/// This optimization means a system crash can corrupt the database, if buffers are not yet
/// flushed to disk. Depending on the platform and hardware, with [`SyncMode::UtterlyNoSync`]
/// you may get a multiple increase of write performance, even 100 times or more.
///
/// If the filesystem preserves write order (which is rare and never provided unless explicitly
/// noted) and the [`WriteMap`](crate::EnvironmentKind::WriteMap) and
/// [`EnvironmentFlags::liforeclaim`] flags are not used, then a system crash can't corrupt
/// the database, but you can lose the last transactions, if at least one buffer is not yet
/// flushed to disk. The risk is governed by how often the system flushes dirty buffers to
/// disk and how often [`Environment::sync()`](crate::Environment::sync) is called. So,
    /// transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D
/// (durability). I.e. database integrity is maintained, but a system crash may undo the
/// final transactions.
///
/// Otherwise, if the filesystem not preserves write order (which is typically) or
/// [`WriteMap`](crate::EnvironmentKind::WriteMap) or [`EnvironmentFlags::liforeclaim`] flags
/// are used, you should expect the corrupted database after a system crash.
///
/// So, most important thing about [`SyncMode::UtterlyNoSync`]:
/// - A system crash immediately after commit the write transaction high likely lead to
/// database corruption.
/// - Successful completion of [`Environment::sync(force=true`)](crate::Environment::sync)
/// after one or more committed transactions guarantees consistency and durability.
/// - BUT by committing two or more transactions you back database into a weak state, in which
/// a system crash may lead to database corruption! In case single transaction after
/// [`Environment::sync()`](crate::Environment::sync), you may lose transaction itself, but
/// not a whole database.
///
/// Nevertheless, [`SyncMode::UtterlyNoSync`] provides "weak" durability in
/// case of an application crash (but no durability on system failure), and therefore may
/// be very useful in scenarios where data durability is not required over a system failure
/// (e.g for short-lived data), or if you can take such risk.
UtterlyNoSync,
}
/// Environment access mode.
#[derive(Clone, Copy, Debug)]
pub enum Mode {
    /// Open the environment read-only; write transactions cannot be started.
    ReadOnly,
    /// Open the environment read-write, flushing to disk according to `sync_mode`.
    ReadWrite { sync_mode: SyncMode },
}
impl Default for Mode {
fn default() -> Self {
Self::ReadWrite { sync_mode: SyncMode::default() }
}
}
impl From<Mode> for EnvironmentFlags {
    /// Builds a flag set carrying `mode`, with every other option left at its default.
    fn from(mode: Mode) -> Self {
        let mut flags = Self::default();
        flags.mode = mode;
        flags
    }
}
/// Environment-level options applied when an MDBX environment is opened.
///
/// Converted into raw `MDBX_env_flags_t` bits by [`EnvironmentFlags::make_flags`].
#[derive(Clone, Copy, Debug, Default)]
pub struct EnvironmentFlags {
    /// Treat the environment path as a single file rather than a directory (`MDBX_NOSUBDIR`).
    pub no_sub_dir: bool,
    /// Open the environment in exclusive mode (`MDBX_EXCLUSIVE`).
    pub exclusive: bool,
    /// Flag is intended to open an existing sub-database which was created with unknown flags
    /// In such cases, instead of returning the `MDBX_INCOMPATIBLE` error, the sub-database will be
    /// opened with flags which it was created, and then an application could determine the actual
    /// flags.
    pub accede: bool,
    /// Access mode and durability level; see [`Mode`] and [`SyncMode`].
    pub mode: Mode,
    /// Disable OS read-ahead (`MDBX_NORDAHEAD`).
    pub no_rdahead: bool,
    /// Skip zero-initialization of freshly allocated pages (`MDBX_NOMEMINIT`).
    pub no_meminit: bool,
    /// Coalesce adjacent freed pages (`MDBX_COALESCE`).
    pub coalesce: bool,
    /// Use a LIFO policy when recycling freed pages (`MDBX_LIFORECLAIM`).
    pub liforeclaim: bool,
}
impl EnvironmentFlags {
    /// Configures the mdbx flags to use when opening the environment.
    ///
    /// The result is the bitwise OR of the access-mode/durability bits and every enabled
    /// boolean option, with `MDBX_NOTLS` always included.
    pub(crate) const fn make_flags(&self) -> ffi::MDBX_env_flags_t {
        // Seed the accumulator with the access-mode / durability bits.
        let mut flags = match self.mode {
            Mode::ReadOnly => ffi::MDBX_RDONLY,
            Mode::ReadWrite { sync_mode } => match sync_mode {
                SyncMode::Durable => ffi::MDBX_SYNC_DURABLE,
                SyncMode::NoMetaSync => ffi::MDBX_NOMETASYNC,
                SyncMode::SafeNoSync => ffi::MDBX_SAFE_NOSYNC,
                SyncMode::UtterlyNoSync => ffi::MDBX_UTTERLY_NOSYNC,
            },
        };
        // Fold in each enabled boolean option (OR is order-independent).
        if self.no_sub_dir {
            flags |= ffi::MDBX_NOSUBDIR;
        }
        if self.exclusive {
            flags |= ffi::MDBX_EXCLUSIVE;
        }
        if self.accede {
            flags |= ffi::MDBX_ACCEDE;
        }
        if self.no_rdahead {
            flags |= ffi::MDBX_NORDAHEAD;
        }
        if self.no_meminit {
            flags |= ffi::MDBX_NOMEMINIT;
        }
        if self.coalesce {
            flags |= ffi::MDBX_COALESCE;
        }
        if self.liforeclaim {
            flags |= ffi::MDBX_LIFORECLAIM;
        }
        // `MDBX_NOTLS` is set unconditionally.
        flags | ffi::MDBX_NOTLS
    }
}
bitflags! {
    #[doc="Database options."]
    #[derive(Default)]
    pub struct DatabaseFlags: MDBX_env_flags_t {
        /// Compare keys in reverse (back-to-front) byte order.
        const REVERSE_KEY = MDBX_REVERSEKEY;
        /// Allow multiple sorted values (duplicates) per key.
        const DUP_SORT = MDBX_DUPSORT;
        /// Keys are binary integers in native byte order.
        const INTEGER_KEY = MDBX_INTEGERKEY;
        /// With `DUP_SORT`: all values for a key have the same size.
        const DUP_FIXED = MDBX_DUPFIXED;
        /// With `DUP_SORT`: values are binary integers in native byte order.
        const INTEGER_DUP = MDBX_INTEGERDUP;
        /// With `DUP_SORT`: compare values in reverse byte order.
        const REVERSE_DUP = MDBX_REVERSEDUP;
        /// Create the database if it does not already exist.
        const CREATE = MDBX_CREATE;
        /// Open a database created with unknown flags, adopting whatever flags it has.
        const ACCEDE = MDBX_DB_ACCEDE;
    }
}
bitflags! {
    #[doc="Write options."]
    #[derive(Default)]
    // NOTE(review): these constants are MDBX *put* flags; the `MDBX_env_flags_t` repr shares
    // the underlying integer type but the name is misleading — confirm against the ffi crate.
    pub struct WriteFlags: MDBX_env_flags_t {
        /// Default put behavior: insert the pair, overwriting an existing value for the key.
        const UPSERT = MDBX_UPSERT;
        /// Fail with key-exists instead of overwriting an existing key.
        const NO_OVERWRITE = MDBX_NOOVERWRITE;
        /// With `DUP_SORT`: don't store a duplicate of an already-present key/value pair.
        const NO_DUP_DATA = MDBX_NODUPDATA;
        /// Replace the value at the cursor's current position.
        const CURRENT = MDBX_CURRENT;
        /// Remove all multi-values (duplicates) of the key before storing.
        const ALLDUPS = MDBX_ALLDUPS;
        /// Reserve space for the value and return a pointer to fill in later.
        const RESERVE = MDBX_RESERVE;
        /// Append the pair at the end of the database without comparisons (input must be sorted).
        const APPEND = MDBX_APPEND;
        /// As `APPEND`, but for sorted duplicate values.
        const APPEND_DUP = MDBX_APPENDDUP;
        /// Store multiple contiguous fixed-size values in a single call.
        const MULTIPLE = MDBX_MULTIPLE;
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/error.rs | crates/storage/libmdbx-rs/src/error.rs | use std::{ffi::c_int, result};
/// An MDBX result.
pub type Result<T> = result::Result<T, Error>;
/// An MDBX error kind.
#[derive(Debug, thiserror::Error, Clone, Copy, PartialEq, Eq)]
pub enum Error {
/// Key/data pair already exists.
#[error("key/data pair already exists")]
KeyExist,
/// No matching key/data pair found.
#[error("no matching key/data pair found")]
NotFound,
/// The cursor is already at the end of data.
#[error("the cursor is already at the end of data")]
NoData,
/// Requested page not found.
#[error("requested page not found")]
PageNotFound,
/// Database is corrupted.
#[error("database is corrupted")]
Corrupted,
/// Fatal environment error.
#[error("fatal environment error")]
Panic,
/// DB version mismatch.
#[error("DB version mismatch")]
VersionMismatch,
/// File is not an MDBX file.
#[error("file is not an MDBX file")]
Invalid,
/// Environment map size limit reached.
#[error("environment map size limit reached")]
MapFull,
/// Too many DBI-handles (maxdbs reached).
#[error("too many DBI-handles (maxdbs reached)")]
DbsFull,
/// Too many readers (maxreaders reached).
#[error("too many readers (maxreaders reached)")]
ReadersFull,
/// Transaction has too many dirty pages (i.e., the transaction is too big).
#[error("transaction has too many dirty pages (i.e., the transaction is too big)")]
TxnFull,
/// Cursor stack limit reached.
#[error("cursor stack limit reached")]
CursorFull,
/// Page has no more space.
#[error("page has no more space")]
PageFull,
/// The database engine was unable to extend mapping, e.g. the address space is unavailable or
/// busy.
///
/// This can mean:
/// - The database size was extended by other processes beyond the environment map size, and
/// the engine was unable to extend the mapping while starting a read transaction. The
/// environment should be re-opened to continue.
/// - The engine was unable to extend the mapping during a write transaction or an explicit
/// call to change the geometry of the environment.
#[error("database engine was unable to extend mapping")]
UnableExtendMapSize,
/// Environment or database is not compatible with the requested operation or flags.
#[error("environment or database is not compatible with the requested operation or flags")]
Incompatible,
/// Invalid reuse of reader locktable slot.
#[error("invalid reuse of reader locktable slot")]
BadRslot,
/// Transaction is not valid for requested operation.
#[error("transaction is not valid for requested operation")]
BadTxn,
/// Invalid size or alignment of key or data for the target database.
#[error("invalid size or alignment of key or data for the target database")]
BadValSize,
/// The specified DBI-handle is invalid.
#[error("the specified DBI-handle is invalid")]
BadDbi,
/// Unexpected internal error.
#[error("unexpected internal error")]
Problem,
/// Another write transaction is running.
#[error("another write transaction is running")]
Busy,
/// The specified key has more than one associated value.
#[error("the specified key has more than one associated value")]
Multival,
/// Wrong signature of a runtime object(s).
#[error("wrong signature of a runtime object(s)")]
BadSignature,
/// Database should be recovered, but cannot be done automatically since it's in read-only
/// mode.
#[error(
"database should be recovered, but cannot be done automatically since it's in read-only mode"
)]
WannaRecovery,
/// The given key value is mismatched to the current cursor position.
#[error("the given key value is mismatched to the current cursor position")]
KeyMismatch,
/// Decode error: An invalid parameter was specified.
#[error("invalid parameter specified")]
DecodeError,
/// The environment opened in read-only.
#[error(
"the environment opened in read-only, check <https://reth.rs/run/troubleshooting.html> for more"
)]
Access,
/// Database is too large for the current system.
#[error("database is too large for the current system")]
TooLarge,
/// Decode error length difference:
///
/// An invalid parameter was specified, or the environment has an active write transaction.
#[error("invalid parameter specified or active write transaction")]
DecodeErrorLenDiff,
/// If the [Environment](crate::Environment) was opened with
/// [`EnvironmentKind::WriteMap`](crate::EnvironmentKind::WriteMap) flag, nested transactions
/// are not supported.
#[error("nested transactions are not supported with WriteMap")]
NestedTransactionsUnsupportedWithWriteMap,
/// If the [Environment](crate::Environment) was opened with in read-only mode
/// [`Mode::ReadOnly`](crate::flags::Mode::ReadOnly), write transactions can't be opened.
#[error("write transactions are not supported in read-only mode")]
WriteTransactionUnsupportedInReadOnlyMode,
/// Read transaction has been timed out.
#[error("read transaction has been timed out")]
ReadTransactionTimeout,
    /// Permission denied.
#[error("permission denied to setup database")]
Permission,
/// Unknown error code.
#[error("unknown error code: {0}")]
Other(i32),
}
impl Error {
    /// Converts a raw error code to an [Error].
    ///
    /// Codes that don't correspond to a known MDBX status are preserved in [`Error::Other`].
    pub const fn from_err_code(err_code: c_int) -> Self {
        match err_code {
            ffi::MDBX_KEYEXIST => Self::KeyExist,
            ffi::MDBX_NOTFOUND => Self::NotFound,
            ffi::MDBX_ENODATA => Self::NoData,
            ffi::MDBX_PAGE_NOTFOUND => Self::PageNotFound,
            ffi::MDBX_CORRUPTED => Self::Corrupted,
            ffi::MDBX_PANIC => Self::Panic,
            ffi::MDBX_VERSION_MISMATCH => Self::VersionMismatch,
            ffi::MDBX_INVALID => Self::Invalid,
            ffi::MDBX_MAP_FULL => Self::MapFull,
            ffi::MDBX_DBS_FULL => Self::DbsFull,
            ffi::MDBX_READERS_FULL => Self::ReadersFull,
            ffi::MDBX_TXN_FULL => Self::TxnFull,
            ffi::MDBX_CURSOR_FULL => Self::CursorFull,
            ffi::MDBX_PAGE_FULL => Self::PageFull,
            ffi::MDBX_UNABLE_EXTEND_MAPSIZE => Self::UnableExtendMapSize,
            ffi::MDBX_INCOMPATIBLE => Self::Incompatible,
            ffi::MDBX_BAD_RSLOT => Self::BadRslot,
            ffi::MDBX_BAD_TXN => Self::BadTxn,
            ffi::MDBX_BAD_VALSIZE => Self::BadValSize,
            ffi::MDBX_BAD_DBI => Self::BadDbi,
            ffi::MDBX_PROBLEM => Self::Problem,
            ffi::MDBX_BUSY => Self::Busy,
            ffi::MDBX_EMULTIVAL => Self::Multival,
            ffi::MDBX_WANNA_RECOVERY => Self::WannaRecovery,
            ffi::MDBX_EKEYMISMATCH => Self::KeyMismatch,
            ffi::MDBX_EINVAL => Self::DecodeError,
            ffi::MDBX_EACCESS => Self::Access,
            ffi::MDBX_TOO_LARGE => Self::TooLarge,
            ffi::MDBX_EBADSIGN => Self::BadSignature,
            ffi::MDBX_EPERM => Self::Permission,
            other => Self::Other(other),
        }
    }
    /// Converts an [Error] to the raw error code.
    ///
    /// This mapping is lossy: several variants share a code (e.g. the access/read-only family
    /// all map to `MDBX_EACCESS`, and both decode variants map to `MDBX_EINVAL`), so
    /// `from_err_code(e.to_err_code())` is not guaranteed to round-trip to the same variant.
    pub const fn to_err_code(&self) -> i32 {
        match self {
            Self::KeyExist => ffi::MDBX_KEYEXIST,
            Self::NotFound => ffi::MDBX_NOTFOUND,
            Self::NoData => ffi::MDBX_ENODATA,
            Self::PageNotFound => ffi::MDBX_PAGE_NOTFOUND,
            Self::Corrupted => ffi::MDBX_CORRUPTED,
            Self::Panic => ffi::MDBX_PANIC,
            Self::VersionMismatch => ffi::MDBX_VERSION_MISMATCH,
            Self::Invalid => ffi::MDBX_INVALID,
            Self::MapFull => ffi::MDBX_MAP_FULL,
            Self::DbsFull => ffi::MDBX_DBS_FULL,
            Self::ReadersFull => ffi::MDBX_READERS_FULL,
            Self::TxnFull => ffi::MDBX_TXN_FULL,
            Self::CursorFull => ffi::MDBX_CURSOR_FULL,
            Self::PageFull => ffi::MDBX_PAGE_FULL,
            Self::UnableExtendMapSize => ffi::MDBX_UNABLE_EXTEND_MAPSIZE,
            Self::Incompatible => ffi::MDBX_INCOMPATIBLE,
            Self::BadRslot => ffi::MDBX_BAD_RSLOT,
            Self::BadTxn => ffi::MDBX_BAD_TXN,
            Self::BadValSize => ffi::MDBX_BAD_VALSIZE,
            Self::BadDbi => ffi::MDBX_BAD_DBI,
            Self::Problem => ffi::MDBX_PROBLEM,
            Self::Busy => ffi::MDBX_BUSY,
            Self::Multival => ffi::MDBX_EMULTIVAL,
            Self::WannaRecovery => ffi::MDBX_WANNA_RECOVERY,
            Self::KeyMismatch => ffi::MDBX_EKEYMISMATCH,
            Self::DecodeErrorLenDiff | Self::DecodeError => ffi::MDBX_EINVAL,
            Self::TooLarge => ffi::MDBX_TOO_LARGE,
            Self::BadSignature => ffi::MDBX_EBADSIGN,
            Self::Access |
            Self::WriteTransactionUnsupportedInReadOnlyMode |
            Self::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS,
            Self::ReadTransactionTimeout => -96000, // Custom non-MDBX error code
            Self::Permission => ffi::MDBX_EPERM,
            Self::Other(err_code) => *err_code,
        }
    }
}
impl From<Error> for i32 {
    /// Returns the raw MDBX error code for `value`; see [`Error::to_err_code`].
    fn from(value: Error) -> Self {
        value.to_err_code()
    }
}
#[inline]
pub(crate) const fn mdbx_result(err_code: c_int) -> Result<bool> {
match err_code {
ffi::MDBX_SUCCESS => Ok(false),
ffi::MDBX_RESULT_TRUE => Ok(true),
other => Err(Error::from_err_code(other)),
}
}
/// Unwraps an MDBX `Result` in the enclosing function, converting the "not found" family
/// ([`Error::NotFound`] / [`Error::NoData`]) into an early `return Ok(None)` and propagating
/// any other error with `return Err(..)`; otherwise evaluates to the `Ok` value.
#[macro_export]
macro_rules! mdbx_try_optional {
    ($expr:expr) => {{
        match $expr {
            Err(Error::NotFound | Error::NoData) => return Ok(None),
            Err(e) => return Err(e),
            Ok(v) => v,
        }
    }};
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_description() {
        // `13` is `MDBX_EACCESS` (unix `EACCES`), which maps to `Error::Access`.
        assert_eq!(
            "the environment opened in read-only, check <https://reth.rs/run/troubleshooting.html> for more",
            Error::from_err_code(13).to_string()
        );
        assert_eq!("file is not an MDBX file", Error::Invalid.to_string());
    }
    #[test]
    fn test_conversion() {
        // Raw-code → variant mapping is exercised on a representative constant.
        assert_eq!(Error::from_err_code(ffi::MDBX_KEYEXIST), Error::KeyExist);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/txn_manager.rs | crates/storage/libmdbx-rs/src/txn_manager.rs | use crate::{
environment::EnvPtr,
error::{mdbx_result, Result},
CommitLatency,
};
use std::{
ptr,
sync::mpsc::{sync_channel, Receiver, SyncSender},
};
/// Newtype for a raw MDBX transaction pointer so it can be moved between threads.
#[derive(Copy, Clone, Debug)]
pub(crate) struct TxnPtr(pub(crate) *mut ffi::MDBX_txn);
// SAFETY: the wrapper itself only carries the pointer; it is dereferenced on the dedicated
// txn-manager thread (see `TxnManager::start_message_listener`). NOTE(review): soundness
// relies on that usage discipline rather than on this type — confirm no other deref sites.
unsafe impl Send for TxnPtr {}
unsafe impl Sync for TxnPtr {}
/// Requests handled by the txn-manager thread; each carries a channel for its reply.
pub(crate) enum TxnManagerMessage {
    /// Begin a transaction (optionally nested under `parent`) with the given flags; the new
    /// pointer is sent back on `sender`.
    Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender<Result<TxnPtr>> },
    /// Abort the transaction; the `mdbx_txn_abort` result is sent back on `sender`.
    Abort { tx: TxnPtr, sender: SyncSender<Result<bool>> },
    /// Commit the transaction; the result plus measured commit latency is sent back.
    Commit { tx: TxnPtr, sender: SyncSender<Result<(bool, CommitLatency)>> },
}
/// Manages transactions by doing two things:
/// - Opening, aborting, and committing transactions using [`TxnManager::send_message`] with the
///   corresponding [`TxnManagerMessage`]
/// - Aborting long-lived read transactions (if the `read-tx-timeouts` feature is enabled and
///   the manager is built via `TxnManager::new_with_max_read_transaction_duration`)
#[derive(Debug)]
pub(crate) struct TxnManager {
    /// Rendezvous channel (capacity 0) to the dedicated txn-manager thread.
    sender: SyncSender<TxnManagerMessage>,
    /// Tracker for active read transactions; `None` when no timeout is configured.
    #[cfg(feature = "read-tx-timeouts")]
    read_transactions: Option<std::sync::Arc<read_transactions::ReadTransactions>>,
}
impl TxnManager {
    /// Creates a new manager and spawns its dedicated message-listener thread.
    pub(crate) fn new(env: EnvPtr) -> Self {
        // Capacity-0 channel: `send_message` blocks until the listener picks the message up.
        let (tx, rx) = sync_channel(0);
        let txn_manager = Self {
            sender: tx,
            #[cfg(feature = "read-tx-timeouts")]
            read_transactions: None,
        };
        txn_manager.start_message_listener(env, rx);
        txn_manager
    }
    /// Spawns a new [`std::thread`] that listens to incoming [`TxnManagerMessage`] messages,
    /// executes an FFI function, and returns the result on the provided channel.
    ///
    /// - [`TxnManagerMessage::Begin`] opens a new transaction with [`ffi::mdbx_txn_begin_ex`]
    /// - [`TxnManagerMessage::Abort`] aborts a transaction with [`ffi::mdbx_txn_abort`]
    /// - [`TxnManagerMessage::Commit`] commits a transaction with [`ffi::mdbx_txn_commit_ex`]
    ///
    /// The thread exits when the sending side of the channel disconnects (i.e. when the
    /// manager is dropped).
    fn start_message_listener(&self, env: EnvPtr, rx: Receiver<TxnManagerMessage>) {
        let task = move || {
            // Move the raw env pointer into the thread; all FFI calls below run here.
            let env = env;
            loop {
                match rx.recv() {
                    Ok(msg) => match msg {
                        TxnManagerMessage::Begin { parent, flags, sender } => {
                            let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
                            let res = mdbx_result(unsafe {
                                ffi::mdbx_txn_begin_ex(
                                    env.0,
                                    parent.0,
                                    flags,
                                    &mut txn,
                                    ptr::null_mut(),
                                )
                            })
                            .map(|_| TxnPtr(txn));
                            sender.send(res).unwrap();
                        }
                        TxnManagerMessage::Abort { tx, sender } => {
                            sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap();
                        }
                        TxnManagerMessage::Commit { tx, sender } => {
                            sender
                                .send({
                                    // Commit and capture MDBX's latency breakdown alongside.
                                    let mut latency = CommitLatency::new();
                                    mdbx_result(unsafe {
                                        ffi::mdbx_txn_commit_ex(tx.0, latency.mdb_commit_latency())
                                    })
                                    .map(|v| (v, latency))
                                })
                                .unwrap();
                        }
                    },
                    // Channel disconnected: the manager was dropped, stop the thread.
                    Err(_) => return,
                }
            }
        };
        std::thread::Builder::new().name("mdbx-rs-txn-manager".to_string()).spawn(task).unwrap();
    }
    /// Sends a message to the listener thread, blocking until it is received.
    ///
    /// # Panics
    /// Panics if the listener thread has terminated (channel disconnected).
    pub(crate) fn send_message(&self, message: TxnManagerMessage) {
        self.sender.send(message).unwrap()
    }
}
#[cfg(feature = "read-tx-timeouts")]
mod read_transactions {
use crate::{
environment::EnvPtr, error::mdbx_result, transaction::TransactionPtr,
txn_manager::TxnManager,
};
use dashmap::{DashMap, DashSet};
use std::{
backtrace::Backtrace,
sync::{mpsc::sync_channel, Arc},
time::{Duration, Instant},
};
use tracing::{error, trace, warn};
const READ_TRANSACTIONS_CHECK_INTERVAL: Duration = Duration::from_secs(5);
impl TxnManager {
/// Returns a new instance for which the maximum duration that a read transaction can be
/// open is set.
pub(crate) fn new_with_max_read_transaction_duration(
env: EnvPtr,
duration: Duration,
) -> Self {
let read_transactions = Arc::new(ReadTransactions::new(duration));
read_transactions.clone().start_monitor();
let (tx, rx) = sync_channel(0);
let txn_manager = Self { sender: tx, read_transactions: Some(read_transactions) };
txn_manager.start_message_listener(env, rx);
txn_manager
}
/// Adds a new transaction to the list of active read transactions.
pub(crate) fn add_active_read_transaction(
&self,
ptr: *mut ffi::MDBX_txn,
tx: TransactionPtr,
) {
if let Some(read_transactions) = &self.read_transactions {
read_transactions.add_active(ptr, tx);
}
}
/// Removes a transaction from the list of active read transactions.
///
/// Returns `true` if the transaction was found and removed.
pub(crate) fn remove_active_read_transaction(&self, ptr: *mut ffi::MDBX_txn) -> bool {
self.read_transactions.as_ref().is_some_and(|txs| txs.remove_active(ptr))
}
/// Returns the number of timed out transactions that were not aborted by the user yet.
pub(crate) fn timed_out_not_aborted_read_transactions(&self) -> Option<usize> {
self.read_transactions
.as_ref()
.map(|read_transactions| read_transactions.timed_out_not_aborted())
}
}
#[derive(Debug, Default)]
pub(super) struct ReadTransactions {
/// Maximum duration that a read transaction can be open until the
/// [`ReadTransactions::start_monitor`] aborts it.
max_duration: Duration,
/// List of currently active read transactions.
///
/// We store `usize` instead of a raw pointer as a key, because pointers are not
/// comparable. The time of transaction opening is stored as a value.
///
/// The backtrace of the transaction opening is recorded only when debug assertions are
/// enabled.
active: DashMap<usize, (TransactionPtr, Instant, Option<Arc<Backtrace>>)>,
/// List of timed out transactions that were not aborted by the user yet, hence have a
/// dangling read transaction pointer.
timed_out_not_aborted: DashSet<usize>,
}
impl ReadTransactions {
pub(super) fn new(max_duration: Duration) -> Self {
Self { max_duration, ..Default::default() }
}
/// Adds a new transaction to the list of active read transactions.
pub(super) fn add_active(&self, ptr: *mut ffi::MDBX_txn, tx: TransactionPtr) {
let _ = self.active.insert(
ptr as usize,
(
tx,
Instant::now(),
cfg!(debug_assertions).then(|| Arc::new(Backtrace::force_capture())),
),
);
}
/// Removes a transaction from the list of active read transactions.
pub(super) fn remove_active(&self, ptr: *mut ffi::MDBX_txn) -> bool {
self.timed_out_not_aborted.remove(&(ptr as usize));
self.active.remove(&(ptr as usize)).is_some()
}
/// Returns the number of timed out transactions that were not aborted by the user yet.
pub(super) fn timed_out_not_aborted(&self) -> usize {
self.timed_out_not_aborted.len()
}
/// Spawns a new [`std::thread`] that monitors the list of active read transactions and
/// timeouts those that are open for longer than `ReadTransactions.max_duration`.
pub(super) fn start_monitor(self: Arc<Self>) {
let task = move || {
let mut timed_out_active = Vec::new();
loop {
let now = Instant::now();
let mut max_active_transaction_duration = None;
// Iterate through active read transactions and time out those that's open for
// longer than `self.max_duration`.
for entry in &self.active {
let (tx, start, backtrace) = entry.value();
let duration = now - *start;
if duration > self.max_duration {
let result = tx.txn_execute_fail_on_timeout(|txn_ptr| {
// Time out the transaction.
//
// We use `mdbx_txn_reset` instead of `mdbx_txn_abort` here to
// prevent MDBX from reusing the pointer of the aborted
// transaction for new read-only transactions. This is
// important because we store the pointer in the `active` list
// and assume that it is unique.
//
// See https://libmdbx.dqdkfa.ru/group__c__transactions.html#gae9f34737fe60b0ba538d5a09b6a25c8d for more info.
let result = mdbx_result(unsafe { ffi::mdbx_txn_reset(txn_ptr) });
if result.is_ok() {
tx.set_timed_out();
}
(txn_ptr, duration, result)
});
match result {
Ok((txn_ptr, duration, error)) => {
// Add the transaction to `timed_out_active`. We can't remove it
// instantly from the list of active transactions, because we
// iterate through it.
timed_out_active.push((
txn_ptr,
duration,
backtrace.clone(),
error,
));
}
Err(err) => {
error!(target: "libmdbx", %err, ?backtrace, "Failed to abort the long-lived read transaction")
}
}
} else {
max_active_transaction_duration = Some(
duration.max(max_active_transaction_duration.unwrap_or_default()),
);
}
}
// Walk through timed out transactions, and delete them from the list of active
// transactions.
for (ptr, open_duration, backtrace, err) in timed_out_active.iter().cloned() {
// Try deleting the transaction from the list of active transactions.
let was_in_active = self.remove_active(ptr);
if let Err(err) = err {
if was_in_active {
// If the transaction was in the list of active transactions,
// then user didn't abort it and we failed to do so.
error!(target: "libmdbx", %err, ?open_duration, ?backtrace, "Failed to time out the long-lived read transaction");
}
} else {
// Happy path, the transaction has been timed out by us with no errors.
warn!(target: "libmdbx", ?open_duration, ?backtrace, "Long-lived read transaction has been timed out");
// Add transaction to the list of timed out transactions that were not
// aborted by the user yet.
self.timed_out_not_aborted.insert(ptr as usize);
}
}
// Clear the list of timed out transactions, but not de-allocate the reserved
// capacity to save on further pushes.
timed_out_active.clear();
if !self.active.is_empty() {
trace!(
target: "libmdbx",
elapsed = ?now.elapsed(),
active = ?self.active.iter().map(|entry| {
let (tx, start, _) = entry.value();
(tx.clone(), start.elapsed())
}).collect::<Vec<_>>(),
"Read transactions"
);
}
// Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until
// the closest deadline of an active read transaction
let sleep_duration = READ_TRANSACTIONS_CHECK_INTERVAL.min(
self.max_duration - max_active_transaction_duration.unwrap_or_default(),
);
trace!(target: "libmdbx", ?sleep_duration, elapsed = ?now.elapsed(), "Putting transaction monitor to sleep");
std::thread::sleep(sleep_duration);
}
};
std::thread::Builder::new()
.name("mdbx-rs-read-tx-timeouts".to_string())
.spawn(task)
.unwrap();
}
}
#[cfg(test)]
mod tests {
use crate::{
txn_manager::read_transactions::READ_TRANSACTIONS_CHECK_INTERVAL, Environment, Error,
MaxReadTransactionDuration,
};
use std::{thread::sleep, time::Duration};
use tempfile::tempdir;
#[test]
fn txn_manager_read_transactions_duration_set() {
const MAX_DURATION: Duration = Duration::from_secs(1);
let dir = tempdir().unwrap();
let env = Environment::builder()
.set_max_read_transaction_duration(MaxReadTransactionDuration::Set(MAX_DURATION))
.open(dir.path())
.unwrap();
let read_transactions = env.txn_manager().read_transactions.as_ref().unwrap();
// Create a read-only transaction, successfully use it, close it by dropping.
{
let tx = env.begin_ro_txn().unwrap();
let tx_ptr = tx.txn() as usize;
assert!(read_transactions.active.contains_key(&tx_ptr));
tx.open_db(None).unwrap();
drop(tx);
assert!(!read_transactions.active.contains_key(&tx_ptr));
}
// Create a read-only transaction, successfully use it, close it by committing.
{
let tx = env.begin_ro_txn().unwrap();
let tx_ptr = tx.txn() as usize;
assert!(read_transactions.active.contains_key(&tx_ptr));
tx.open_db(None).unwrap();
tx.commit().unwrap();
assert!(!read_transactions.active.contains_key(&tx_ptr));
}
{
// Create a read-only transaction and observe it's in the list of active
// transactions.
let tx = env.begin_ro_txn().unwrap();
let tx_ptr = tx.txn() as usize;
assert!(read_transactions.active.contains_key(&tx_ptr));
// Wait until the transaction is timed out by the manager.
sleep(MAX_DURATION + READ_TRANSACTIONS_CHECK_INTERVAL);
// Ensure that the transaction is not in the list of active transactions anymore,
// and is in the list of timed out but not aborted transactions.
assert!(!read_transactions.active.contains_key(&tx_ptr));
assert!(read_transactions.timed_out_not_aborted.contains(&tx_ptr));
// Use the timed out transaction and observe the `Error::ReadTransactionTimeout`
assert_eq!(tx.open_db(None).err(), Some(Error::ReadTransactionTimeout));
assert!(!read_transactions.active.contains_key(&tx_ptr));
assert!(read_transactions.timed_out_not_aborted.contains(&tx_ptr));
assert_eq!(tx.id().err(), Some(Error::ReadTransactionTimeout));
assert!(!read_transactions.active.contains_key(&tx_ptr));
assert!(read_transactions.timed_out_not_aborted.contains(&tx_ptr));
// Ensure that the transaction pointer is not reused when opening a new read-only
// transaction.
let new_tx = env.begin_ro_txn().unwrap();
let new_tx_ptr = new_tx.txn() as usize;
assert!(read_transactions.active.contains_key(&new_tx_ptr));
assert_ne!(tx_ptr, new_tx_ptr);
// Drop the transaction and ensure that it's not in the list of timed out but not
// aborted transactions anymore.
drop(tx);
assert!(!read_transactions.timed_out_not_aborted.contains(&tx_ptr));
}
}
#[test]
fn txn_manager_read_transactions_duration_unbounded() {
let dir = tempdir().unwrap();
let env = Environment::builder()
.set_max_read_transaction_duration(MaxReadTransactionDuration::Unbounded)
.open(dir.path())
.unwrap();
assert!(env.txn_manager().read_transactions.is_none());
let tx = env.begin_ro_txn().unwrap();
sleep(READ_TRANSACTIONS_CHECK_INTERVAL);
assert!(tx.commit().is_ok())
}
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/src/transaction.rs | crates/storage/libmdbx-rs/src/transaction.rs | use crate::{
database::Database,
environment::Environment,
error::{mdbx_result, Result},
flags::{DatabaseFlags, WriteFlags},
txn_manager::{TxnManagerMessage, TxnPtr},
Cursor, Error, Stat, TableObject,
};
use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE};
use indexmap::IndexSet;
use parking_lot::{Mutex, MutexGuard};
use std::{
ffi::{c_uint, c_void},
fmt::{self, Debug},
mem::size_of,
ptr, slice,
sync::{atomic::AtomicBool, mpsc::sync_channel, Arc},
time::Duration,
};
#[cfg(feature = "read-tx-timeouts")]
use ffi::mdbx_txn_renew;
// Sealed-trait pattern: `Sealed` is public within a private module, so
// downstream crates can name `TransactionKind` but never implement it.
mod private {
    use super::*;
    /// Supertrait restricting `TransactionKind` impls to this crate.
    pub trait Sealed {}
    impl Sealed for RO {}
    impl Sealed for RW {}
}
/// Marker trait distinguishing read-only ([`RO`]) from read-write ([`RW`])
/// transactions. Sealed: only implementable inside this crate.
pub trait TransactionKind: private::Sealed + Send + Sync + Debug + 'static {
    /// Flags passed to MDBX when beginning a transaction of this kind.
    #[doc(hidden)]
    const OPEN_FLAGS: MDBX_txn_flags_t;
    /// Convenience flag for distinguishing between read-only and read-write transactions.
    #[doc(hidden)]
    const IS_READ_ONLY: bool;
}
/// Marker type for a read-only transaction.
#[derive(Debug)]
#[non_exhaustive]
pub struct RO;
/// Marker type for a read-write transaction.
#[derive(Debug)]
#[non_exhaustive]
pub struct RW;
// Read-only transactions open with `MDBX_TXN_RDONLY`.
impl TransactionKind for RO {
    const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY;
    const IS_READ_ONLY: bool = true;
}
// Read-write transactions open with `MDBX_TXN_READWRITE`.
impl TransactionKind for RW {
    const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE;
    const IS_READ_ONLY: bool = false;
}
/// An MDBX transaction.
///
/// All database operations require a transaction.
pub struct Transaction<K>
where
    K: TransactionKind,
{
    // Shared so the transaction can be cheaply cloned; the underlying MDBX
    // transaction is finalized when the last clone drops (see
    // `TransactionInner`'s `Drop` impl).
    inner: Arc<TransactionInner<K>>,
}
impl<K> Transaction<K>
where
    K: TransactionKind,
{
    /// Begins a new top-level (parentless) transaction of kind `K` in `env`.
    pub(crate) fn new(env: Environment) -> Result<Self> {
        let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
        unsafe {
            mdbx_result(ffi::mdbx_txn_begin_ex(
                env.env_ptr(),
                ptr::null_mut(),
                K::OPEN_FLAGS,
                &mut txn,
                ptr::null_mut(),
            ))?;
            Ok(Self::new_from_ptr(env, txn))
        }
    }
    /// Wraps an already-begun MDBX transaction pointer.
    ///
    /// With the `read-tx-timeouts` feature, read-only transactions are registered
    /// with the txn manager so they can be timed out.
    pub(crate) fn new_from_ptr(env: Environment, txn_ptr: *mut ffi::MDBX_txn) -> Self {
        let txn = TransactionPtr::new(txn_ptr);
        #[cfg(feature = "read-tx-timeouts")]
        if K::IS_READ_ONLY {
            env.txn_manager().add_active_read_transaction(txn_ptr, txn.clone())
        }
        let inner = TransactionInner {
            txn,
            primed_dbis: Mutex::new(IndexSet::new()),
            committed: AtomicBool::new(false),
            env,
            _marker: Default::default(),
        };
        Self { inner: Arc::new(inner) }
    }
    /// Executes the given closure once the lock on the transaction is acquired.
    ///
    /// The caller **must** ensure that the pointer is not used after the
    /// lifetime of the transaction.
    #[inline]
    pub fn txn_execute<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(*mut ffi::MDBX_txn) -> T,
    {
        self.inner.txn_execute(f)
    }
    /// Executes the given closure once the lock on the transaction is acquired. If the transaction
    /// is timed out, it will be renewed first.
    ///
    /// Returns the result of the closure or an error if the transaction renewal fails.
    #[inline]
    pub(crate) fn txn_execute_renew_on_timeout<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(*mut ffi::MDBX_txn) -> T,
    {
        self.inner.txn_execute_renew_on_timeout(f)
    }
    /// Returns a copy of the raw pointer to the underlying MDBX transaction.
    #[doc(hidden)]
    #[cfg(test)]
    pub fn txn(&self) -> *mut ffi::MDBX_txn {
        self.inner.txn.txn
    }
    /// Returns a reference to the [`Environment`] this transaction belongs to.
    pub fn env(&self) -> &Environment {
        &self.inner.env
    }
    /// Returns the transaction id.
    pub fn id(&self) -> Result<u64> {
        self.txn_execute(|txn| unsafe { ffi::mdbx_txn_id(txn) })
    }
    /// Gets an item from a database.
    ///
    /// This function retrieves the data associated with the given key in the
    /// database. If the database supports duplicate keys
    /// ([`DatabaseFlags::DUP_SORT`]) then the first data item for the key will be
    /// returned. Retrieval of other items requires the use of
    /// [Cursor]. If the item is not in the database, then
    /// [None] will be returned.
    pub fn get<Key>(&self, dbi: ffi::MDBX_dbi, key: &[u8]) -> Result<Option<Key>>
    where
        Key: TableObject,
    {
        let key_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
        let mut data_val: ffi::MDBX_val = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
        self.txn_execute(|txn| unsafe {
            // `MDBX_NOTFOUND` is mapped to `Ok(None)` rather than an error.
            match ffi::mdbx_get(txn, dbi, &key_val, &mut data_val) {
                ffi::MDBX_SUCCESS => Key::decode_val::<K>(txn, data_val).map(Some),
                ffi::MDBX_NOTFOUND => Ok(None),
                err_code => Err(Error::from_err_code(err_code)),
            }
        })?
    }
    /// Commits the transaction.
    ///
    /// Any pending operations will be saved.
    pub fn commit(self) -> Result<(bool, CommitLatency)> {
        // Drops the rebound database handles; callers that need them should use
        // `commit_and_rebind_open_dbs` directly.
        self.commit_and_rebind_open_dbs().map(|v| (v.0, v.1))
    }
    /// Marks a database handle to be re-created (kept open) after this transaction commits;
    /// see [`Self::commit_and_rebind_open_dbs`].
    pub fn prime_for_permaopen(&self, db: Database) {
        self.inner.primed_dbis.lock().insert(db.dbi());
    }
    /// Commits the transaction and returns table handles permanently open until dropped.
    pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, CommitLatency, Vec<Database>)> {
        let result = {
            let result = self.txn_execute(|txn| {
                if K::IS_READ_ONLY {
                    // Read-only transactions commit directly; deregister from the
                    // txn manager first so it cannot time the transaction out.
                    #[cfg(feature = "read-tx-timeouts")]
                    self.env().txn_manager().remove_active_read_transaction(txn);
                    let mut latency = CommitLatency::new();
                    mdbx_result(unsafe {
                        ffi::mdbx_txn_commit_ex(txn, latency.mdb_commit_latency())
                    })
                    .map(|v| (v, latency))
                } else {
                    // Read-write transactions are committed by the txn-manager
                    // thread; the zero-capacity channel makes this a synchronous
                    // round-trip.
                    let (sender, rx) = sync_channel(0);
                    self.env()
                        .txn_manager()
                        .send_message(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender });
                    rx.recv().unwrap()
                }
            })?;
            // Mark committed so `Drop` does not attempt to abort the transaction.
            self.inner.set_committed();
            result
        };
        result.map(|(v, latency)| {
            (
                v,
                latency,
                self.inner
                    .primed_dbis
                    .lock()
                    .iter()
                    .map(|&dbi| Database::new_from_ptr(dbi, self.env().clone()))
                    .collect(),
            )
        })
    }
    /// Opens a handle to an MDBX database.
    ///
    /// If `name` is [None], then the returned handle will be for the default database.
    ///
    /// If `name` is not [None], then the returned handle will be for a named database. In this
    /// case the environment must be configured to allow named databases through
    /// [`EnvironmentBuilder::set_max_dbs()`](crate::EnvironmentBuilder::set_max_dbs).
    ///
    /// The returned database handle may be shared among any transaction in the environment.
    ///
    /// The database name may not contain the null character.
    pub fn open_db(&self, name: Option<&str>) -> Result<Database> {
        Database::new(self, name, 0)
    }
    /// Gets the option flags for the given database in the transaction.
    pub fn db_flags(&self, db: &Database) -> Result<DatabaseFlags> {
        let mut flags: c_uint = 0;
        unsafe {
            self.txn_execute(|txn| {
                mdbx_result(ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut()))
            })??;
        }
        // The types are not the same on Windows. Great!
        #[cfg_attr(not(windows), allow(clippy::useless_conversion))]
        Ok(DatabaseFlags::from_bits_truncate(flags.try_into().unwrap()))
    }
    /// Retrieves database statistics.
    pub fn db_stat(&self, db: &Database) -> Result<Stat> {
        self.db_stat_with_dbi(db.dbi())
    }
    /// Retrieves database statistics by the given dbi.
    pub fn db_stat_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result<Stat> {
        unsafe {
            let mut stat = Stat::new();
            self.txn_execute(|txn| {
                mdbx_result(ffi::mdbx_dbi_stat(txn, dbi, stat.mdb_stat(), size_of::<Stat>()))
            })??;
            Ok(stat)
        }
    }
    /// Open a new cursor on the given database.
    pub fn cursor(&self, db: &Database) -> Result<Cursor<K>> {
        Cursor::new(self.clone(), db.dbi())
    }
    /// Open a new cursor on the given dbi.
    pub fn cursor_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result<Cursor<K>> {
        Cursor::new(self.clone(), dbi)
    }
    /// Disables a timeout for this read transaction.
    #[cfg(feature = "read-tx-timeouts")]
    pub fn disable_timeout(&self) {
        if K::IS_READ_ONLY {
            // Deregistering from the txn manager means it will never reset this
            // transaction for exceeding the max read duration.
            self.env().txn_manager().remove_active_read_transaction(self.inner.txn.txn);
        }
    }
}
impl<K> Clone for Transaction<K>
where
    K: TransactionKind,
{
    /// Cheap clone: bumps the reference count on the shared inner state; both
    /// handles refer to the same underlying MDBX transaction.
    fn clone(&self) -> Self {
        Self { inner: self.inner.clone() }
    }
}
impl<K> fmt::Debug for Transaction<K>
where
    K: TransactionKind,
{
    /// Debug-formats the transaction without dereferencing the raw pointer.
    ///
    /// Uses the generic name `Transaction`: this impl covers both `RO` and `RW`
    /// kinds, so the previously hard-coded `"RoTransaction"` label was wrong for
    /// read-write transactions.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Transaction").finish_non_exhaustive()
    }
}
/// Internals of a transaction.
struct TransactionInner<K>
where
    K: TransactionKind,
{
    /// The transaction pointer itself.
    txn: TransactionPtr,
    /// A set of database handles that are primed for permaopen.
    primed_dbis: Mutex<IndexSet<ffi::MDBX_dbi>>,
    /// Whether the transaction has committed.
    committed: AtomicBool,
    /// The environment the transaction belongs to; kept alive for the whole
    /// lifetime of the transaction.
    env: Environment,
    /// Ties the transaction kind `K` to this type without storing a value.
    _marker: std::marker::PhantomData<fn(K)>,
}
impl<K> TransactionInner<K>
where
    K: TransactionKind,
{
    /// Marks the transaction as committed.
    fn set_committed(&self) {
        self.committed.store(true, std::sync::atomic::Ordering::SeqCst);
    }
    /// Returns `true` once [`Self::set_committed`] has been called.
    fn has_committed(&self) -> bool {
        self.committed.load(std::sync::atomic::Ordering::SeqCst)
    }
    /// Runs `f` with the raw transaction pointer; errors if the transaction
    /// has been timed out.
    #[inline]
    fn txn_execute<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(*mut ffi::MDBX_txn) -> T,
    {
        self.txn.txn_execute_fail_on_timeout(f)
    }
    /// Runs `f` with the raw transaction pointer, renewing the transaction
    /// first if it has been timed out.
    #[inline]
    fn txn_execute_renew_on_timeout<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(*mut ffi::MDBX_txn) -> T,
    {
        self.txn.txn_execute_renew_on_timeout(f)
    }
}
impl<K> Drop for TransactionInner<K>
where
    K: TransactionKind,
{
    fn drop(&mut self) {
        // To be able to abort a timed out transaction, we need to renew it first.
        // Hence the usage of `txn_execute_renew_on_timeout` here.
        self.txn
            .txn_execute_renew_on_timeout(|txn| {
                // A committed transaction is already finalized; aborting it here
                // would be a double-free.
                if !self.has_committed() {
                    if K::IS_READ_ONLY {
                        // Deregister before aborting so the txn manager cannot
                        // race with the abort.
                        #[cfg(feature = "read-tx-timeouts")]
                        self.env.txn_manager().remove_active_read_transaction(txn);
                        unsafe {
                            ffi::mdbx_txn_abort(txn);
                        }
                    } else {
                        // Read-write transactions are aborted by the txn-manager
                        // thread; the zero-capacity channel makes this synchronous.
                        let (sender, rx) = sync_channel(0);
                        self.env
                            .txn_manager()
                            .send_message(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender });
                        rx.recv().unwrap().unwrap();
                    }
                }
            })
            .unwrap();
    }
}
impl Transaction<RW> {
    /// Opens a database handle with the given raw MDBX flags.
    fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> Result<Database> {
        Database::new(self, name, flags.bits())
    }
    /// Opens a handle to an MDBX database, creating the database if necessary.
    ///
    /// If the database is already created, the given option flags will be added to it.
    ///
    /// If `name` is [None], then the returned handle will be for the default database.
    ///
    /// If `name` is not [None], then the returned handle will be for a named database. In this
    /// case the environment must be configured to allow named databases through
    /// [`EnvironmentBuilder::set_max_dbs()`](crate::EnvironmentBuilder::set_max_dbs).
    ///
    /// This function will fail with [`Error::BadRslot`] if called by a thread with an open
    /// transaction.
    pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> Result<Database> {
        self.open_db_with_flags(name, flags | DatabaseFlags::CREATE)
    }
    /// Stores an item into a database.
    ///
    /// This function stores key/data pairs in the database. The default
    /// behavior is to enter the new key/data pair, replacing any previously
    /// existing key if duplicates are disallowed, or adding a duplicate data
    /// item if duplicates are allowed ([`DatabaseFlags::DUP_SORT`]).
    pub fn put(
        &self,
        dbi: ffi::MDBX_dbi,
        key: impl AsRef<[u8]>,
        data: impl AsRef<[u8]>,
        flags: WriteFlags,
    ) -> Result<()> {
        let key = key.as_ref();
        let data = data.as_ref();
        let key_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
        let mut data_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void };
        mdbx_result(self.txn_execute(|txn| unsafe {
            ffi::mdbx_put(txn, dbi, &key_val, &mut data_val, flags.bits())
        })?)?;
        Ok(())
    }
    /// Returns a buffer which can be used to write a value into the item at the
    /// given key and with the given length. The buffer must be completely
    /// filled by the caller.
    #[allow(clippy::mut_from_ref)]
    pub fn reserve(
        &self,
        db: &Database,
        key: impl AsRef<[u8]>,
        len: usize,
        flags: WriteFlags,
    ) -> Result<&mut [u8]> {
        let key = key.as_ref();
        let key_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
        let mut data_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: len, iov_base: ptr::null_mut::<c_void>() };
        unsafe {
            mdbx_result(self.txn_execute(|txn| {
                ffi::mdbx_put(
                    txn,
                    db.dbi(),
                    &key_val,
                    &mut data_val,
                    flags.bits() | ffi::MDBX_RESERVE,
                )
            })?)?;
            // With MDBX_RESERVE, mdbx_put fills `data_val` with a pointer into the
            // database that is valid for `iov_len` bytes within this transaction.
            Ok(slice::from_raw_parts_mut(data_val.iov_base as *mut u8, data_val.iov_len))
        }
    }
    /// Delete items from a database.
    /// This function removes key/data pairs from the database.
    ///
    /// The data parameter is NOT ignored regardless the database does support sorted duplicate data
    /// items or not. If the data parameter is [Some] only the matching data item will be
    /// deleted. Otherwise, if data parameter is [None], any/all value(s) for specified key will
    /// be deleted.
    ///
    /// Returns `true` if the key/value pair was present.
    pub fn del(
        &self,
        dbi: ffi::MDBX_dbi,
        key: impl AsRef<[u8]>,
        data: Option<&[u8]>,
    ) -> Result<bool> {
        let key = key.as_ref();
        let key_val: ffi::MDBX_val =
            ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
        let data_val: Option<ffi::MDBX_val> = data.map(|data| ffi::MDBX_val {
            iov_len: data.len(),
            iov_base: data.as_ptr() as *mut c_void,
        });
        mdbx_result({
            self.txn_execute(|txn| {
                if let Some(d) = data_val {
                    unsafe { ffi::mdbx_del(txn, dbi, &key_val, &d) }
                } else {
                    unsafe { ffi::mdbx_del(txn, dbi, &key_val, ptr::null()) }
                }
            })?
        })
        .map(|_| true)
        .or_else(|e| match e {
            // A missing key/value is reported as `false`, not as an error.
            Error::NotFound => Ok(false),
            other => Err(other),
        })
    }
    /// Empties the given database. All items will be removed.
    pub fn clear_db(&self, dbi: ffi::MDBX_dbi) -> Result<()> {
        // `false` = empty the database but keep the dbi handle open.
        mdbx_result(self.txn_execute(|txn| unsafe { ffi::mdbx_drop(txn, dbi, false) })?)?;
        Ok(())
    }
    /// Drops the database from the environment.
    ///
    /// # Safety
    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
    /// BEFORE calling this function.
    pub unsafe fn drop_db(&self, db: Database) -> Result<()> {
        // `true` = delete the database (and its dbi) from the environment.
        mdbx_result(self.txn_execute(|txn| ffi::mdbx_drop(txn, db.dbi(), true))?)?;
        Ok(())
    }
}
impl Transaction<RO> {
    /// Closes the database handle.
    ///
    /// # Safety
    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
    /// BEFORE calling this function.
    pub unsafe fn close_db(&self, db: Database) -> Result<()> {
        // Closing a dbi is an environment-level operation; it does not go
        // through the transaction pointer.
        mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?;
        Ok(())
    }
}
impl Transaction<RW> {
    /// Begins a new nested transaction inside of this transaction.
    pub fn begin_nested_txn(&mut self) -> Result<Self> {
        // Nested write transactions are rejected up front when the environment
        // uses a write map (see `Error::NestedTransactionsUnsupportedWithWriteMap`).
        if self.inner.env.is_write_map() {
            return Err(Error::NestedTransactionsUnsupportedWithWriteMap)
        }
        self.txn_execute(|txn| {
            // Creation is delegated to the txn-manager thread; the zero-capacity
            // channel makes the request synchronous.
            let (tx, rx) = sync_channel(0);
            self.env().txn_manager().send_message(TxnManagerMessage::Begin {
                parent: TxnPtr(txn),
                flags: RW::OPEN_FLAGS,
                sender: tx,
            });
            rx.recv().unwrap().map(|ptr| Self::new_from_ptr(self.env().clone(), ptr.0))
        })?
    }
}
/// A shareable pointer to an MDBX transaction.
#[derive(Debug, Clone)]
pub(crate) struct TransactionPtr {
    /// Raw MDBX transaction handle; only accessed while `lock` is held.
    txn: *mut ffi::MDBX_txn,
    /// Set by the txn manager when the read transaction is reset on timeout.
    #[cfg(feature = "read-tx-timeouts")]
    timed_out: Arc<AtomicBool>,
    /// Serializes all access to `txn` across clones of this pointer.
    lock: Arc<Mutex<()>>,
}
impl TransactionPtr {
    /// Wraps a raw transaction pointer with a lock and a (feature-gated)
    /// timed-out flag.
    fn new(txn: *mut ffi::MDBX_txn) -> Self {
        Self {
            txn,
            #[cfg(feature = "read-tx-timeouts")]
            timed_out: Arc::new(AtomicBool::new(false)),
            lock: Arc::new(Mutex::new(())),
        }
    }
    /// Returns `true` if the transaction is timed out.
    ///
    /// When transaction is timed out via `TxnManager`, it's actually reset using
    /// `mdbx_txn_reset`. It makes the transaction unusable (MDBX fails on any usages of such
    /// transactions).
    ///
    /// Importantly, we can't rely on `MDBX_TXN_FINISHED` flag to check if the transaction is timed
    /// out using `mdbx_txn_reset`, because MDBX uses it in other cases too.
    #[cfg(feature = "read-tx-timeouts")]
    fn is_timed_out(&self) -> bool {
        self.timed_out.load(std::sync::atomic::Ordering::SeqCst)
    }
    /// Marks this transaction as timed out; called by the txn manager after it
    /// resets the transaction.
    #[cfg(feature = "read-tx-timeouts")]
    pub(crate) fn set_timed_out(&self) {
        self.timed_out.store(true, std::sync::atomic::Ordering::SeqCst);
    }
    /// Acquires the inner transaction lock to guarantee exclusive access to the transaction
    /// pointer.
    fn lock(&self) -> MutexGuard<'_, ()> {
        if let Some(lock) = self.lock.try_lock() {
            lock
        } else {
            // Contention is unusual enough to be worth tracing (with a backtrace)
            // before blocking on the lock.
            tracing::trace!(
                target: "libmdbx",
                txn = %self.txn as usize,
                backtrace = %std::backtrace::Backtrace::capture(),
                "Transaction lock is already acquired, blocking...
            To display the full backtrace, run with `RUST_BACKTRACE=full` env variable."
            );
            self.lock.lock()
        }
    }
    /// Executes the given closure once the lock on the transaction is acquired.
    ///
    /// Returns the result of the closure or an error if the transaction is timed out.
    #[inline]
    pub(crate) fn txn_execute_fail_on_timeout<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(*mut ffi::MDBX_txn) -> T,
    {
        let _lck = self.lock();
        // No race condition with the `TxnManager` timing out the transaction is possible here,
        // because we're taking a lock for any actions on the transaction pointer, including a call
        // to the `mdbx_txn_reset`.
        #[cfg(feature = "read-tx-timeouts")]
        if self.is_timed_out() {
            return Err(Error::ReadTransactionTimeout)
        }
        Ok((f)(self.txn))
    }
    /// Executes the given closure once the lock on the transaction is acquired. If the transaction
    /// is timed out, it will be renewed first.
    ///
    /// Returns the result of the closure or an error if the transaction renewal fails.
    #[inline]
    pub(crate) fn txn_execute_renew_on_timeout<F, T>(&self, f: F) -> Result<T>
    where
        F: FnOnce(*mut ffi::MDBX_txn) -> T,
    {
        let _lck = self.lock();
        // To be able to do any operations on the transaction, we need to renew it first.
        #[cfg(feature = "read-tx-timeouts")]
        if self.is_timed_out() {
            mdbx_result(unsafe { mdbx_txn_renew(self.txn) })?;
        }
        Ok((f)(self.txn))
    }
}
/// Commit latencies info.
///
/// Contains information about latency of commit stages.
/// Inner struct stores this info in 1/65536 of seconds units.
///
/// `#[repr(transparent)]` keeps this layout-compatible with
/// `ffi::MDBX_commit_latency` so a pointer to it can be passed to FFI.
#[derive(Debug)]
#[repr(transparent)]
pub struct CommitLatency(ffi::MDBX_commit_latency);
impl CommitLatency {
    /// Create a new `CommitLatency` with zero'd inner struct `ffi::MDBX_commit_latency`.
    pub(crate) const fn new() -> Self {
        // SAFETY: the accessors below read only plain integer (`u32`) fields,
        // for which the all-zero bit pattern is valid.
        unsafe { Self(std::mem::zeroed()) }
    }
    /// Returns a mut pointer to `ffi::MDBX_commit_latency`.
    ///
    /// Passed to `mdbx_txn_commit_ex` to be filled in by MDBX.
    pub(crate) const fn mdb_commit_latency(&mut self) -> *mut ffi::MDBX_commit_latency {
        &mut self.0
    }
}
impl CommitLatency {
    /// Duration of preparation (commit child transactions, update
    /// sub-databases records and cursors destroying).
    #[inline]
    pub const fn preparation(&self) -> Duration {
        Self::time_to_duration(self.0.preparation)
    }
    /// Duration of GC update by wall clock.
    #[inline]
    pub const fn gc_wallclock(&self) -> Duration {
        Self::time_to_duration(self.0.gc_wallclock)
    }
    /// Duration of internal audit if enabled.
    #[inline]
    pub const fn audit(&self) -> Duration {
        Self::time_to_duration(self.0.audit)
    }
    /// Duration of writing dirty/modified data pages to a filesystem,
    /// i.e. the summary duration of a `write()` syscalls during commit.
    #[inline]
    pub const fn write(&self) -> Duration {
        Self::time_to_duration(self.0.write)
    }
    /// Duration of syncing written data to the disk/storage, i.e.
    /// the duration of a `fdatasync()` or a `msync()` syscall during commit.
    #[inline]
    pub const fn sync(&self) -> Duration {
        Self::time_to_duration(self.0.sync)
    }
    /// Duration of transaction ending (releasing resources).
    #[inline]
    pub const fn ending(&self) -> Duration {
        Self::time_to_duration(self.0.ending)
    }
    /// The total duration of a commit.
    #[inline]
    pub const fn whole(&self) -> Duration {
        Self::time_to_duration(self.0.whole)
    }
    /// User-mode CPU time spent on GC update.
    #[inline]
    pub const fn gc_cputime(&self) -> Duration {
        Self::time_to_duration(self.0.gc_cputime)
    }
    /// Converts a raw MDBX time value (in units of 1/65536 of a second) into a
    /// [`Duration`].
    ///
    /// Multiplies before dividing so the fractional part of `1e9 / 65536`
    /// (~0.79 ns per unit) is not truncated away; the previous
    /// `time * (1_000_000_000 / 65_536)` form under-reported by ~52µs per
    /// second of latency. `u32::MAX * 1_000_000_000 < u64::MAX`, so the
    /// multiplication cannot overflow.
    #[inline]
    const fn time_to_duration(time: u32) -> Duration {
        Duration::from_nanos(time as u64 * 1_000_000_000 / 65_536)
    }
}
// SAFETY: Access to the transaction is synchronized by the lock
// (`TransactionPtr::lock`), so the raw pointer is never used concurrently.
unsafe impl Send for TransactionPtr {}
// SAFETY: Access to the transaction is synchronized by the lock.
unsafe impl Sync for TransactionPtr {}
#[cfg(test)]
mod tests {
    use super::*;
    /// Compile-time helper: only instantiable with `Send + Sync` types.
    const fn assert_send_sync<T: Send + Sync>() {}
    // Compile-time assertion that transactions of both kinds are Send + Sync.
    #[expect(dead_code)]
    const fn test_txn_send_sync() {
        assert_send_sync::<Transaction<RO>>();
        assert_send_sync::<Transaction<RW>>();
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/tests/cursor.rs | crates/storage/libmdbx-rs/tests/cursor.rs | #![allow(missing_docs)]
use reth_libmdbx::*;
use std::borrow::Cow;
use tempfile::tempdir;
#[test]
fn test_get() {
    // Basic cursor positioning: first/next/prev/last and the set* lookups.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    // Empty database: `first` yields nothing.
    assert_eq!(None, txn.cursor(&db).unwrap().first::<(), ()>().unwrap());
    txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val3", WriteFlags::empty()).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1")));
    assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1")));
    assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2")));
    assert_eq!(cursor.prev().unwrap(), Some((*b"key1", *b"val1")));
    assert_eq!(cursor.last().unwrap(), Some((*b"key3", *b"val3")));
    assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1"));
    assert_eq!(cursor.set_key(b"key3").unwrap(), Some((*b"key3", *b"val3")));
    // `set_range` positions at the first key >= the given one.
    assert_eq!(cursor.set_range(b"key2\0").unwrap(), Some((*b"key3", *b"val3")));
}
#[test]
fn test_get_dup() {
    // Cursor navigation across duplicate values in a DUP_SORT database.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
    txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val3", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val3", WriteFlags::empty()).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1")));
    assert_eq!(cursor.first_dup().unwrap(), Some(*b"val1"));
    assert_eq!(cursor.get_current().unwrap(), Some((*b"key1", *b"val1")));
    // `*_nodup` moves skip the remaining duplicates of the current key.
    assert_eq!(cursor.next_nodup().unwrap(), Some((*b"key2", *b"val1")));
    assert_eq!(cursor.next().unwrap(), Some((*b"key2", *b"val2")));
    assert_eq!(cursor.prev().unwrap(), Some((*b"key2", *b"val1")));
    assert_eq!(cursor.next_dup().unwrap(), Some((*b"key2", *b"val2")));
    assert_eq!(cursor.next_dup().unwrap(), Some((*b"key2", *b"val3")));
    // No fourth duplicate for key2.
    assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None);
    assert_eq!(cursor.prev_dup().unwrap(), Some((*b"key2", *b"val2")));
    assert_eq!(cursor.last_dup().unwrap(), Some(*b"val3"));
    assert_eq!(cursor.prev_nodup().unwrap(), Some((*b"key1", *b"val3")));
    assert_eq!(cursor.next_dup::<(), ()>().unwrap(), None);
    assert_eq!(cursor.set(b"key1").unwrap(), Some(*b"val1"));
    assert_eq!(cursor.set(b"key2").unwrap(), Some(*b"val1"));
    assert_eq!(cursor.set_range(b"key1\0").unwrap(), Some((*b"key2", *b"val1")));
    assert_eq!(cursor.get_both(b"key1", b"val3").unwrap(), Some(*b"val3"));
    // `get_both_range` finds the first duplicate >= the given value.
    assert_eq!(cursor.get_both_range::<()>(b"key1", b"val4").unwrap(), None);
    assert_eq!(cursor.get_both_range(b"key2", b"val").unwrap(), Some(*b"val1"));
    // Deleting at the cursor removes the current duplicate only.
    assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val3")));
    cursor.del(WriteFlags::empty()).unwrap();
    assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val2")));
    cursor.del(WriteFlags::empty()).unwrap();
    assert_eq!(cursor.last().unwrap(), Some((*b"key2", *b"val1")));
    cursor.del(WriteFlags::empty()).unwrap();
    assert_eq!(cursor.last().unwrap(), Some((*b"key1", *b"val3")));
}
#[test]
fn test_get_dupfixed() {
    // DUP_FIXED allows fetching multiple fixed-size duplicates in one call.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap();
    txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val3", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val4", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val5", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val6", WriteFlags::empty()).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1")));
    // All duplicates of key1 come back concatenated in a single page.
    assert_eq!(cursor.get_multiple().unwrap(), Some(*b"val1val2val3"));
    assert_eq!(cursor.next_multiple::<(), ()>().unwrap(), None);
}
#[test]
fn test_iter() {
    // Iterator adapters over a plain (non-dup) database.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    // Note the intentional gap: key4 is absent.
    let items: Vec<(_, _)> = vec![
        (*b"key1", *b"val1"),
        (*b"key2", *b"val2"),
        (*b"key3", *b"val3"),
        (*b"key5", *b"val5"),
    ];
    {
        let txn = env.begin_rw_txn().unwrap();
        let db = txn.open_db(None).unwrap();
        for (key, data) in &items {
            txn.put(db.dbi(), key, data, WriteFlags::empty()).unwrap();
        }
        assert!(!txn.commit().unwrap().0);
    }
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    // Because Result implements FromIterator, we can collect the iterator
    // of items of type Result<_, E> into a Result<Vec<_, E>> by specifying
    // the collection type via the turbofish syntax.
    assert_eq!(items, cursor.iter().collect::<Result<Vec<_>>>().unwrap());
    // Alternately, we can collect it into an appropriately typed variable.
    let retr: Result<Vec<_>> = cursor.iter_start().collect();
    assert_eq!(items, retr.unwrap());
    // `iter` continues from the current cursor position...
    cursor.set::<()>(b"key2").unwrap();
    assert_eq!(
        items.clone().into_iter().skip(2).collect::<Vec<_>>(),
        cursor.iter().collect::<Result<Vec<_>>>().unwrap()
    );
    // ...while `iter_start` always rewinds to the beginning.
    assert_eq!(items, cursor.iter_start().collect::<Result<Vec<_>>>().unwrap());
    assert_eq!(
        items.clone().into_iter().skip(1).collect::<Vec<_>>(),
        cursor.iter_from(b"key2").collect::<Result<Vec<_>>>().unwrap()
    );
    // key4 is absent, so `iter_from` starts at the next existing key (key5).
    assert_eq!(
        items.into_iter().skip(3).collect::<Vec<_>>(),
        cursor.iter_from(b"key4").collect::<Result<Vec<_>>>().unwrap()
    );
    assert_eq!(
        Vec::<((), ())>::new(),
        cursor.iter_from(b"key6").collect::<Result<Vec<_>>>().unwrap()
    );
}
#[test]
fn test_iter_empty_database() {
    // All iterator flavors over an empty database must yield nothing.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert!(cursor.iter::<(), ()>().next().is_none());
    assert!(cursor.iter_start::<(), ()>().next().is_none());
    assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none());
}
#[test]
fn test_iter_empty_dup_database() {
    // All iterator flavors (plain and duplicate-aware) over an empty DUP_SORT
    // database must yield nothing.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
    txn.commit().unwrap();
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert!(cursor.iter::<(), ()>().next().is_none());
    assert!(cursor.iter_start::<(), ()>().next().is_none());
    // (This assertion was accidentally duplicated; one check suffices.)
    assert!(cursor.iter_from::<(), ()>(b"foo").next().is_none());
    assert!(cursor.iter_dup::<(), ()>().flatten().next().is_none());
    assert!(cursor.iter_dup_start::<(), ()>().flatten().next().is_none());
    assert!(cursor.iter_dup_from::<(), ()>(b"foo").flatten().next().is_none());
    assert!(cursor.iter_dup_of::<(), ()>(b"foo").next().is_none());
}
#[test]
fn test_iter_dup() {
    // Duplicate-aware iterators over a populated DUP_SORT database.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
    txn.commit().unwrap();
    // Three duplicates each for keys a, b, c, e (note: no `d`).
    let items: Vec<(_, _)> = [
        (b"a", b"1"),
        (b"a", b"2"),
        (b"a", b"3"),
        (b"b", b"1"),
        (b"b", b"2"),
        (b"b", b"3"),
        (b"c", b"1"),
        (b"c", b"2"),
        (b"c", b"3"),
        (b"e", b"1"),
        (b"e", b"2"),
        (b"e", b"3"),
    ]
    .iter()
    .map(|&(&k, &v)| (k, v))
    .collect();
    {
        let txn = env.begin_rw_txn().unwrap();
        for (key, data) in items.clone() {
            let db = txn.open_db(None).unwrap();
            txn.put(db.dbi(), key, data, WriteFlags::empty()).unwrap();
        }
        txn.commit().unwrap();
    }
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert_eq!(items, cursor.iter_dup().flatten().collect::<Result<Vec<_>>>().unwrap());
    // `iter_dup` resumes from the current key (b), skipping the extra duplicate.
    cursor.set::<()>(b"b").unwrap();
    assert_eq!(
        items.iter().copied().skip(4).collect::<Vec<_>>(),
        cursor.iter_dup().flatten().collect::<Result<Vec<_>>>().unwrap()
    );
    assert_eq!(items, cursor.iter_dup_start().flatten().collect::<Result<Vec<_>>>().unwrap());
    assert_eq!(
        items.iter().copied().skip(3).collect::<Vec<_>>(),
        cursor.iter_dup_from(b"b").flatten().collect::<Result<Vec<_>>>().unwrap()
    );
    // Absent keys position at the next existing key (ab -> b, d -> e).
    assert_eq!(
        items.iter().copied().skip(3).collect::<Vec<_>>(),
        cursor.iter_dup_from(b"ab").flatten().collect::<Result<Vec<_>>>().unwrap()
    );
    assert_eq!(
        items.iter().copied().skip(9).collect::<Vec<_>>(),
        cursor.iter_dup_from(b"d").flatten().collect::<Result<Vec<_>>>().unwrap()
    );
    assert_eq!(
        Vec::<([u8; 1], [u8; 1])>::new(),
        cursor.iter_dup_from(b"f").flatten().collect::<Result<Vec<_>>>().unwrap()
    );
    // `iter_dup_of` yields only the duplicates of the exact key.
    assert_eq!(
        items.iter().copied().skip(3).take(3).collect::<Vec<_>>(),
        cursor.iter_dup_of(b"b").collect::<Result<Vec<_>>>().unwrap()
    );
    assert_eq!(0, cursor.iter_dup_of::<(), ()>(b"foo").count());
}
#[test]
fn test_iter_del_get() {
    // `iter_dup_of` before insertion, after insertion, and after deletion.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let items = vec![(*b"a", *b"1"), (*b"b", *b"2")];
    {
        let txn = env.begin_rw_txn().unwrap();
        let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
        // Fresh database: no duplicates for "a" yet.
        assert_eq!(
            txn.cursor(&db)
                .unwrap()
                .iter_dup_of::<(), ()>(b"a")
                .collect::<Result<Vec<_>>>()
                .unwrap()
                .len(),
            0
        );
        txn.commit().unwrap();
    }
    {
        let txn = env.begin_rw_txn().unwrap();
        let db = txn.open_db(None).unwrap();
        for (key, data) in &items {
            txn.put(db.dbi(), key, data, WriteFlags::empty()).unwrap();
        }
        txn.commit().unwrap();
    }
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    assert_eq!(items, cursor.iter_dup().flatten().collect::<Result<Vec<_>>>().unwrap());
    assert_eq!(
        items.iter().copied().take(1).collect::<Vec<(_, _)>>(),
        cursor.iter_dup_of(b"a").collect::<Result<Vec<_>>>().unwrap()
    );
    // Deleting the positioned entry removes "a"'s only duplicate.
    assert_eq!(cursor.set(b"a").unwrap(), Some(*b"1"));
    cursor.del(WriteFlags::empty()).unwrap();
    assert_eq!(cursor.iter_dup_of::<(), ()>(b"a").collect::<Result<Vec<_>>>().unwrap().len(), 0);
}
#[test]
fn test_put_del() {
    // Writes through the cursor and cursor behavior after a delete.
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut cursor = txn.cursor(&db).unwrap();
    cursor.put(b"key1", b"val1", WriteFlags::empty()).unwrap();
    cursor.put(b"key2", b"val2", WriteFlags::empty()).unwrap();
    cursor.put(b"key3", b"val3", WriteFlags::empty()).unwrap();
    assert_eq!(
        cursor.set_key(b"key2").unwrap(),
        Some((Cow::Borrowed(b"key2" as &[u8]), Cow::Borrowed(b"val2" as &[u8])))
    );
    assert_eq!(
        cursor.get_current().unwrap(),
        Some((Cow::Borrowed(b"key2" as &[u8]), Cow::Borrowed(b"val2" as &[u8])))
    );
    // After deleting key2, the cursor reports the following entry as current.
    cursor.del(WriteFlags::empty()).unwrap();
    assert_eq!(
        cursor.get_current().unwrap(),
        Some((Cow::Borrowed(b"key3" as &[u8]), Cow::Borrowed(b"val3" as &[u8])))
    );
    assert_eq!(
        cursor.last().unwrap(),
        Some((Cow::Borrowed(b"key3" as &[u8]), Cow::Borrowed(b"val3" as &[u8])))
    );
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/tests/environment.rs | crates/storage/libmdbx-rs/tests/environment.rs | #![allow(missing_docs)]
use byteorder::{ByteOrder, LittleEndian};
use reth_libmdbx::*;
use tempfile::tempdir;
#[test]
fn test_open() {
    // Environment creation semantics for read-only vs. read-write opens.
    let dir = tempdir().unwrap();
    // opening non-existent env with read-only should fail
    assert!(Environment::builder().set_flags(Mode::ReadOnly.into()).open(dir.path()).is_err());
    // opening non-existent env should succeed
    assert!(Environment::builder().open(dir.path()).is_ok());
    // opening env with read-only should succeed
    assert!(Environment::builder().set_flags(Mode::ReadOnly.into()).open(dir.path()).is_ok());
}
#[test]
fn test_begin_txn() {
    // A read-only environment must reject write transactions but allow reads.
    let dir = tempdir().unwrap();
    {
        // writable environment
        let env = Environment::builder().open(dir.path()).unwrap();
        assert!(env.begin_rw_txn().is_ok());
        assert!(env.begin_ro_txn().is_ok());
    }
    {
        // read-only environment
        let env = Environment::builder().set_flags(Mode::ReadOnly.into()).open(dir.path()).unwrap();
        assert!(env.begin_rw_txn().is_err());
        assert!(env.begin_ro_txn().is_ok());
    }
}
// `open_db` only opens databases that already exist: the unnamed (default)
// database is always present, while a named one must first be created via
// `create_db` in a read-write transaction.
#[test]
fn test_open_db() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().set_max_dbs(1).open(dir.path()).unwrap();
    let txn = env.begin_ro_txn().unwrap();
    assert!(txn.open_db(None).is_ok());
    assert!(txn.open_db(Some("testdb")).is_err());
}
// `create_db` makes a named database visible to subsequent `open_db` calls
// within the same transaction.
#[test]
fn test_create_db() {
    let dir = tempdir().unwrap();
    // Only one named database is used below; size the dbi table to match,
    // consistent with the sibling tests (`test_open_db` uses 1, `test_drop_db`
    // uses 2) instead of the previous arbitrary 11.
    let env = Environment::builder().set_max_dbs(1).open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    // The named database does not exist yet, so plain open must fail.
    assert!(txn.open_db(Some("testdb")).is_err());
    assert!(txn.create_db(Some("testdb"), DatabaseFlags::empty()).is_ok());
    // After creation, opening by name succeeds.
    assert!(txn.open_db(Some("testdb")).is_ok())
}
// Creating and then re-opening a named database inside one transaction must
// succeed. NOTE(review): despite the name, nothing is explicitly closed here —
// presumably handle cleanup on drop is what is exercised; confirm intent.
#[test]
fn test_close_database() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    txn.create_db(Some("db"), DatabaseFlags::empty()).unwrap();
    txn.open_db(Some("db")).unwrap();
}
// `sync` must succeed on a writable environment and error on a read-only one.
#[test]
fn test_sync() {
    let dir = tempdir().unwrap();
    {
        // writable env: forced flush is permitted
        let env = Environment::builder().open(dir.path()).unwrap();
        env.sync(true).unwrap();
    }
    {
        // read-only env: forced flush must be rejected
        let env = Environment::builder().set_flags(Mode::ReadOnly.into()).open(dir.path()).unwrap();
        env.sync(true).unwrap_err();
    }
}
// Environment-level statistics start at zero and reflect inserted entries.
#[test]
fn test_stat() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    // Stats should be empty initially.
    let stat = env.stat().unwrap();
    assert_eq!(stat.depth(), 0);
    assert_eq!(stat.branch_pages(), 0);
    assert_eq!(stat.leaf_pages(), 0);
    assert_eq!(stat.overflow_pages(), 0);
    assert_eq!(stat.entries(), 0);
    // Write a few small values, one committed transaction per entry.
    for i in 0..64 {
        let mut value = [0u8; 8];
        LittleEndian::write_u64(&mut value, i);
        let tx = env.begin_rw_txn().expect("begin_rw_txn");
        tx.put(tx.open_db(None).unwrap().dbi(), value, value, WriteFlags::default())
            .expect("tx.put");
        tx.commit().expect("tx.commit");
    }
    // Stats should now reflect inserted values: 64 small entries are expected
    // to fit in a single leaf page, so the tree has depth 1 and no branches.
    let stat = env.stat().unwrap();
    assert_eq!(stat.depth(), 1);
    assert_eq!(stat.branch_pages(), 0);
    assert_eq!(stat.leaf_pages(), 1);
    assert_eq!(stat.overflow_pages(), 0);
    assert_eq!(stat.entries(), 64);
}
// `info()` must report the configured geometry, reader count, and the mode the
// environment was opened with.
#[test]
fn test_info() {
    let map_size = 1024 * 1024;
    let dir = tempdir().unwrap();
    let env = Environment::builder()
        .set_geometry(Geometry { size: Some(map_size..), ..Default::default() })
        .open(dir.path())
        .unwrap();
    let info = env.info().unwrap();
    // Lower geometry bound must match what was requested at open time.
    assert_eq!(info.geometry().min(), map_size as u64);
    // assert_eq!(info.last_pgno(), 1);
    // assert_eq!(info.last_txnid(), 0);
    assert_eq!(info.num_readers(), 0);
    // Default open mode is read-write with durable sync.
    assert!(matches!(info.mode(), Mode::ReadWrite { sync_mode: SyncMode::Durable }));
    assert!(env.is_read_write().unwrap());
    drop(env);
    // Reopen the same env read-only and verify the reported mode follows.
    let env = Environment::builder()
        .set_geometry(Geometry { size: Some(map_size..), ..Default::default() })
        .set_flags(EnvironmentFlags { mode: Mode::ReadOnly, ..Default::default() })
        .open(dir.path())
        .unwrap();
    let info = env.info().unwrap();
    assert!(matches!(info.mode(), Mode::ReadOnly));
    assert!(env.is_read_only().unwrap());
}
// The freelist starts empty and grows once pages are released by `clear_db`.
#[test]
fn test_freelist() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let mut freelist = env.freelist().unwrap();
    assert_eq!(freelist, 0);
    // Write a few small values so the tree allocates pages.
    for i in 0..64 {
        let mut value = [0u8; 8];
        LittleEndian::write_u64(&mut value, i);
        let tx = env.begin_rw_txn().expect("begin_rw_txn");
        tx.put(tx.open_db(None).unwrap().dbi(), value, value, WriteFlags::default())
            .expect("tx.put");
        tx.commit().expect("tx.commit");
    }
    // Emptying the database releases its pages back to the freelist.
    let tx = env.begin_rw_txn().expect("begin_rw_txn");
    tx.clear_db(tx.open_db(None).unwrap().dbi()).expect("clear");
    tx.commit().expect("tx.commit");
    // Freelist should not be empty after clear_db.
    freelist = env.freelist().unwrap();
    assert!(freelist > 0);
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/tests/transaction.rs | crates/storage/libmdbx-rs/tests/transaction.rs | #![allow(missing_docs)]
use reth_libmdbx::*;
use std::{
borrow::Cow,
io::Write,
sync::{Arc, Barrier},
thread::{self, JoinHandle},
};
use tempfile::tempdir;
// Round-trips three entries through the default database: insert and commit,
// read them back in a fresh transaction, then delete one and observe it gone.
#[test]
fn test_put_get_del() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();

    // Insert three pairs in a single committed transaction.
    let write_txn = env.begin_rw_txn().unwrap();
    let db = write_txn.open_db(None).unwrap();
    for (key, val) in [(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3")] {
        write_txn.put(db.dbi(), key, val, WriteFlags::empty()).unwrap();
    }
    write_txn.commit().unwrap();

    // A fresh transaction observes the committed data; a missing key reads as `None`.
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    assert_eq!(txn.get(db.dbi(), b"key1").unwrap(), Some(*b"val1"));
    assert_eq!(txn.get(db.dbi(), b"key2").unwrap(), Some(*b"val2"));
    assert_eq!(txn.get(db.dbi(), b"key3").unwrap(), Some(*b"val3"));
    assert_eq!(txn.get::<()>(db.dbi(), b"key").unwrap(), None);

    // Deleting a key makes it unobservable within the same transaction.
    txn.del(db.dbi(), b"key1", None).unwrap();
    assert_eq!(txn.get::<()>(db.dbi(), b"key1").unwrap(), None);
}
// DUP_SORT semantics: multiple values per key are stored sorted, `del` with a
// value removes only that duplicate, and `del` with `None` removes all of them.
#[test]
fn test_put_get_del_multi() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
    // Three duplicate values under each of three keys.
    txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val3", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val3", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val3", WriteFlags::empty()).unwrap();
    txn.commit().unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    {
        // All duplicates of key1 come back in sorted order.
        let mut cur = txn.cursor(&db).unwrap();
        let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1");
        let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::<Vec<_>>();
        assert_eq!(vals, vec![*b"val1", *b"val2", *b"val3"]);
    }
    txn.commit().unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    // Targeted delete removes a single duplicate; `None` removes the whole key.
    txn.del(db.dbi(), b"key1", Some(b"val2")).unwrap();
    txn.del(db.dbi(), b"key2", None).unwrap();
    txn.commit().unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    {
        let mut cur = txn.cursor(&db).unwrap();
        // key1 lost only val2.
        let iter = cur.iter_dup_of::<(), [u8; 4]>(b"key1");
        let vals = iter.map(|x| x.unwrap()).map(|(_, x)| x).collect::<Vec<_>>();
        assert_eq!(vals, vec![*b"val1", *b"val3"]);
        // key2 lost all duplicates.
        let iter = cur.iter_dup_of::<(), ()>(b"key2");
        assert_eq!(0, iter.count());
    }
    txn.commit().unwrap();
}
// An empty key is a valid key: values stored under `b""` survive a commit and
// can be overwritten, including with an empty value.
#[test]
fn test_put_get_del_empty_key() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();

    // First transaction: store under the empty key and commit.
    let setup_txn = env.begin_rw_txn().unwrap();
    let db = setup_txn.create_db(None, Default::default()).unwrap();
    setup_txn.put(db.dbi(), b"", b"hello", WriteFlags::empty()).unwrap();
    assert_eq!(setup_txn.get(db.dbi(), b"").unwrap(), Some(*b"hello"));
    setup_txn.commit().unwrap();

    // Second transaction: the value persisted and can be replaced with `b""`.
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    assert_eq!(txn.get(db.dbi(), b"").unwrap(), Some(*b"hello"));
    txn.put(db.dbi(), b"", b"", WriteFlags::empty()).unwrap();
    assert_eq!(txn.get(db.dbi(), b"").unwrap(), Some(*b""));
}
// `reserve` hands out a writable buffer of the requested size inside the
// database page; filling it is equivalent to a regular `put`.
#[test]
fn test_reserve() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    {
        // Reserve 4 bytes for "key1" and write the value through the handle.
        // The scope ends before commit so the writer's borrow is released.
        let mut writer = txn.reserve(&db, b"key1", 4, WriteFlags::empty()).unwrap();
        writer.write_all(b"val1").unwrap();
    }
    txn.commit().unwrap();
    // A later transaction sees the reserved-and-filled value like any other.
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    assert_eq!(txn.get(db.dbi(), b"key1").unwrap(), Some(*b"val1"));
    assert_eq!(txn.get::<()>(db.dbi(), b"key").unwrap(), None);
    txn.del(db.dbi(), b"key1", None).unwrap();
    assert_eq!(txn.get::<()>(db.dbi(), b"key1").unwrap(), None);
}
// A nested transaction sees its parent's writes; dropping it without commit
// aborts it, so its own writes never become visible to the parent.
#[test]
fn test_nested_txn() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let mut txn = env.begin_rw_txn().unwrap();
    txn.put(txn.open_db(None).unwrap().dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    {
        // The nested txn observes the parent's uncommitted key1 and adds key2.
        let nested = txn.begin_nested_txn().unwrap();
        let db = nested.open_db(None).unwrap();
        nested.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap();
        assert_eq!(nested.get(db.dbi(), b"key1").unwrap(), Some(*b"val1"));
        assert_eq!(nested.get(db.dbi(), b"key2").unwrap(), Some(*b"val2"));
        // dropped here without commit -> nested txn is aborted
    }
    let db = txn.open_db(None).unwrap();
    // Parent keeps its own write but never sees the aborted nested write.
    assert_eq!(txn.get(db.dbi(), b"key1").unwrap(), Some(*b"val1"));
    assert_eq!(txn.get::<()>(db.dbi(), b"key2").unwrap(), None);
}
// `clear_db` removes every entry from the database while keeping the database
// itself usable.
#[test]
fn test_clear_db() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    {
        let txn = env.begin_rw_txn().unwrap();
        txn.put(txn.open_db(None).unwrap().dbi(), b"key", b"val", WriteFlags::empty()).unwrap();
        // NOTE(review): commit returns a tuple whose first field is asserted
        // false here — presumably a "was broken/degraded" indicator; confirm
        // against the `commit` API docs.
        assert!(!txn.commit().unwrap().0);
    }
    {
        let txn = env.begin_rw_txn().unwrap();
        txn.clear_db(txn.open_db(None).unwrap().dbi()).unwrap();
        assert!(!txn.commit().unwrap().0);
    }
    // After the clearing commit, the key is gone.
    let txn = env.begin_ro_txn().unwrap();
    assert_eq!(txn.get::<()>(txn.open_db(None).unwrap().dbi(), b"key").unwrap(), None);
}
// Dropping a named database removes it permanently, while unrelated named
// databases ("canary") survive environment reopen.
#[test]
fn test_drop_db() {
    let dir = tempdir().unwrap();
    {
        let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap();
        {
            let txn = env.begin_rw_txn().unwrap();
            txn.put(
                txn.create_db(Some("test"), DatabaseFlags::empty()).unwrap().dbi(),
                b"key",
                b"val",
                WriteFlags::empty(),
            )
            .unwrap();
            // Workaround for MDBX dbi drop issue
            txn.create_db(Some("canary"), DatabaseFlags::empty()).unwrap();
            assert!(!txn.commit().unwrap().0);
        }
        {
            let txn = env.begin_rw_txn().unwrap();
            let db = txn.open_db(Some("test")).unwrap();
            // SAFETY-sensitive API: `drop_db` is unsafe because it invalidates
            // the dbi handle; the handle is not used again afterwards.
            unsafe {
                txn.drop_db(db).unwrap();
            }
            assert!(matches!(txn.open_db(Some("test")).unwrap_err(), Error::NotFound));
            assert!(!txn.commit().unwrap().0);
        }
    }
    // Reopen the environment: "canary" persists, "test" stays dropped.
    let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap();
    let txn = env.begin_ro_txn().unwrap();
    txn.open_db(Some("canary")).unwrap();
    assert!(matches!(txn.open_db(Some("test")).unwrap_err(), Error::NotFound));
}
// MVCC check: readers that start before a write see nothing; readers that
// start after the writer's commit see the value. The two barrier waits bracket
// the writer's put+commit, so their ordering is what the test relies on.
#[test]
fn test_concurrent_readers_single_writer() {
    let dir = tempdir().unwrap();
    let env: Arc<Environment> = Arc::new(Environment::builder().open(dir.path()).unwrap());
    let n = 10usize; // Number of concurrent readers
    // n reader threads + the writer (main thread) rendezvous on this barrier.
    let barrier = Arc::new(Barrier::new(n + 1));
    let mut threads: Vec<JoinHandle<bool>> = Vec::with_capacity(n);
    let key = b"key";
    let val = b"val";
    for _ in 0..n {
        let reader_env = env.clone();
        let reader_barrier = barrier.clone();
        threads.push(thread::spawn(move || {
            {
                // Before the writer commits: the key must not exist.
                let txn = reader_env.begin_ro_txn().unwrap();
                let db = txn.open_db(None).unwrap();
                assert_eq!(txn.get::<()>(db.dbi(), key).unwrap(), None);
            }
            // Wait 1: signal the writer it may write. Wait 2: writer committed.
            reader_barrier.wait();
            reader_barrier.wait();
            {
                // After the commit: a fresh snapshot must see the value.
                let txn = reader_env.begin_ro_txn().unwrap();
                let db = txn.open_db(None).unwrap();
                txn.get::<[u8; 3]>(db.dbi(), key).unwrap().unwrap() == *val
            }
        }));
    }
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    barrier.wait();
    txn.put(db.dbi(), key, val, WriteFlags::empty()).unwrap();
    txn.commit().unwrap();
    barrier.wait();
    assert!(threads.into_iter().all(|b| b.join().unwrap()))
}
// Multiple threads each commit a write transaction to disjoint keys; all
// commits must succeed and all values must be readable afterwards.
#[test]
fn test_concurrent_writers() {
    let dir = tempdir().unwrap();
    let env = Arc::new(Environment::builder().open(dir.path()).unwrap());
    let n = 10usize; // Number of concurrent writers
    let mut threads: Vec<JoinHandle<bool>> = Vec::with_capacity(n);
    let key = "key";
    let val = "val";
    for i in 0..n {
        let writer_env = env.clone();
        threads.push(thread::spawn(move || {
            // Each thread writes its own `key{i}` -> `val{i}` pair.
            let txn = writer_env.begin_rw_txn().unwrap();
            let db = txn.open_db(None).unwrap();
            txn.put(db.dbi(), format!("{key}{i}"), format!("{val}{i}"), WriteFlags::empty())
                .unwrap();
            txn.commit().is_ok()
        }));
    }
    assert!(threads.into_iter().all(|b| b.join().unwrap()));
    // A single read-only transaction observes every committed pair.
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    for i in 0..n {
        assert_eq!(
            Cow::<Vec<u8>>::Owned(format!("{val}{i}").into_bytes()),
            txn.get(db.dbi(), format!("{key}{i}").as_bytes()).unwrap().unwrap()
        );
    }
}
// Per-database statistics (`db_stat`) track the live entry count across puts
// and deletes over multiple committed transactions.
#[test]
fn test_stat() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.create_db(None, DatabaseFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val3", WriteFlags::empty()).unwrap();
    txn.commit().unwrap();
    {
        // 3 inserts -> 3 entries.
        let txn = env.begin_ro_txn().unwrap();
        let db = txn.open_db(None).unwrap();
        let stat = txn.db_stat(&db).unwrap();
        assert_eq!(stat.entries(), 3);
    }
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    txn.del(db.dbi(), b"key1", None).unwrap();
    txn.del(db.dbi(), b"key2", None).unwrap();
    txn.commit().unwrap();
    {
        // 2 deletes -> 1 entry remains.
        let txn = env.begin_ro_txn().unwrap();
        let db = txn.open_db(None).unwrap();
        let stat = txn.db_stat(&db).unwrap();
        assert_eq!(stat.entries(), 1);
    }
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    txn.put(db.dbi(), b"key4", b"val4", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key5", b"val5", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key6", b"val6", WriteFlags::empty()).unwrap();
    txn.commit().unwrap();
    {
        // 3 more inserts -> 4 entries total.
        let txn = env.begin_ro_txn().unwrap();
        let db = txn.open_db(None).unwrap();
        let stat = txn.db_stat(&db).unwrap();
        assert_eq!(stat.entries(), 4);
    }
}
// For DUP_SORT databases, `db_stat` counts every duplicate as an entry;
// deleting a single duplicate or a whole key adjusts the count accordingly.
#[test]
fn test_stat_dupsort() {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap();
    // 3 keys x 3 duplicates = 9 entries.
    txn.put(db.dbi(), b"key1", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key1", b"val3", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key2", b"val3", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key3", b"val3", WriteFlags::empty()).unwrap();
    txn.commit().unwrap();
    {
        let txn = env.begin_ro_txn().unwrap();
        let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap();
        assert_eq!(stat.entries(), 9);
    }
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    // Remove one duplicate of key1 (-1) and all duplicates of key2 (-3).
    txn.del(db.dbi(), b"key1", Some(b"val2")).unwrap();
    txn.del(db.dbi(), b"key2", None).unwrap();
    txn.commit().unwrap();
    {
        let txn = env.begin_ro_txn().unwrap();
        let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap();
        assert_eq!(stat.entries(), 5);
    }
    let txn = env.begin_rw_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    // Add 3 duplicates under a new key (+3) -> 8 entries.
    txn.put(db.dbi(), b"key4", b"val1", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key4", b"val2", WriteFlags::empty()).unwrap();
    txn.put(db.dbi(), b"key4", b"val3", WriteFlags::empty()).unwrap();
    txn.commit().unwrap();
    {
        let txn = env.begin_ro_txn().unwrap();
        let stat = txn.db_stat(&txn.open_db(None).unwrap()).unwrap();
        assert_eq!(stat.entries(), 8);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/benches/cursor.rs | crates/storage/libmdbx-rs/benches/cursor.rs | #![allow(missing_docs)]
mod utils;
use criterion::{criterion_group, criterion_main, Criterion};
use reth_libmdbx::{ffi::*, *};
use std::{hint::black_box, ptr};
use utils::*;
/// Benchmark of iterator sequential read performance.
///
/// Exercises three ways of draining a cursor iterator: `map(Result::unwrap)`,
/// `filter_map(Result::ok)`, and a generic helper using `?`.
fn bench_get_seq_iter(c: &mut Criterion) {
    let n = 100;
    let (_dir, env) = setup_bench_db(n);
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    c.bench_function("bench_get_seq_iter", |b| {
        b.iter(|| {
            let mut cursor = txn.cursor(&db).unwrap();
            let mut i = 0;
            let mut count = 0u32;
            for (key_len, data_len) in
                cursor.iter::<ObjectLength, ObjectLength>().map(Result::unwrap)
            {
                i = i + *key_len + *data_len;
                count += 1;
            }
            // The cursor is already exhausted after the first pass, so this
            // second pass yields nothing — hence `count == n` below, not 2n.
            for (key_len, data_len) in
                cursor.iter::<ObjectLength, ObjectLength>().filter_map(Result::ok)
            {
                i = i + *key_len + *data_len;
                count += 1;
            }
            // Same traversal written as a fallible helper to keep the `?`
            // code path in the measurement.
            fn iterate<K: TransactionKind>(cursor: &mut Cursor<K>) -> Result<()> {
                let mut i = 0;
                for result in cursor.iter::<ObjectLength, ObjectLength>() {
                    let (key_len, data_len) = result?;
                    i = i + *key_len + *data_len;
                }
                Ok(())
            }
            iterate(&mut cursor).unwrap();
            // black_box keeps the accumulated sum from being optimized away.
            black_box(i);
            assert_eq!(count, n);
        })
    });
}
/// Benchmark of cursor sequential read performance.
///
/// Single `fold` over the whole table, accumulating the byte-length sum and
/// the entry count in one pass.
fn bench_get_seq_cursor(c: &mut Criterion) {
    let n = 100;
    let (_dir, env) = setup_bench_db(n);
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    c.bench_function("bench_get_seq_cursor", |b| {
        b.iter(|| {
            let (i, count) = txn
                .cursor(&db)
                .unwrap()
                .iter::<ObjectLength, ObjectLength>()
                .map(Result::unwrap)
                .fold((0, 0), |(i, count), (key, val)| (i + *key + *val, count + 1));
            // black_box prevents the optimizer from removing the traversal.
            black_box(i);
            assert_eq!(count, n);
        })
    });
}
/// Benchmark of raw MDBX sequential read performance (control).
///
/// Bypasses the safe wrapper and drives the C cursor API directly, giving a
/// lower bound to compare the safe iterators against.
fn bench_get_seq_raw(c: &mut Criterion) {
    let n = 100;
    let (_dir, env) = setup_bench_db(n);
    let dbi = env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi();
    let txn = env.begin_ro_txn().unwrap();
    let mut key = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
    let mut data = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
    let mut cursor: *mut MDBX_cursor = ptr::null_mut();
    c.bench_function("bench_get_seq_raw", |b| {
        b.iter(|| unsafe {
            // SAFETY-sensitive: raw FFI. `txn_execute` provides the live txn
            // pointer, and the cursor opened here is closed before returning.
            txn.txn_execute(|txn| {
                mdbx_cursor_open(txn, dbi, &raw mut cursor);
                let mut i = 0;
                let mut count = 0u32;
                // MDBX_NEXT walks the table until the cursor reports non-zero.
                while mdbx_cursor_get(cursor, &raw mut key, &raw mut data, MDBX_NEXT) == 0 {
                    i += key.iov_len + data.iov_len;
                    count += 1;
                }
                black_box(i);
                assert_eq!(count, n);
                mdbx_cursor_close(cursor);
            })
            .unwrap();
        })
    });
}
// Register the sequential-read benchmarks with criterion's default config.
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = bench_get_seq_iter, bench_get_seq_cursor, bench_get_seq_raw
}
criterion_main!(benches);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/benches/utils.rs | crates/storage/libmdbx-rs/benches/utils.rs | #![allow(unreachable_pub)]
#![allow(missing_docs)]
use reth_libmdbx::{Environment, WriteFlags};
use tempfile::{tempdir, TempDir};
/// Builds the benchmark key for row `n` (e.g. `"key42"` for `n = 42`).
pub fn get_key(n: u32) -> String {
    ["key", n.to_string().as_str()].concat()
}
/// Builds the benchmark value for row `n` (e.g. `"data42"` for `n = 42`).
pub fn get_data(n: u32) -> String {
    ["data", n.to_string().as_str()].concat()
}
/// Creates a temporary environment pre-populated with `num_rows` entries
/// (`key{i}` -> `data{i}`) in the default database.
///
/// Returns the `TempDir` alongside the env so the directory outlives the
/// benchmark (dropping it would delete the files under the open env).
pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment) {
    let dir = tempdir().unwrap();
    let env = Environment::builder().open(dir.path()).unwrap();
    {
        let txn = env.begin_rw_txn().unwrap();
        let db = txn.open_db(None).unwrap();
        for i in 0..num_rows {
            txn.put(db.dbi(), get_key(i), get_data(i), WriteFlags::empty()).unwrap();
        }
        txn.commit().unwrap();
    }
    (dir, env)
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/libmdbx-rs/benches/transaction.rs | crates/storage/libmdbx-rs/benches/transaction.rs | #![allow(missing_docs, unreachable_pub)]
mod utils;
use criterion::{criterion_group, criterion_main, Criterion};
use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng};
use reth_libmdbx::{ffi::*, ObjectLength, WriteFlags};
use std::{hint::black_box, ptr};
use utils::*;
/// Benchmark of random-order point reads through the safe `get` API.
///
/// Keys are shuffled with a fixed seed so every run touches the same sequence.
fn bench_get_rand(c: &mut Criterion) {
    let n = 100u32;
    let (_dir, env) = setup_bench_db(n);
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut keys: Vec<String> = (0..n).map(get_key).collect();
    keys.shuffle(&mut StdRng::from_seed(Default::default()));
    c.bench_function("bench_get_rand", |b| {
        b.iter(|| {
            let mut i = 0usize;
            for key in &keys {
                // ObjectLength decodes only the value's length, keeping the
                // measurement focused on the lookup itself.
                i += *txn.get::<ObjectLength>(db.dbi(), key.as_bytes()).unwrap().unwrap();
            }
            black_box(i);
        })
    });
}
/// Benchmark of random-order point reads through raw `mdbx_get` (control).
fn bench_get_rand_raw(c: &mut Criterion) {
    let n = 100u32;
    let (_dir, env) = setup_bench_db(n);
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    let mut keys: Vec<String> = (0..n).map(get_key).collect();
    keys.shuffle(&mut StdRng::from_seed(Default::default()));
    let dbi = db.dbi();
    let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
    let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
    c.bench_function("bench_get_rand_raw", |b| {
        b.iter(|| unsafe {
            // SAFETY-sensitive: raw FFI. The key buffers outlive each
            // `mdbx_get` call; pointers are only valid inside `txn_execute`.
            txn.txn_execute(|txn| {
                let mut i = 0;
                for key in &keys {
                    key_val.iov_len = key.len();
                    key_val.iov_base = key.as_bytes().as_ptr().cast_mut().cast();
                    mdbx_get(txn, dbi, &raw const key_val, &raw mut data_val);
                    i += key_val.iov_len;
                }
                black_box(i);
            })
            .unwrap();
        })
    });
}
/// Benchmark of random-order writes through the safe `put` API.
///
/// Each iteration opens a fresh write transaction that is dropped (aborted)
/// at the end of the closure, so the database stays empty between iterations.
fn bench_put_rand(c: &mut Criterion) {
    let n = 100u32;
    let (_dir, env) = setup_bench_db(0);
    let txn = env.begin_ro_txn().unwrap();
    let db = txn.open_db(None).unwrap();
    // Keep the dbi handle alive across the committed read txn so the write
    // transactions below can reuse it without reopening the database.
    txn.prime_for_permaopen(db);
    let db = txn.commit_and_rebind_open_dbs().unwrap().2.remove(0);
    let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect();
    items.shuffle(&mut StdRng::from_seed(Default::default()));
    c.bench_function("bench_put_rand", |b| {
        b.iter(|| {
            let txn = env.begin_rw_txn().unwrap();
            for (key, data) in &items {
                txn.put(db.dbi(), key, data, WriteFlags::empty()).unwrap();
            }
        })
    });
}
/// Benchmark of random-order writes through raw `mdbx_put` (control).
///
/// The raw transaction is explicitly aborted each iteration so nothing is
/// ever committed.
fn bench_put_rand_raw(c: &mut Criterion) {
    let n = 100u32;
    let (_dir, env) = setup_bench_db(0);
    let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect();
    items.shuffle(&mut StdRng::from_seed(Default::default()));
    let dbi = env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi();
    let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
    let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
    c.bench_function("bench_put_rand_raw", |b| {
        b.iter(|| unsafe {
            // SAFETY-sensitive: raw FFI. The txn created here is always
            // aborted before the closure returns.
            let mut txn: *mut MDBX_txn = ptr::null_mut();
            env.with_raw_env_ptr(|env| {
                mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &raw mut txn, ptr::null_mut());
                let mut i = 0;
                for (key, data) in &items {
                    key_val.iov_len = key.len();
                    key_val.iov_base = key.as_bytes().as_ptr().cast_mut().cast();
                    data_val.iov_len = data.len();
                    data_val.iov_base = data.as_bytes().as_ptr().cast_mut().cast();
                    // Accumulate return codes; all must be 0 (success).
                    i += mdbx_put(txn, dbi, &raw const key_val, &raw mut data_val, 0);
                }
                assert_eq!(0, i);
                mdbx_txn_abort(txn);
            });
        })
    });
}
// Register the random-access get/put benchmarks with criterion's default config.
criterion_group! {
    name = benches;
    config = Criterion::default();
    targets = bench_get_rand, bench_get_rand_raw, bench_put_rand, bench_put_rand_raw
}
criterion_main!(benches);
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/cursor.rs | crates/storage/db-api/src/cursor.rs | use std::{
fmt,
ops::{Bound, RangeBounds},
};
use crate::{
common::{IterPairResult, PairResult, ValueOnlyResult},
table::{DupSort, Table, TableRow},
DatabaseError,
};
/// A read-only cursor over table `T`.
///
/// All positioning methods take `&mut self` because every read also moves the
/// cursor's position.
pub trait DbCursorRO<T: Table> {
    /// Positions the cursor at the first entry in the table, returning it.
    fn first(&mut self) -> PairResult<T>;
    /// Seeks to the KV pair exactly at `key`.
    fn seek_exact(&mut self, key: T::Key) -> PairResult<T>;
    /// Seeks to the KV pair whose key is greater than or equal to `key`.
    fn seek(&mut self, key: T::Key) -> PairResult<T>;
    /// Position the cursor at the next KV pair, returning it.
    fn next(&mut self) -> PairResult<T>;
    /// Position the cursor at the previous KV pair, returning it.
    fn prev(&mut self) -> PairResult<T>;
    /// Positions the cursor at the last entry in the table, returning it.
    fn last(&mut self) -> PairResult<T>;
    /// Get the KV pair at the cursor's current position.
    fn current(&mut self) -> PairResult<T>;
    /// Get an iterator that walks through the table.
    ///
    /// If `start_key` is `None`, then the walker will start from the first entry of the table,
    /// otherwise it starts at the entry greater than or equal to the provided key.
    fn walk(&mut self, start_key: Option<T::Key>) -> Result<Walker<'_, T, Self>, DatabaseError>
    where
        Self: Sized;
    /// Get an iterator that walks over a range of keys in the table.
    fn walk_range(
        &mut self,
        range: impl RangeBounds<T::Key>,
    ) -> Result<RangeWalker<'_, T, Self>, DatabaseError>
    where
        Self: Sized;
    /// Get an iterator that walks through the table in reverse order.
    ///
    /// If `start_key` is `None`, then the walker will start from the last entry of the table,
    /// otherwise it starts at the entry greater than or equal to the provided key.
    fn walk_back(
        &mut self,
        start_key: Option<T::Key>,
    ) -> Result<ReverseWalker<'_, T, Self>, DatabaseError>
    where
        Self: Sized;
}
/// A read-only cursor over the dup table `T`.
///
/// Dup tables store multiple sorted values ("duplicates") per key; these
/// methods navigate within and across duplicate groups.
pub trait DbDupCursorRO<T: DupSort> {
    /// Positions the cursor at the next KV pair of the table, returning it.
    fn next_dup(&mut self) -> PairResult<T>;
    /// Positions the cursor at the next KV pair of the table, skipping duplicates.
    fn next_no_dup(&mut self) -> PairResult<T>;
    /// Positions the cursor at the next duplicate value of the current key.
    fn next_dup_val(&mut self) -> ValueOnlyResult<T>;
    /// Positions the cursor at the entry greater than or equal to the provided key/subkey pair.
    ///
    /// # Note
    ///
    /// The position of the cursor might not correspond to the key/subkey pair if the entry does not
    /// exist.
    fn seek_by_key_subkey(&mut self, key: T::Key, subkey: T::SubKey) -> ValueOnlyResult<T>;
    /// Get an iterator that walks through the dup table.
    ///
    /// The cursor will start at different points in the table depending on the values of `key` and
    /// `subkey`:
    ///
    /// | `key`  | `subkey` | **Equivalent starting position**        |
    /// |--------|----------|-----------------------------------------|
    /// | `None` | `None`   | [`DbCursorRO::first()`]                 |
    /// | `Some` | `None`   | [`DbCursorRO::seek()`]                  |
    /// | `None` | `Some`   | [`DbDupCursorRO::seek_by_key_subkey()`] |
    /// | `Some` | `Some`   | [`DbDupCursorRO::seek_by_key_subkey()`] |
    fn walk_dup(
        &mut self,
        key: Option<T::Key>,
        subkey: Option<T::SubKey>,
    ) -> Result<DupWalker<'_, T, Self>, DatabaseError>
    where
        Self: Sized;
}
/// Read write cursor over table.
pub trait DbCursorRW<T: Table> {
    /// Database operation that will update an existing row if a specified value already
    /// exists in a table, and insert a new row if the specified value doesn't already exist
    fn upsert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>;
    /// Database operation that will insert a row at a given key. If the key is already
    /// present, the operation will result in an error.
    fn insert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>;
    /// Append value to next cursor item.
    ///
    /// This is efficient for pre-sorted data (keys inserted in ascending
    /// order). If the data is not pre-sorted, use [`DbCursorRW::insert`].
    fn append(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>;
    /// Delete current value that cursor points to
    fn delete_current(&mut self) -> Result<(), DatabaseError>;
}
/// Read Write Cursor over `DupSorted` table.
pub trait DbDupCursorRW<T: DupSort> {
    /// Delete all duplicate entries for current key.
    fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError>;
    /// Append duplicate value.
    ///
    /// This is efficient for pre-sorted data. If the data is not pre-sorted, use `insert`.
    fn append_dup(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>;
}
/// Provides an iterator to `Cursor` when handling `Table`.
pub struct Walker<'cursor, T: Table, CURSOR: DbCursorRO<T>> {
    /// Cursor to be used to walk through the table.
    cursor: &'cursor mut CURSOR,
    /// `(key, value)` where to start the walk.
    /// Buffered first entry; yielded once before the cursor is advanced.
    start: IterPairResult<T>,
}
impl<T, CURSOR> fmt::Debug for Walker<'_, T, CURSOR>
where
    T: Table,
    CURSOR: DbCursorRO<T> + fmt::Debug,
{
    /// Debug-formats the walker's cursor and buffered start entry, laid out
    /// the same way as [`ReverseWalker`]'s `Debug` impl.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Walker")
            .field("cursor", &self.cursor)
            .field("start", &self.start)
            .finish()
    }
}
impl<T: Table, CURSOR: DbCursorRO<T>> Iterator for Walker<'_, T, CURSOR> {
    type Item = Result<TableRow<T>, DatabaseError>;
    fn next(&mut self) -> Option<Self::Item> {
        // Yield the buffered start entry exactly once, then step the cursor
        // forward for every subsequent call.
        self.start.take().or_else(|| self.cursor.next().transpose())
    }
}
impl<'cursor, T: Table, CURSOR: DbCursorRO<T>> Walker<'cursor, T, CURSOR> {
    /// construct Walker
    pub const fn new(cursor: &'cursor mut CURSOR, start: IterPairResult<T>) -> Self {
        Self { cursor, start }
    }
    /// convert current [`Walker`] to [`ReverseWalker`] which iterates reversely
    pub fn rev(self) -> ReverseWalker<'cursor, T, CURSOR> {
        // Re-read the cursor's current entry as the reverse walk's start so
        // the direction flip happens from the present position.
        let start = self.cursor.current().transpose();
        ReverseWalker::new(self.cursor, start)
    }
}
impl<T: Table, CURSOR: DbCursorRW<T> + DbCursorRO<T>> Walker<'_, T, CURSOR> {
    /// Delete current item that walker points to.
    pub fn delete_current(&mut self) -> Result<(), DatabaseError> {
        // Drop the buffered start entry so a deleted row is never yielded.
        self.start.take();
        self.cursor.delete_current()
    }
}
/// Provides a reverse iterator to `Cursor` when handling `Table`.
/// Also check [`Walker`]
pub struct ReverseWalker<'cursor, T: Table, CURSOR: DbCursorRO<T>> {
    /// Cursor to be used to walk through the table.
    cursor: &'cursor mut CURSOR,
    /// `(key, value)` where to start the walk.
    /// Buffered first entry; yielded once before the cursor steps backwards.
    start: IterPairResult<T>,
}
impl<T, CURSOR> fmt::Debug for ReverseWalker<'_, T, CURSOR>
where
    T: Table,
    CURSOR: DbCursorRO<T> + fmt::Debug,
{
    // Debug-formats the cursor and the buffered start entry.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ReverseWalker")
            .field("cursor", &self.cursor)
            .field("start", &self.start)
            .finish()
    }
}
impl<'cursor, T: Table, CURSOR: DbCursorRO<T>> ReverseWalker<'cursor, T, CURSOR> {
    /// construct `ReverseWalker`
    pub const fn new(cursor: &'cursor mut CURSOR, start: IterPairResult<T>) -> Self {
        Self { cursor, start }
    }
    /// convert current [`ReverseWalker`] to [`Walker`] which iterate forwardly
    pub fn forward(self) -> Walker<'cursor, T, CURSOR> {
        // Re-read the cursor's current entry as the forward walk's start so
        // the direction flip happens from the present position.
        let start = self.cursor.current().transpose();
        Walker::new(self.cursor, start)
    }
}
impl<T: Table, CURSOR: DbCursorRW<T> + DbCursorRO<T>> ReverseWalker<'_, T, CURSOR> {
    /// Delete current item that walker points to.
    pub fn delete_current(&mut self) -> Result<(), DatabaseError> {
        // Drop the buffered start entry so a deleted row is never yielded.
        self.start.take();
        self.cursor.delete_current()
    }
}
impl<T: Table, CURSOR: DbCursorRO<T>> Iterator for ReverseWalker<'_, T, CURSOR> {
    type Item = Result<TableRow<T>, DatabaseError>;
    fn next(&mut self) -> Option<Self::Item> {
        // Mirror of the forward `Walker`: yield the buffered start entry
        // exactly once, then step the cursor backwards.
        self.start.take().or_else(|| self.cursor.prev().transpose())
    }
}
/// Provides a range iterator to `Cursor` when handling `Table`.
/// Also check [`Walker`]
pub struct RangeWalker<'cursor, T: Table, CURSOR: DbCursorRO<T>> {
    /// Cursor to be used to walk through the table.
    cursor: &'cursor mut CURSOR,
    /// `(key, value)` where to start the walk.
    /// Buffered first entry; yielded once before the cursor is advanced.
    start: IterPairResult<T>,
    /// `key` where to stop the walk.
    end_key: Bound<T::Key>,
    /// flag whether is ended
    /// Set once a key past `end_key` is observed (or the range was empty).
    is_done: bool,
}
impl<T, CURSOR> fmt::Debug for RangeWalker<'_, T, CURSOR>
where
    T: Table,
    CURSOR: DbCursorRO<T> + fmt::Debug,
{
    // Debug-formats the cursor, buffered start entry, end bound, and done flag.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RangeWalker")
            .field("cursor", &self.cursor)
            .field("start", &self.start)
            .field("end_key", &self.end_key)
            .field("is_done", &self.is_done)
            .finish()
    }
}
impl<T: Table, CURSOR: DbCursorRO<T>> Iterator for RangeWalker<'_, T, CURSOR> {
    type Item = Result<TableRow<T>, DatabaseError>;
    fn next(&mut self) -> Option<Self::Item> {
        if self.is_done {
            return None
        }
        // Yield the buffered start entry first, then advance the cursor.
        let next_item = self.start.take().or_else(|| self.cursor.next().transpose());
        match next_item {
            // Keep yielding while the key is within the end bound; the first
            // key past the bound terminates the walk permanently.
            Some(Ok((key, value))) => match &self.end_key {
                Bound::Included(end_key) if &key <= end_key => Some(Ok((key, value))),
                Bound::Excluded(end_key) if &key < end_key => Some(Ok((key, value))),
                Bound::Unbounded => Some(Ok((key, value))),
                _ => {
                    self.is_done = true;
                    None
                }
            },
            // Errors are passed through without terminating the walk.
            Some(res @ Err(_)) => Some(res),
            None => {
                // NOTE(review): cursor exhaustion marks the walker done only
                // for unbounded ranges; bounded ranges will poll the cursor
                // again on the next call — presumably intentional (the cursor
                // may gain entries), confirm against the walker's contract.
                self.is_done = matches!(self.end_key, Bound::Unbounded);
                None
            }
        }
    }
}
impl<'cursor, T: Table, CURSOR: DbCursorRO<T>> RangeWalker<'cursor, T, CURSOR> {
    /// Creates a new [`RangeWalker`] from a cursor, an optional buffered first
    /// entry, and an upper bound for the walk.
    pub fn new(
        cursor: &'cursor mut CURSOR,
        start: IterPairResult<T>,
        end_key: Bound<T::Key>,
    ) -> Self {
        // An empty range — no first entry, or a first key already past the upper
        // bound — is marked done up front so iteration yields nothing.
        let is_done = match &start {
            None => true,
            Some(Ok((first_key, _))) => match &end_key {
                Bound::Included(end) => first_key > end,
                Bound::Excluded(end) => first_key >= end,
                Bound::Unbounded => false,
            },
            Some(Err(_)) => false,
        };
        Self { cursor, start, end_key, is_done }
    }
}
impl<T: Table, CURSOR: DbCursorRW<T> + DbCursorRO<T>> RangeWalker<'_, T, CURSOR> {
    /// Delete current item that walker points to.
    ///
    /// Clears the buffered `start` entry first so a subsequent `next()` reads from
    /// the cursor instead of yielding the now-deleted buffered pair.
    pub fn delete_current(&mut self) -> Result<(), DatabaseError> {
        self.start.take();
        self.cursor.delete_current()
    }
}
/// Provides an iterator to `Cursor` when handling a `DupSort` table.
///
/// Reason why we have two lifetimes is to distinguish between `'cursor` lifetime
/// and inherited `'tx` lifetime. If there were only one, rust would short-circuit
/// the Cursor lifetime and it wouldn't be possible to use Walker.
pub struct DupWalker<'cursor, T: DupSort, CURSOR: DbDupCursorRO<T>> {
    /// Cursor to be used to walk through the table.
    pub cursor: &'cursor mut CURSOR,
    /// Buffered `(key, value)` entry at which the walk starts, if any.
    pub start: IterPairResult<T>,
}
impl<T, CURSOR> fmt::Debug for DupWalker<'_, T, CURSOR>
where
    T: DupSort,
    CURSOR: DbDupCursorRO<T> + fmt::Debug,
{
    /// Formats all fields, delegating to the cursor's own `Debug` impl.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DupWalker")
            .field("cursor", &self.cursor)
            .field("start", &self.start)
            .finish()
    }
}
impl<T: DupSort, CURSOR: DbCursorRW<T> + DbDupCursorRO<T>> DupWalker<'_, T, CURSOR> {
    /// Delete current item that walker points to.
    ///
    /// Clears the buffered `start` entry first so a subsequent `next()` reads from
    /// the cursor instead of yielding the now-deleted buffered pair.
    pub fn delete_current(&mut self) -> Result<(), DatabaseError> {
        self.start.take();
        self.cursor.delete_current()
    }
}
impl<T: DupSort, CURSOR: DbDupCursorRO<T>> Iterator for DupWalker<'_, T, CURSOR> {
    type Item = Result<TableRow<T>, DatabaseError>;
    /// Yields the buffered starting entry first, then walks the key's duplicates.
    fn next(&mut self) -> Option<Self::Item> {
        self.start.take().or_else(|| self.cursor.next_dup().transpose())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/lib.rs | crates/storage/db-api/src/lib.rs | //! reth's database abstraction layer.
//!
//! The database abstraction assumes that the underlying store is a KV store subdivided into tables.
//!
//! One or more changes are tied to a transaction that is atomically committed to the data store at
//! the same time. Strong consistency in what data is written and when is important for reth, so it
//! is not possible to write data to the database outside of using a transaction.
//!
//! Good starting points for this crate are:
//!
//! - [`Database`] for the main database abstraction
//! - [`DbTx`] (RO) and [`DbTxMut`] (RW) for the transaction abstractions.
//! - [`DbCursorRO`] (RO) and [`DbCursorRW`] (RW) for the cursor abstractions (see below).
//!
//! # Cursors and Walkers
//!
//! The abstraction also defines a couple of helpful abstractions for iterating and writing data:
//!
//! - **Cursors** ([`DbCursorRO`] / [`DbCursorRW`]) for iterating data in a table. Cursors are
//! assumed to resolve data in a sorted manner when iterating from start to finish, and it is safe
//! to assume that they are efficient at doing so.
//! - **Walkers** ([`Walker`] / [`RangeWalker`] / [`ReverseWalker`]) use cursors to walk the entries
//! in a table, either fully from a specific point, or over a range.
//!
//! Dup tables (see below) also have corresponding cursors and walkers (e.g. [`DbDupCursorRO`]).
//! These **should** be preferred when working with dup tables, as they provide additional methods
//! that are optimized for dup tables.
//!
//! # Tables
//!
//! reth has two types of tables: simple KV stores (one key, one value) and dup tables (one key,
//! many values). Dup tables can be efficient for certain types of data.
//!
//! Keys are de/serialized using the [`Encode`] and [`Decode`] traits, and values are de/serialized
//! ("compressed") using the [`Compress`] and [`Decompress`] traits.
//!
//! Tables implement the [`Table`] trait.
//!
//! [`Database`]: crate::database::Database
//! [`DbTx`]: crate::transaction::DbTx
//! [`DbTxMut`]: crate::transaction::DbTxMut
//! [`DbCursorRO`]: crate::cursor::DbCursorRO
//! [`DbCursorRW`]: crate::cursor::DbCursorRW
//! [`Walker`]: crate::cursor::Walker
//! [`RangeWalker`]: crate::cursor::RangeWalker
//! [`ReverseWalker`]: crate::cursor::ReverseWalker
//! [`DbDupCursorRO`]: crate::cursor::DbDupCursorRO
//! [`Encode`]: crate::table::Encode
//! [`Decode`]: crate::table::Decode
//! [`Compress`]: crate::table::Compress
//! [`Decompress`]: crate::table::Decompress
//! [`Table`]: crate::table::Table
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
/// Common types used throughout the abstraction.
pub mod common;
/// Cursor database traits.
pub mod cursor;
/// Database traits.
pub mod database;
/// Database metrics trait extensions.
pub mod database_metrics;
pub mod mock;
/// Table traits
pub mod table;
pub mod tables;
pub use tables::*;
/// Transaction database traits.
pub mod transaction;
/// Re-exports
pub use reth_storage_errors::db::{DatabaseError, DatabaseWriteOperation};
pub mod models;
mod scale;
mod utils;
pub use database::Database;
mod unwind;
pub use unwind::DbTxUnwindExt;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/table.rs | crates/storage/db-api/src/table.rs | use crate::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
transaction::{DbTx, DbTxMut},
DatabaseError,
};
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
/// Trait that will transform the data to be saved in the DB in a (ideally) compressed format
pub trait Compress: Send + Sync + Sized + Debug {
    /// Compressed type.
    type Compressed: bytes::BufMut
        + AsRef<[u8]>
        + AsMut<[u8]>
        + Into<Vec<u8>>
        + Default
        + Send
        + Sync
        + Debug;
    /// If the type cannot be compressed, return its inner reference as `Some(self.as_ref())`
    fn uncompressable_ref(&self) -> Option<&[u8]> {
        // By default the type is assumed to be compressible.
        None
    }
    /// Compresses data going into the database.
    ///
    /// Default implementation fills a fresh [`Self::Compressed`] buffer via
    /// [`Self::compress_to_buf`].
    fn compress(self) -> Self::Compressed {
        let mut buf = Self::Compressed::default();
        self.compress_to_buf(&mut buf);
        buf
    }
    /// Compresses data to a given buffer.
    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B);
}
/// Trait that will transform the data to be read from the DB.
pub trait Decompress: Send + Sync + Sized + Debug {
    /// Decompresses data coming from the database.
    fn decompress(value: &[u8]) -> Result<Self, DatabaseError>;
    /// Decompresses owned data coming from the database.
    ///
    /// By default delegates to [`Self::decompress`] on a borrow of the buffer.
    fn decompress_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Self::decompress(&value)
    }
}
/// Trait that will transform the data to be saved in the DB.
///
/// Used for table keys; `Ord` on the encoded form determines key ordering.
pub trait Encode: Send + Sync + Sized + Debug {
    /// Encoded type.
    type Encoded: AsRef<[u8]> + Into<Vec<u8>> + Send + Sync + Ord + Debug;
    /// Encodes data going into the database.
    fn encode(self) -> Self::Encoded;
}
/// Trait that will transform the data to be read from the DB.
pub trait Decode: Send + Sync + Sized + Debug {
    /// Decodes data coming from the database.
    fn decode(value: &[u8]) -> Result<Self, DatabaseError>;
    /// Decodes owned data coming from the database.
    ///
    /// By default delegates to [`Self::decode`] on a borrow of the buffer.
    fn decode_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Self::decode(&value)
    }
}
/// Generic trait that enforces the database key to implement [`Encode`] and [`Decode`].
pub trait Key: Encode + Decode + Ord + Clone + Serialize + for<'a> Deserialize<'a> {}
// Blanket impl: any type satisfying the bounds is automatically a `Key`.
impl<T> Key for T where T: Encode + Decode + Ord + Clone + Serialize + for<'a> Deserialize<'a> {}
/// Generic trait that enforces the database value to implement [`Compress`] and [`Decompress`].
pub trait Value: Compress + Decompress + Serialize {}
// Blanket impl: any type satisfying the bounds is automatically a `Value`.
impl<T> Value for T where T: Compress + Decompress + Serialize {}
/// Generic trait that a database table should follow.
///
/// The [`Table::Key`] and [`Table::Value`] types should implement [`Encode`] and
/// [`Decode`] when appropriate. These traits define how the data is stored and read from the
/// database.
///
/// It allows for the use of codecs. See [`crate::models::ShardedKey`] for a custom
/// implementation.
pub trait Table: Send + Sync + Debug + 'static {
    /// The table's name.
    const NAME: &'static str;
    /// Whether the table is also a `DUPSORT` table.
    const DUPSORT: bool;
    /// Key element of `Table`.
    ///
    /// Sorting should be taken into account when encoding this.
    type Key: Key;
    /// Value element of `Table`.
    type Value: Value;
}
/// Trait that provides object-safe access to the table's metadata.
///
/// Mirrors [`Table::NAME`] and [`Table::DUPSORT`] as methods so tables can be
/// handled behind a `dyn` reference.
pub trait TableInfo: Send + Sync + Debug + 'static {
    /// The table's name.
    fn name(&self) -> &'static str;
    /// Whether the table is a `DUPSORT` table.
    fn is_dupsort(&self) -> bool;
}
/// Tuple with `T::Key` and `T::Value`.
pub type TableRow<T> = (<T as Table>::Key, <T as Table>::Value);
/// `DupSort` allows for keys to be repeated in the database.
///
/// Each key maps to multiple values, sorted by [`DupSort::SubKey`].
///
/// Upstream docs: <https://libmdbx.dqdkfa.ru/usage.html#autotoc_md48>
pub trait DupSort: Table {
    /// The table subkey. This type must implement [`Encode`] and [`Decode`].
    ///
    /// Sorting should be taken into account when encoding this.
    ///
    /// Upstream docs: <https://libmdbx.dqdkfa.ru/usage.html#autotoc_md48>
    type SubKey: Key;
}
/// Allows duplicating tables across databases
pub trait TableImporter: DbTxMut {
    /// Imports all table data from another transaction.
    ///
    /// Walks the source table from the beginning and `append`s each entry to the
    /// destination cursor; the walker yields entries in sorted key order.
    /// NOTE(review): `append` presumably requires the destination table to be
    /// empty or to end before the first imported key — confirm cursor contract.
    fn import_table<T: Table, R: DbTx>(&self, source_tx: &R) -> Result<(), DatabaseError> {
        let mut destination_cursor = self.cursor_write::<T>()?;
        for kv in source_tx.cursor_read::<T>()?.walk(None)? {
            let (k, v) = kv?;
            destination_cursor.append(k, &v)?;
        }
        Ok(())
    }
    /// Imports table data from another transaction within a range.
    ///
    /// The range is `from..=to` when `from` is given, otherwise `..=to`
    /// (from the start of the table); `to` is always inclusive.
    fn import_table_with_range<T: Table, R: DbTx>(
        &self,
        source_tx: &R,
        from: Option<<T as Table>::Key>,
        to: <T as Table>::Key,
    ) -> Result<(), DatabaseError>
    where
        T::Key: Default,
    {
        let mut destination_cursor = self.cursor_write::<T>()?;
        let mut source_cursor = source_tx.cursor_read::<T>()?;
        let source_range = match from {
            Some(from) => source_cursor.walk_range(from..=to),
            None => source_cursor.walk_range(..=to),
        };
        for row in source_range? {
            let (key, value) = row?;
            destination_cursor.append(key, &value)?;
        }
        Ok(())
    }
    /// Imports all dupsort data from another transaction.
    ///
    /// Iterates unique keys via `next_no_dup`, then copies every duplicate value
    /// for each key with a dup walker, appending in subkey order.
    fn import_dupsort<T: DupSort, R: DbTx>(&self, source_tx: &R) -> Result<(), DatabaseError> {
        let mut destination_cursor = self.cursor_dup_write::<T>()?;
        let mut cursor = source_tx.cursor_dup_read::<T>()?;
        while let Some((k, _)) = cursor.next_no_dup()? {
            for kv in cursor.walk_dup(Some(k), None)? {
                let (k, v) = kv?;
                destination_cursor.append_dup(k, v)?;
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/database.rs | crates/storage/db-api/src/database.rs | use crate::{
table::TableImporter,
transaction::{DbTx, DbTxMut},
DatabaseError,
};
use std::{fmt::Debug, sync::Arc};
/// Main Database trait that can open read-only and read-write transactions.
///
/// Sealed trait which cannot be implemented by 3rd parties, exposed only for consumption.
pub trait Database: Send + Sync + Debug {
    /// Read-Only database transaction
    type TX: DbTx + Send + Sync + Debug + 'static;
    /// Read-Write database transaction
    type TXMut: DbTxMut + DbTx + TableImporter + Send + Sync + Debug + 'static;
    /// Create read only transaction.
    #[track_caller]
    fn tx(&self) -> Result<Self::TX, DatabaseError>;
    /// Create read write transaction only possible if database is open with write access.
    #[track_caller]
    fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError>;
    /// Takes a function and passes a read-only transaction into it, making sure it's closed in the
    /// end of the execution.
    ///
    /// The closure's return value is passed through unchanged; errors opening or
    /// committing the transaction are returned as `Err`.
    fn view<T, F>(&self, f: F) -> Result<T, DatabaseError>
    where
        F: FnOnce(&Self::TX) -> T,
    {
        let tx = self.tx()?;
        let res = f(&tx);
        // Committing a read-only tx releases it (see `DbTx::commit`).
        tx.commit()?;
        Ok(res)
    }
    /// Takes a function and passes a write-read transaction into it, making sure it's committed in
    /// the end of the execution.
    ///
    /// The transaction is committed even if the closure's result represents a
    /// logical failure — the closure's return value is opaque to this method.
    fn update<T, F>(&self, f: F) -> Result<T, DatabaseError>
    where
        F: FnOnce(&Self::TXMut) -> T,
    {
        let tx = self.tx_mut()?;
        let res = f(&tx);
        tx.commit()?;
        Ok(res)
    }
}
impl<DB: Database> Database for Arc<DB> {
    type TX = <DB as Database>::TX;
    type TXMut = <DB as Database>::TXMut;
    /// Delegates to the wrapped database.
    fn tx(&self) -> Result<Self::TX, DatabaseError> {
        self.as_ref().tx()
    }
    /// Delegates to the wrapped database.
    fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
        self.as_ref().tx_mut()
    }
}
impl<DB: Database> Database for &DB {
    type TX = <DB as Database>::TX;
    type TXMut = <DB as Database>::TXMut;
    /// Delegates to the referenced database.
    fn tx(&self) -> Result<Self::TX, DatabaseError> {
        (**self).tx()
    }
    /// Delegates to the referenced database.
    fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
        (**self).tx_mut()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/scale.rs | crates/storage/db-api/src/scale.rs | use crate::{
table::{Compress, Decompress},
DatabaseError,
};
use alloy_primitives::U256;
// Private marker so `ScaleValue` cannot be implemented outside this module.
mod sealed {
    pub trait Sealed {}
}
/// Marker trait type to restrict the [`Compress`] and [`Decompress`] with scale to chosen types.
pub trait ScaleValue: sealed::Sealed {}
// Blanket impl: every `ScaleValue` is compressed via SCALE encoding.
impl<T> Compress for T
where
    T: ScaleValue + parity_scale_codec::Encode + Sync + Send + std::fmt::Debug,
{
    type Compressed = Vec<u8>;
    fn compress(self) -> Self::Compressed {
        parity_scale_codec::Encode::encode(&self)
    }
    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
        // Adapt the `BufMut` to the SCALE `Output` trait to avoid an intermediate Vec.
        parity_scale_codec::Encode::encode_to(&self, OutputCompat::wrap_mut(buf));
    }
}
// Blanket impl: every `ScaleValue` is decompressed via SCALE decoding.
impl<T> Decompress for T
where
    T: ScaleValue + parity_scale_codec::Decode + Sync + Send + std::fmt::Debug,
{
    fn decompress(mut value: &[u8]) -> Result<T, DatabaseError> {
        // Any SCALE decode failure is collapsed into `DatabaseError::Decode`.
        parity_scale_codec::Decode::decode(&mut value).map_err(|_| DatabaseError::Decode)
    }
}
/// Implements compression for SCALE type.
///
/// Marks each listed type as a [`ScaleValue`], which activates the blanket
/// [`Compress`]/[`Decompress`] impls above.
macro_rules! impl_compression_for_scale {
    ($($name:tt),+) => {
        $(
            impl ScaleValue for $name {}
            impl sealed::Sealed for $name {}
        )+
    };
}
// `Vec<u8>` is not a single token tree, so it is implemented manually.
impl ScaleValue for Vec<u8> {}
impl sealed::Sealed for Vec<u8> {}
impl_compression_for_scale!(U256);
impl_compression_for_scale!(u8, u32, u16, u64);
// Adapter exposing a `bytes::BufMut` as a `parity_scale_codec::Output` sink.
#[repr(transparent)]
struct OutputCompat<B>(B);
impl<B> OutputCompat<B> {
    // Reinterprets `&mut B` as `&mut OutputCompat<B>` without copying.
    fn wrap_mut(buf: &mut B) -> &mut Self {
        // SAFETY: `OutputCompat<B>` is `#[repr(transparent)]` over `B`, so both
        // types have identical layout and a `&mut B` is a valid
        // `&mut OutputCompat<B>` for the same lifetime.
        unsafe { std::mem::transmute(buf) }
    }
}
impl<B: bytes::BufMut> parity_scale_codec::Output for OutputCompat<B> {
    fn write(&mut self, bytes: &[u8]) {
        self.0.put_slice(bytes);
    }
    fn push_byte(&mut self, byte: u8) {
        self.0.put_u8(byte);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/utils.rs | crates/storage/db-api/src/utils.rs | #[macro_export]
/// Implements the `Arbitrary` trait for types with fixed array types.
macro_rules! impl_fixed_arbitrary {
($(($name:ident, $size:expr)),*) => {
#[cfg(any(test, feature = "arbitrary"))]
use arbitrary::{Arbitrary, Unstructured};
$(
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> Arbitrary<'a> for $name {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self, arbitrary::Error> {
let mut buffer = vec![0; $size];
u.fill_buffer(buffer.as_mut_slice())?;
Decode::decode_owned(buffer).map_err(|_| arbitrary::Error::IncorrectFormat)
}
}
#[cfg(any(test, feature = "arbitrary"))]
impl proptest::prelude::Arbitrary for $name {
type Parameters = ();
type Strategy = proptest::strategy::Map<
proptest::collection::VecStrategy<<u8 as proptest::arbitrary::Arbitrary>::Strategy>,
fn(Vec<u8>) -> Self,
>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
use proptest::strategy::Strategy;
proptest::collection::vec(proptest::arbitrary::any_with::<u8>(args), $size)
.prop_map(move |vec| Decode::decode_owned(vec).unwrap())
}
}
)+
};
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/unwind.rs | crates/storage/db-api/src/unwind.rs | use crate::{cursor::DbCursorRO, table::Table, transaction::DbTxMut};
use reth_storage_errors::db::DatabaseError;
use std::ops::RangeBounds;
/// Extension trait for [`DbTxMut`] that provides unwind functionality.
pub trait DbTxUnwindExt: DbTxMut {
    /// Unwind table by some number key.
    /// Returns number of rows unwound.
    ///
    /// Note: Key is not inclusive and specified key would stay in db.
    #[inline]
    fn unwind_table_by_num<T>(&self, num: u64) -> Result<usize, DatabaseError>
    where
        T: Table<Key = u64>,
    {
        // The key already is the number, so the selector is the identity.
        self.unwind_table::<T, _>(num, |key| key)
    }
    /// Unwind the table to a provided number key.
    /// Returns number of rows unwound.
    ///
    /// Note: Key is not inclusive and specified key would stay in db.
    fn unwind_table<T, F>(&self, key: u64, mut selector: F) -> Result<usize, DatabaseError>
    where
        T: Table,
        F: FnMut(T::Key) -> u64,
    {
        let mut cursor = self.cursor_write::<T>()?;
        // Walk from the end of the table backwards, deleting entries until the
        // selector maps an entry to a number at or below the target key.
        let mut reverse_walker = cursor.walk_back(None)?;
        let mut deleted = 0;
        // NOTE(review): a `Some(Err(_))` from the walker silently terminates the
        // loop instead of propagating the database error — confirm this
        // best-effort behavior is intentional.
        while let Some(Ok((entry_key, _))) = reverse_walker.next() {
            if selector(entry_key.clone()) <= key {
                break
            }
            reverse_walker.delete_current()?;
            deleted += 1;
        }
        Ok(deleted)
    }
    /// Unwind a table forward by a [`Walker`][crate::cursor::Walker] on another table.
    ///
    /// Note: Range is inclusive and first key in the range is removed.
    fn unwind_table_by_walker<T1, T2>(
        &self,
        range: impl RangeBounds<T1::Key>,
    ) -> Result<(), DatabaseError>
    where
        T1: Table,
        T2: Table<Key = T1::Value>,
    {
        let mut cursor = self.cursor_write::<T1>()?;
        let mut walker = cursor.walk_range(range)?;
        // Each value stored in `T1` is interpreted as a key into `T2` and deleted there.
        while let Some((_, value)) = walker.next().transpose()? {
            self.delete::<T2>(value, None)?;
        }
        Ok(())
    }
}
impl<T> DbTxUnwindExt for T where T: DbTxMut {}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/mock.rs | crates/storage/db-api/src/mock.rs | //! Mock database
use crate::{
common::{IterPairResult, PairResult, ValueOnlyResult},
cursor::{
DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, DupWalker, RangeWalker,
ReverseWalker, Walker,
},
database::Database,
database_metrics::DatabaseMetrics,
table::{DupSort, Encode, Table, TableImporter},
transaction::{DbTx, DbTxMut},
DatabaseError,
};
use core::ops::Bound;
use std::{collections::BTreeMap, ops::RangeBounds};
/// Mock database used for testing with inner `BTreeMap` structure
///
/// NOTE(review): the stored `data` map is currently never consulted — all
/// transactions and cursors below report an empty database.
#[derive(Clone, Debug, Default)]
pub struct DatabaseMock {
    /// Main data. TODO (Make it table aware)
    pub data: BTreeMap<Vec<u8>, Vec<u8>>,
}
impl Database for DatabaseMock {
    type TX = TxMock;
    type TXMut = TxMock;
    /// Returns a fresh default [`TxMock`]; `self.data` is not shared with it.
    fn tx(&self) -> Result<Self::TX, DatabaseError> {
        Ok(TxMock::default())
    }
    /// Returns a fresh default [`TxMock`]; `self.data` is not shared with it.
    fn tx_mut(&self) -> Result<Self::TXMut, DatabaseError> {
        Ok(TxMock::default())
    }
}
impl DatabaseMetrics for DatabaseMock {}
/// Mock read only tx
#[derive(Debug, Clone, Default)]
pub struct TxMock {
    /// Table representation; only its length is ever read (see `entries`).
    _table: BTreeMap<Vec<u8>, Vec<u8>>,
}
// Read-side stub: all lookups return `Ok(None)`, commit always succeeds.
impl DbTx for TxMock {
    type Cursor<T: Table> = CursorMock;
    type DupCursor<T: DupSort> = CursorMock;
    fn get<T: Table>(&self, _key: T::Key) -> Result<Option<T::Value>, DatabaseError> {
        Ok(None)
    }
    fn get_by_encoded_key<T: Table>(
        &self,
        _key: &<T::Key as Encode>::Encoded,
    ) -> Result<Option<T::Value>, DatabaseError> {
        Ok(None)
    }
    fn commit(self) -> Result<bool, DatabaseError> {
        Ok(true)
    }
    fn abort(self) {}
    fn cursor_read<T: Table>(&self) -> Result<Self::Cursor<T>, DatabaseError> {
        Ok(CursorMock { _cursor: 0 })
    }
    fn cursor_dup_read<T: DupSort>(&self) -> Result<Self::DupCursor<T>, DatabaseError> {
        Ok(CursorMock { _cursor: 0 })
    }
    fn entries<T: Table>(&self) -> Result<usize, DatabaseError> {
        // The only method that actually reads `_table`.
        Ok(self._table.len())
    }
    fn disable_long_read_transaction_safety(&mut self) {}
}
// Write-side stub: all mutations are no-ops that report success.
impl DbTxMut for TxMock {
    type CursorMut<T: Table> = CursorMock;
    type DupCursorMut<T: DupSort> = CursorMock;
    fn put<T: Table>(&self, _key: T::Key, _value: T::Value) -> Result<(), DatabaseError> {
        Ok(())
    }
    fn delete<T: Table>(
        &self,
        _key: T::Key,
        _value: Option<T::Value>,
    ) -> Result<bool, DatabaseError> {
        Ok(true)
    }
    fn clear<T: Table>(&self) -> Result<(), DatabaseError> {
        Ok(())
    }
    fn cursor_write<T: Table>(&self) -> Result<Self::CursorMut<T>, DatabaseError> {
        Ok(CursorMock { _cursor: 0 })
    }
    fn cursor_dup_write<T: DupSort>(&self) -> Result<Self::DupCursorMut<T>, DatabaseError> {
        Ok(CursorMock { _cursor: 0 })
    }
}
impl TableImporter for TxMock {}
/// Cursor that iterates over table
///
/// Stub cursor over an always-empty table; the inner field is unused.
#[derive(Debug)]
pub struct CursorMock {
    _cursor: u32,
}
// Read-only cursor stub: every positioning method reports `Ok(None)`, so the
// walkers constructed below always yield nothing.
impl<T: Table> DbCursorRO<T> for CursorMock {
    fn first(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn seek_exact(&mut self, _key: T::Key) -> PairResult<T> {
        Ok(None)
    }
    fn seek(&mut self, _key: T::Key) -> PairResult<T> {
        Ok(None)
    }
    fn next(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn prev(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn last(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn current(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn walk(&mut self, start_key: Option<T::Key>) -> Result<Walker<'_, T, Self>, DatabaseError> {
        // Position at `start_key` when given, otherwise at the first entry.
        let start: IterPairResult<T> = match start_key {
            Some(key) => <Self as DbCursorRO<T>>::seek(self, key).transpose(),
            None => <Self as DbCursorRO<T>>::first(self).transpose(),
        };
        Ok(Walker::new(self, start))
    }
    fn walk_range(
        &mut self,
        range: impl RangeBounds<T::Key>,
    ) -> Result<RangeWalker<'_, T, Self>, DatabaseError> {
        // NOTE(review): an `Excluded` start bound is seeked inclusively, and an
        // `Excluded` end bound is widened to `Included`. Harmless for this mock
        // (always empty) but not a faithful model of range semantics.
        let start_key = match range.start_bound() {
            Bound::Included(key) | Bound::Excluded(key) => Some((*key).clone()),
            Bound::Unbounded => None,
        };
        let end_key = match range.end_bound() {
            Bound::Included(key) | Bound::Excluded(key) => Bound::Included((*key).clone()),
            Bound::Unbounded => Bound::Unbounded,
        };
        let start: IterPairResult<T> = match start_key {
            Some(key) => <Self as DbCursorRO<T>>::seek(self, key).transpose(),
            None => <Self as DbCursorRO<T>>::first(self).transpose(),
        };
        Ok(RangeWalker::new(self, start, end_key))
    }
    fn walk_back(
        &mut self,
        start_key: Option<T::Key>,
    ) -> Result<ReverseWalker<'_, T, Self>, DatabaseError> {
        // Reverse walks start at `start_key` when given, otherwise at the last entry.
        let start: IterPairResult<T> = match start_key {
            Some(key) => <Self as DbCursorRO<T>>::seek(self, key).transpose(),
            None => <Self as DbCursorRO<T>>::last(self).transpose(),
        };
        Ok(ReverseWalker::new(self, start))
    }
}
// Dup-cursor read stub: no duplicates ever exist, so every lookup is `Ok(None)`
// and the dup walker starts (and ends) empty.
impl<T: DupSort> DbDupCursorRO<T> for CursorMock {
    fn next_dup(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn next_no_dup(&mut self) -> PairResult<T> {
        Ok(None)
    }
    fn next_dup_val(&mut self) -> ValueOnlyResult<T> {
        Ok(None)
    }
    fn seek_by_key_subkey(
        &mut self,
        _key: <T as Table>::Key,
        _subkey: <T as DupSort>::SubKey,
    ) -> ValueOnlyResult<T> {
        Ok(None)
    }
    fn walk_dup(
        &mut self,
        _key: Option<<T>::Key>,
        _subkey: Option<<T as DupSort>::SubKey>,
    ) -> Result<DupWalker<'_, T, Self>, DatabaseError> {
        // Key/subkey are ignored: the walker has no buffered start entry.
        Ok(DupWalker { cursor: self, start: None })
    }
}
// Write cursor stub: all mutations are no-ops that report success.
impl<T: Table> DbCursorRW<T> for CursorMock {
    fn upsert(
        &mut self,
        _key: <T as Table>::Key,
        _value: &<T as Table>::Value,
    ) -> Result<(), DatabaseError> {
        Ok(())
    }
    fn insert(
        &mut self,
        _key: <T as Table>::Key,
        _value: &<T as Table>::Value,
    ) -> Result<(), DatabaseError> {
        Ok(())
    }
    fn append(
        &mut self,
        _key: <T as Table>::Key,
        _value: &<T as Table>::Value,
    ) -> Result<(), DatabaseError> {
        Ok(())
    }
    fn delete_current(&mut self) -> Result<(), DatabaseError> {
        Ok(())
    }
}
// Dup write cursor stub: all mutations are no-ops that report success.
impl<T: DupSort> DbDupCursorRW<T> for CursorMock {
    fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError> {
        Ok(())
    }
    fn append_dup(&mut self, _key: <T>::Key, _value: <T>::Value) -> Result<(), DatabaseError> {
        Ok(())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/common.rs | crates/storage/db-api/src/common.rs | use crate::{table::*, DatabaseError};
/// A key-value pair for table `T`.
pub type KeyValue<T> = (<T as Table>::Key, <T as Table>::Value);
/// A fallible key-value pair that may or may not exist.
///
/// The `Result` represents that the operation might fail, while the `Option` represents whether or
/// not the entry exists.
pub type PairResult<T> = Result<Option<KeyValue<T>>, DatabaseError>;
/// A key-value pair coming from an iterator.
///
/// The `Result` represents that the operation might fail, while the `Option` represents whether or
/// not there is another entry.
pub type IterPairResult<T> = Option<Result<KeyValue<T>, DatabaseError>>;
/// A fallible, possibly-absent value (no key) for table `T`.
pub type ValueOnlyResult<T> = Result<Option<<T as Table>::Value>, DatabaseError>;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/database_metrics.rs | crates/storage/db-api/src/database_metrics.rs | use metrics::{counter, gauge, histogram, Label};
use std::sync::Arc;
/// Represents a type that can report metrics, used mainly with the database. The `report_metrics`
/// method can be used as a prometheus hook.
pub trait DatabaseMetrics {
    /// Reports metrics for the database.
    ///
    /// Default implementation publishes everything returned by the three
    /// collector methods below via the `metrics` macros.
    fn report_metrics(&self) {
        for (name, value, labels) in self.gauge_metrics() {
            gauge!(name, labels).set(value);
        }
        for (name, value, labels) in self.counter_metrics() {
            counter!(name, labels).increment(value);
        }
        for (name, value, labels) in self.histogram_metrics() {
            histogram!(name, labels).record(value);
        }
    }
    /// Returns a list of [Gauge](metrics::Gauge) metrics for the database.
    fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec<Label>)> {
        vec![]
    }
    /// Returns a list of [Counter](metrics::Counter) metrics for the database.
    fn counter_metrics(&self) -> Vec<(&'static str, u64, Vec<Label>)> {
        vec![]
    }
    /// Returns a list of [Histogram](metrics::Histogram) metrics for the database.
    fn histogram_metrics(&self) -> Vec<(&'static str, f64, Vec<Label>)> {
        vec![]
    }
}
// Forwarding impl: an `Arc`-wrapped database reports the inner database's metrics.
impl<DB: DatabaseMetrics> DatabaseMetrics for Arc<DB> {
    fn report_metrics(&self) {
        (**self).report_metrics()
    }
    fn gauge_metrics(&self) -> Vec<(&'static str, f64, Vec<Label>)> {
        (**self).gauge_metrics()
    }
    fn counter_metrics(&self) -> Vec<(&'static str, u64, Vec<Label>)> {
        (**self).counter_metrics()
    }
    fn histogram_metrics(&self) -> Vec<(&'static str, f64, Vec<Label>)> {
        (**self).histogram_metrics()
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/transaction.rs | crates/storage/db-api/src/transaction.rs | use crate::{
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW},
table::{DupSort, Encode, Table},
DatabaseError,
};
use std::fmt::Debug;
/// Read only transaction
pub trait DbTx: Debug + Send + Sync {
    /// Cursor type for this read-only transaction
    type Cursor<T: Table>: DbCursorRO<T> + Send + Sync;
    /// `DupCursor` type for this read-only transaction
    type DupCursor<T: DupSort>: DbDupCursorRO<T> + DbCursorRO<T> + Send + Sync;
    /// Get value by an owned key
    fn get<T: Table>(&self, key: T::Key) -> Result<Option<T::Value>, DatabaseError>;
    /// Get value by a reference to the encoded key, especially useful for "raw" keys
    /// that encode to themselves like Address and B256. Doesn't need to clone a
    /// reference key like `get`.
    fn get_by_encoded_key<T: Table>(
        &self,
        key: &<T::Key as Encode>::Encoded,
    ) -> Result<Option<T::Value>, DatabaseError>;
    /// Commit for read only transaction will consume and free transaction and allows
    /// freeing of memory pages
    fn commit(self) -> Result<bool, DatabaseError>;
    /// Aborts transaction
    fn abort(self);
    /// Iterate over read only values in table.
    fn cursor_read<T: Table>(&self) -> Result<Self::Cursor<T>, DatabaseError>;
    /// Iterate over read only values in dup sorted table.
    fn cursor_dup_read<T: DupSort>(&self) -> Result<Self::DupCursor<T>, DatabaseError>;
    /// Returns number of entries in the table.
    fn entries<T: Table>(&self) -> Result<usize, DatabaseError>;
    /// Disables long-lived read transaction safety guarantees.
    fn disable_long_read_transaction_safety(&mut self);
}
/// Read write transaction that allows writing to database
pub trait DbTxMut: Send + Sync {
    /// Read-Write Cursor type
    type CursorMut<T: Table>: DbCursorRW<T> + DbCursorRO<T> + Send + Sync;
    /// Read-Write `DupCursor` type
    type DupCursorMut<T: DupSort>: DbDupCursorRW<T>
        + DbCursorRW<T>
        + DbDupCursorRO<T>
        + DbCursorRO<T>
        + Send
        + Sync;
    /// Put value to database
    fn put<T: Table>(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>;
    /// Delete value from database
    ///
    /// Returns whether an entry was actually removed.
    fn delete<T: Table>(&self, key: T::Key, value: Option<T::Value>)
        -> Result<bool, DatabaseError>;
    /// Clears database.
    fn clear<T: Table>(&self) -> Result<(), DatabaseError>;
    /// Cursor mut
    fn cursor_write<T: Table>(&self) -> Result<Self::CursorMut<T>, DatabaseError>;
    /// `DupCursor` mut.
    fn cursor_dup_write<T: DupSort>(&self) -> Result<Self::DupCursorMut<T>, DatabaseError>;
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/models/integer_list.rs | crates/storage/db-api/src/models/integer_list.rs | //! Implements [`Compress`] and [`Decompress`] for [`IntegerList`]
use crate::{
table::{Compress, Decompress},
DatabaseError,
};
use bytes::BufMut;
use core::fmt;
use derive_more::Deref;
use roaring::RoaringTreemap;
/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers.
///
/// This structure provides excellent compression while allowing direct access to individual
/// elements without the need for full decompression.
///
/// Key features:
/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage.
/// - Direct access: elements can be accessed or queried without needing to decode the entire list.
/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit
/// integers.
#[derive(Clone, PartialEq, Default, Deref)]
pub struct IntegerList(pub RoaringTreemap);
impl fmt::Debug for IntegerList {
    /// Renders as `IntegerList[...]` with the contained integers listed in order.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "IntegerList")?;
        f.debug_list().entries(self.0.iter()).finish()
    }
}
impl IntegerList {
    /// Creates a new empty [`IntegerList`].
    pub fn empty() -> Self {
        Self(RoaringTreemap::new())
    }
    /// Creates an [`IntegerList`] from a list of integers.
    ///
    /// Returns an error if the list is not pre-sorted.
    pub fn new(list: impl IntoIterator<Item = u64>) -> Result<Self, IntegerListError> {
        // `from_sorted_iter` fails on input that is not sorted; surface that as our
        // own `UnsortedInput` error.
        RoaringTreemap::from_sorted_iter(list)
            .map(Self)
            .map_err(|_| IntegerListError::UnsortedInput)
    }
    /// Creates an [`IntegerList`] from a pre-sorted list of integers.
    ///
    /// # Panics
    ///
    /// Panics if the list is not pre-sorted.
    #[inline]
    #[track_caller]
    pub fn new_pre_sorted(list: impl IntoIterator<Item = u64>) -> Self {
        Self::new(list).expect("IntegerList must be pre-sorted and non-empty")
    }
    /// Appends a list of integers to the current list.
    ///
    /// On success returns the count reported by [`RoaringTreemap::append`].
    pub fn append(&mut self, list: impl IntoIterator<Item = u64>) -> Result<u64, IntegerListError> {
        self.0.append(list).map_err(|_| IntegerListError::UnsortedInput)
    }
    /// Pushes a new integer to the list.
    ///
    /// Fails with `UnsortedInput` when `value` would break the sorted order.
    pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> {
        // `RoaringTreemap::push` returns `false` when the value cannot be appended.
        self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput)
    }
    /// Clears the list.
    pub fn clear(&mut self) {
        self.0.clear();
    }
    /// Serializes an [`IntegerList`] into a sequence of bytes.
    pub fn to_bytes(&self) -> Vec<u8> {
        // Pre-size the buffer to the exact serialized size to avoid reallocation.
        let mut vec = Vec::with_capacity(self.0.serialized_size());
        self.0.serialize_into(&mut vec).expect("not able to encode IntegerList");
        vec
    }
    /// Serializes an [`IntegerList`] into a sequence of bytes.
    pub fn to_mut_bytes<B: bytes::BufMut>(&self, buf: &mut B) {
        // Writing into an in-memory buffer should not fail — TODO confirm roaring only
        // errors on writer I/O failures.
        self.0.serialize_into(buf.writer()).unwrap();
    }
    /// Deserializes a sequence of bytes into a proper [`IntegerList`].
    pub fn from_bytes(data: &[u8]) -> Result<Self, IntegerListError> {
        RoaringTreemap::deserialize_from(data)
            .map(Self)
            .map_err(|_| IntegerListError::FailedToDeserialize)
    }
}
impl serde::Serialize for IntegerList {
    /// Serializes the list as a plain sequence of `u64` elements.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        use serde::ser::SerializeSeq;
        // `RoaringTreemap::len` is a `u64`; convert with `try_from` instead of `as` to
        // avoid silent truncation of the length hint on 32-bit targets. Serializers
        // accept `None` when the length does not fit.
        let mut seq = serializer.serialize_seq(usize::try_from(self.len()).ok())?;
        for e in &self.0 {
            seq.serialize_element(&e)?;
        }
        seq.end()
    }
}
/// Visitor that rebuilds an [`IntegerList`] element-by-element from a serialized sequence.
struct IntegerListVisitor;
impl<'de> serde::de::Visitor<'de> for IntegerListVisitor {
    type Value = IntegerList;
    fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("a usize array")
    }
    fn visit_seq<E>(self, mut seq: E) -> Result<Self::Value, E::Error>
    where
        E: serde::de::SeqAccess<'de>,
    {
        // Rebuild incrementally; `push` also enforces sorted order, so an unsorted
        // payload is rejected with a deserialization error.
        let mut list = IntegerList::empty();
        while let Some(item) = seq.next_element()? {
            list.push(item).map_err(serde::de::Error::custom)?;
        }
        Ok(list)
    }
}
impl<'de> serde::Deserialize<'de> for IntegerList {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        // NOTE(review): the hint is `byte_buf`, but the visitor only implements
        // `visit_seq` (matching `Serialize` above, which emits a sequence).
        // Self-describing formats ignore the hint; confirm no non-self-describing
        // format relies on this path.
        deserializer.deserialize_byte_buf(IntegerListVisitor)
    }
}
#[cfg(any(test, feature = "arbitrary"))]
use arbitrary::{Arbitrary, Unstructured};
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> Arbitrary<'a> for IntegerList {
    /// Builds an arbitrary list by sorting random values first so `new` accepts them.
    fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self, arbitrary::Error> {
        let mut nums: Vec<u64> = Vec::arbitrary(u)?;
        nums.sort_unstable();
        // NOTE(review): duplicates survive the sort; if `new` rejects duplicates they
        // surface as `IncorrectFormat` — confirm that is intended.
        Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat)
    }
}
/// Primitives error type.
// `Display` and `Error` impls are generated by `derive_more`.
#[derive(Debug, derive_more::Display, derive_more::Error)]
pub enum IntegerListError {
    /// The provided input is unsorted.
    #[display("the provided input is unsorted")]
    UnsortedInput,
    /// Failed to deserialize data into type.
    #[display("failed to deserialize data into type")]
    FailedToDeserialize,
}
impl Compress for IntegerList {
    type Compressed = Vec<u8>;
    fn compress(self) -> Self::Compressed {
        // The database "compressed" form is simply the roaring serialized bytes.
        self.to_bytes()
    }
    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
        self.to_mut_bytes(buf)
    }
}
impl Decompress for IntegerList {
    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
        // Any roaring deserialization failure is reported as a generic decode error.
        Self::from_bytes(value).map_err(|_| DatabaseError::Decode)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn empty_list() {
        // Both constructors must agree that empty input produces a zero-length list.
        assert_eq!(IntegerList::empty().len(), 0);
        assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0);
    }
    #[test]
    fn test_integer_list() {
        let original_list = [1, 2, 3];
        let ef_list = IntegerList::new(original_list).unwrap();
        assert_eq!(ef_list.iter().collect::<Vec<_>>(), original_list);
    }
    #[test]
    fn test_integer_list_serialization() {
        // Round-trip: serializing then deserializing must reproduce the original list.
        let original_list = [1, 2, 3];
        let ef_list = IntegerList::new(original_list).unwrap();
        let blist = ef_list.to_bytes();
        assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list)
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/models/sharded_key.rs | crates/storage/db-api/src/models/sharded_key.rs | //! Sharded key
use crate::{
table::{Decode, Encode},
DatabaseError,
};
use alloy_primitives::BlockNumber;
use serde::{Deserialize, Serialize};
use std::hash::Hash;
/// Number of indices in one shard.
// NOTE(review): the same constant is re-declared in `storage_sharded_key.rs`; keep the
// two values in sync.
pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000;
/// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data
/// into different shards. Example:
///
/// `Address | 200` -> data is from block 0 to 200.
///
/// `Address | 300` -> data is from block 201 to 300.
#[derive(Debug, Default, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash)]
pub struct ShardedKey<T> {
    /// The key for this type.
    pub key: T,
    /// Highest block number to which `value` is related to.
    pub highest_block_number: BlockNumber,
}
impl<T> AsRef<Self> for ShardedKey<T> {
    fn as_ref(&self) -> &Self {
        self
    }
}
impl<T> ShardedKey<T> {
    /// Creates a new `ShardedKey<T>`.
    pub const fn new(key: T, highest_block_number: BlockNumber) -> Self {
        Self { key, highest_block_number }
    }
    /// Creates a new key with the highest block number set to maximum.
    /// This is useful when we want to search the last value for a given key.
    pub const fn last(key: T) -> Self {
        // `u64::MAX` sorts after every real block number in the big-endian encoding.
        Self { key, highest_block_number: u64::MAX }
    }
}
impl<T: Encode> Encode for ShardedKey<T> {
    type Encoded = Vec<u8>;
    /// Encodes as `key bytes || 8-byte big-endian highest block number`.
    fn encode(self) -> Self::Encoded {
        // Destructure first so each field is moved exactly once.
        let Self { key, highest_block_number } = self;
        let mut out: Vec<u8> = key.encode().into();
        out.extend_from_slice(&highest_block_number.to_be_bytes());
        out
    }
}
impl<T: Decode> Decode for ShardedKey<T> {
    /// Decodes `key bytes || 8-byte big-endian highest block number`.
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        // Split off the trailing 8-byte suffix; everything before it is the inner key.
        let (key_bytes, block_number_bytes) =
            value.split_last_chunk().ok_or(DatabaseError::Decode)?;
        let key = T::decode(key_bytes)?;
        // The suffix is the highest *block* number this shard covers (see struct docs);
        // the previous local name (`highest_tx_number`) was misleading.
        let highest_block_number = u64::from_be_bytes(*block_number_bytes);
        Ok(Self::new(key, highest_block_number))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/models/storage_sharded_key.rs | crates/storage/db-api/src/models/storage_sharded_key.rs | //! Storage sharded key
use crate::{
table::{Decode, Encode},
DatabaseError,
};
use alloy_primitives::{Address, BlockNumber, B256};
use derive_more::AsRef;
use serde::{Deserialize, Serialize};
use super::ShardedKey;
/// Number of indices in one shard.
// NOTE(review): duplicated in `sharded_key.rs`; keep the two values in sync.
pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000;
/// The size of [`StorageShardedKey`] encode bytes.
/// The fields are: 20-byte address, 32-byte key, and 8-byte block number
const STORAGE_SHARD_KEY_BYTES_SIZE: usize = 20 + 32 + 8;
/// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data
/// into different shards. Example:
///
/// `Address | StorageKey | 200` -> data is from block 0 to 200.
///
/// `Address | StorageKey | 300` -> data is from block 201 to 300.
#[derive(
    Debug, Default, Clone, Eq, Ord, PartialOrd, PartialEq, AsRef, Serialize, Deserialize, Hash,
)]
pub struct StorageShardedKey {
    /// Storage account address.
    pub address: Address,
    /// Storage slot with highest block number.
    #[as_ref]
    pub sharded_key: ShardedKey<B256>,
}
impl StorageShardedKey {
    /// Creates a new `StorageShardedKey`.
    pub const fn new(
        address: Address,
        storage_key: B256,
        highest_block_number: BlockNumber,
    ) -> Self {
        Self { address, sharded_key: ShardedKey { key: storage_key, highest_block_number } }
    }
    /// Creates a new key with the highest block number set to maximum.
    /// This is useful when we want to search the last value for a given key.
    pub const fn last(address: Address, storage_key: B256) -> Self {
        Self {
            address,
            sharded_key: ShardedKey { key: storage_key, highest_block_number: u64::MAX },
        }
    }
}
impl Encode for StorageShardedKey {
    type Encoded = Vec<u8>;
    /// Encodes the fixed 60-byte layout: address (20) || storage key (32) || block number (8).
    fn encode(self) -> Self::Encoded {
        let mut out: Vec<u8> = Vec::with_capacity(STORAGE_SHARD_KEY_BYTES_SIZE);
        out.extend_from_slice(&self.address.encode());
        out.extend_from_slice(&self.sharded_key.key.encode());
        out.extend_from_slice(&self.sharded_key.highest_block_number.to_be_bytes());
        out
    }
}
impl Decode for StorageShardedKey {
    /// Decodes the fixed 60-byte layout produced by [`Encode`] above.
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        // Reject anything that is not exactly the fixed encoded width.
        if value.len() != STORAGE_SHARD_KEY_BYTES_SIZE {
            return Err(DatabaseError::Decode)
        }
        // Length is verified above, so these fixed ranges cannot panic.
        let address = Address::decode(&value[..20])?;
        let storage_key = B256::decode(&value[20..52])?;
        let highest_block_number =
            u64::from_be_bytes(value[52..].try_into().map_err(|_| DatabaseError::Decode)?);
        Ok(Self { address, sharded_key: ShardedKey::new(storage_key, highest_block_number) })
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/models/mod.rs | crates/storage/db-api/src/models/mod.rs | //! Implements data structures specific to the database
use crate::{
table::{Compress, Decode, Decompress, Encode},
DatabaseError,
};
use alloy_consensus::Header;
use alloy_primitives::{Address, Bytes, Log, B256, U256};
use reth_codecs::{add_arbitrary_tests, Compact};
use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType};
use reth_primitives_traits::{Account, Bytecode, StorageEntry};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::StageCheckpoint;
use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *};
use seismic_alloy_genesis::GenesisAccount;
use serde::{Deserialize, Serialize};
pub mod accounts;
pub mod blocks;
pub mod integer_list;
pub mod sharded_key;
pub mod storage_sharded_key;
pub use accounts::*;
pub use blocks::*;
pub use integer_list::IntegerList;
pub use reth_db_models::{
AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices,
StoredBlockWithdrawals,
};
pub use sharded_key::ShardedKey;
/// Macro that implements [`Encode`] and [`Decode`] for uint types.
macro_rules! impl_uints {
    ($($name:tt),+) => {
        $(
            impl Encode for $name {
                type Encoded = [u8; std::mem::size_of::<$name>()];
                fn encode(self) -> Self::Encoded {
                    // Big-endian keeps lexicographic byte order aligned with numeric order.
                    self.to_be_bytes()
                }
            }
            impl Decode for $name {
                fn decode(value: &[u8]) -> Result<Self, $crate::DatabaseError> {
                    Ok(
                        $name::from_be_bytes(
                            // Errors (rather than panicking) when `value` is not exactly
                            // `size_of::<$name>()` bytes.
                            value.try_into().map_err(|_| $crate::DatabaseError::Decode)?
                        )
                    )
                }
            }
        )+
    };
}
impl_uints!(u64, u32, u16, u8);
impl Encode for Vec<u8> {
    type Encoded = Self;
    fn encode(self) -> Self::Encoded {
        // Raw bytes are already in their encoded form.
        self
    }
}
impl Decode for Vec<u8> {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(value.to_vec())
    }
    // Owned variant avoids the copy made by `decode`.
    fn decode_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Ok(value)
    }
}
impl Encode for Address {
type Encoded = [u8; 20];
fn encode(self) -> Self::Encoded {
self.0 .0
}
}
impl Decode for Address {
fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
Ok(Self::from_slice(value))
}
}
impl Encode for B256 {
    type Encoded = [u8; 32];
    fn encode(self) -> Self::Encoded {
        self.0
    }
}
impl Decode for B256 {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        // Fallible conversion: a non-32-byte slice yields `Decode` instead of panicking.
        Ok(Self::new(value.try_into().map_err(|_| DatabaseError::Decode)?))
    }
}
impl Encode for String {
    type Encoded = Vec<u8>;
    fn encode(self) -> Self::Encoded {
        self.into_bytes()
    }
}
impl Decode for String {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Self::decode_owned(value.to_vec())
    }
    fn decode_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        // Validates UTF-8; invalid bytes become a decode error.
        Self::from_utf8(value).map_err(|_| DatabaseError::Decode)
    }
}
impl Encode for StoredNibbles {
    type Encoded = Vec<u8>;
    // Delegate to the Compact implementation
    fn encode(self) -> Self::Encoded {
        // NOTE: This used to be `to_compact`, but all it does is append the bytes to the buffer,
        // so we can just use the implementation of `Into<Vec<u8>>` to reuse the buffer.
        self.0.to_vec()
    }
}
impl Decode for StoredNibbles {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self::from_compact(value, value.len()).0)
    }
}
impl Encode for StoredNibblesSubKey {
    type Encoded = Vec<u8>;
    // Delegate to the Compact implementation
    fn encode(self) -> Self::Encoded {
        // 65 bytes: presumably the fixed sub-key width (64 nibbles + length byte) — TODO confirm.
        let mut buf = Vec::with_capacity(65);
        self.to_compact(&mut buf);
        buf
    }
}
impl Decode for StoredNibblesSubKey {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self::from_compact(value, value.len()).0)
    }
}
impl Encode for PruneSegment {
    // A segment id fits one byte, as declared by the encoded type.
    type Encoded = [u8; 1];
    fn encode(self) -> Self::Encoded {
        let mut buf = [0u8];
        self.to_compact(&mut buf.as_mut());
        buf
    }
}
impl Decode for PruneSegment {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self::from_compact(value, value.len()).0)
    }
}
impl Encode for ClientVersion {
    type Encoded = Vec<u8>;
    // Delegate to the Compact implementation
    fn encode(self) -> Self::Encoded {
        let mut buf = vec![];
        self.to_compact(&mut buf);
        buf
    }
}
impl Decode for ClientVersion {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self::from_compact(value, value.len()).0)
    }
}
/// Implements compression for Compact type.
macro_rules! impl_compression_for_compact {
    ($($name:ident$(<$($generic:ident),*>)?),+) => {
        $(
            impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Compress for $name$(<$($generic),*>)? {
                type Compressed = Vec<u8>;
                fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
                    // The `Compact` encoding is used directly as the stored ("compressed") form.
                    let _ = Compact::to_compact(self, buf);
                }
            }
            impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Decompress for $name$(<$($generic),*>)? {
                fn decompress(value: &[u8]) -> Result<$name$(<$($generic),*>)?, $crate::DatabaseError> {
                    let (obj, _) = Compact::from_compact(value, value.len());
                    Ok(obj)
                }
            }
        )+
    };
}
impl_compression_for_compact!(
    Bytes,
    Header,
    Account,
    Log,
    Receipt<T>,
    TxType,
    StorageEntry,
    BranchNodeCompact,
    StoredNibbles,
    StoredNibblesSubKey,
    StorageTrieEntry,
    StoredBlockBodyIndices,
    StoredBlockOmmers<H>,
    StoredBlockWithdrawals,
    StaticFileBlockWithdrawals,
    Bytecode,
    AccountBeforeTx,
    TransactionSigned,
    CompactU256,
    StageCheckpoint,
    PruneCheckpoint,
    ClientVersion,
    // Non-DB
    GenesisAccount
);
#[cfg(feature = "op")]
mod op {
    use super::*;
    use reth_optimism_primitives::{OpReceipt, OpTransactionSigned};
    // Optimism transaction/receipt types reuse the same Compact-backed compression.
    impl_compression_for_compact!(OpTransactionSigned, OpReceipt);
}
mod seismic {
    use super::*;
    use reth_seismic_primitives::{SeismicReceipt, SeismicTransactionSigned};
    // Seismic types are always compiled in (no feature gate, unlike `op` above).
    impl_compression_for_compact!(SeismicTransactionSigned, SeismicReceipt);
}
/// Implements [`Compress`]/[`Decompress`] for fixed-size byte types, additionally
/// exposing `uncompressable_ref` so the raw bytes can be written without copying.
macro_rules! impl_compression_fixed_compact {
    ($($name:tt),+) => {
        $(
            impl Compress for $name {
                type Compressed = Vec<u8>;
                fn uncompressable_ref(&self) -> Option<&[u8]> {
                    // Fixed-size values are stored as-is; expose the raw bytes.
                    Some(self.as_ref())
                }
                fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
                    let _ = Compact::to_compact(self, buf);
                }
            }
            impl Decompress for $name {
                fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> {
                    // NOTE(review): `&value` yields `&&[u8]` and relies on deref coercion;
                    // plain `value` would be equivalent.
                    let (obj, _) = Compact::from_compact(&value, value.len());
                    Ok(obj)
                }
            }
        )+
    };
}
impl_compression_fixed_compact!(B256, Address);
/// Adds wrapper structs for some primitive types so they can use `StructFlags` from Compact, when
/// used as pure table values.
macro_rules! add_wrapper_struct {
    ($(($name:tt, $wrapper:tt)),+) => {
        $(
            /// Wrapper struct so it can use `StructFlags` from Compact, when used as pure table values.
            #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact)]
            #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
            #[add_arbitrary_tests(compact)]
            pub struct $wrapper(pub $name);
            impl From<$name> for $wrapper {
                fn from(value: $name) -> Self {
                    $wrapper(value)
                }
            }
            impl From<$wrapper> for $name {
                fn from(value: $wrapper) -> Self {
                    value.0
                }
            }
            impl std::ops::Deref for $wrapper {
                type Target = $name;
                fn deref(&self) -> &Self::Target {
                    &self.0
                }
            }
        )+
    };
}
// Wrapper table-value types generated below: `CompactU256`, `CompactU64`,
// `CompactClientVersion`.
add_wrapper_struct!((U256, CompactU256));
add_wrapper_struct!((u64, CompactU64));
add_wrapper_struct!((ClientVersion, CompactClientVersion));
#[cfg(test)]
mod tests {
    // each value in the database has an extra field named flags that encodes metadata about other
    // fields in the value, e.g. offset and length.
    //
    // this check is to ensure we do not inadvertently add too many fields to a struct which would
    // expand the flags field and break backwards compatibility
    #[test]
    fn test_ensure_backwards_compatibility() {
        use super::*;
        use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat};
        use reth_primitives_traits::Account;
        use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment};
        use reth_stages_types::{
            AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint,
            ExecutionCheckpoint, HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint,
            StageUnitCheckpoint, StorageHashingCheckpoint,
        };
        // Pin the number of bytes the bitflag field occupies for every stored type.
        assert_eq!(Account::bitflag_encoded_bytes(), 2);
        assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1);
        assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1);
        assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0);
        assert_eq!(CompactU256::bitflag_encoded_bytes(), 1);
        assert_eq!(CompactU64::bitflag_encoded_bytes(), 1);
        assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1);
        assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0);
        assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0);
        assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0);
        assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1);
        assert_eq!(PruneMode::bitflag_encoded_bytes(), 1);
        assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1);
        assert_eq!(Receipt::bitflag_encoded_bytes(), 1);
        assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1);
        assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1);
        assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1);
        assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0);
        assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1);
        // Pin whether the bitflag byte(s) still contain unused (spare) bits.
        validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero);
        validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero);
        validate_bitflag_backwards_compat!(CheckpointBlockRange, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(CompactClientVersion, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(CompactU256, UnusedBits::NotZero);
        validate_bitflag_backwards_compat!(CompactU64, UnusedBits::NotZero);
        validate_bitflag_backwards_compat!(EntitiesCheckpoint, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(ExecutionCheckpoint, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(HeadersCheckpoint, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(IndexHistoryCheckpoint, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(PruneCheckpoint, UnusedBits::NotZero);
        validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(Receipt, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero);
        validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero);
        validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/models/blocks.rs | crates/storage/db-api/src/models/blocks.rs | //! Block related models and types.
use alloy_consensus::Header;
use alloy_primitives::B256;
use reth_codecs::{add_arbitrary_tests, Compact};
use serde::{Deserialize, Serialize};
/// The storage representation of a block's ommers.
///
/// It is stored as the headers of the block's uncles.
#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
#[add_arbitrary_tests(compact)]
pub struct StoredBlockOmmers<H = Header> {
    /// The block headers of this block's uncles.
    pub ommers: Vec<H>,
}
impl<H: Compact> Compact for StoredBlockOmmers<H> {
    /// Writes the ommer list in `Compact` form and returns the number of bytes written.
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        // NOTE(review): `Vec::to_compact` already returns the byte count, so the
        // intermediate `BytesMut` looks avoidable — confirm before simplifying.
        let mut buffer = bytes::BytesMut::new();
        self.ommers.to_compact(&mut buffer);
        let total_length = buffer.len();
        buf.put(buffer);
        total_length
    }
    /// Reads the ommer list back; the payload is read with `buf.len()` as the length,
    /// so the `_len` hint is ignored.
    fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) {
        let (ommers, new_buf) = Vec::from_compact(buf, buf.len());
        (Self { ommers }, new_buf)
    }
}
/// Hash of the block header.
pub type HeaderHash = B256;
#[cfg(test)]
mod tests {
    use super::*;
    use crate::table::{Compress, Decompress};
    #[test]
    fn test_ommer() {
        // Round-trip through the Compress/Decompress impls built on `Compact`.
        let mut ommer = StoredBlockOmmers::default();
        ommer.ommers.push(Header::default());
        ommer.ommers.push(Header::default());
        assert_eq!(ommer.clone(), StoredBlockOmmers::decompress(&ommer.compress()).unwrap());
    }
    #[test]
    fn fuzz_stored_block_ommers() {
        fuzz_test_stored_block_ommers(StoredBlockOmmers::default())
    }
    // `test_fuzz` additionally generates fuzzed inputs beyond the seed case above.
    #[test_fuzz::test_fuzz]
    fn fuzz_test_stored_block_ommers(obj: StoredBlockOmmers) {
        use reth_codecs::Compact;
        let mut buf = vec![];
        let len = obj.to_compact(&mut buf);
        let (same_obj, _) = StoredBlockOmmers::from_compact(buf.as_ref(), len);
        assert_eq!(obj, same_obj);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/models/accounts.rs | crates/storage/db-api/src/models/accounts.rs | //! Account related models and types.
use std::ops::{Range, RangeInclusive};
use crate::{
impl_fixed_arbitrary,
table::{Decode, Encode},
DatabaseError,
};
use alloy_primitives::{Address, BlockNumber, StorageKey};
use serde::{Deserialize, Serialize};
/// [`BlockNumber`] concatenated with [`Address`].
///
/// Since it's used as a key, it isn't compressed when encoding it.
#[derive(
    Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Ord, PartialOrd, Hash,
)]
pub struct BlockNumberAddress(pub (BlockNumber, Address));
impl BlockNumberAddress {
    /// Create a new Range from `start` to `end`
    ///
    /// Note: End is inclusive
    pub fn range(range: RangeInclusive<BlockNumber>) -> Range<Self> {
        // The inclusive block range becomes a half-open key range by bumping the end
        // block by one, with the zero address as the bound on both sides.
        // NOTE(review): `*range.end() + 1` overflows if `end == u64::MAX` — presumably
        // callers never pass that; confirm.
        (*range.start(), Address::ZERO).into()..(*range.end() + 1, Address::ZERO).into()
    }
    /// Return the block number
    pub const fn block_number(&self) -> BlockNumber {
        self.0 .0
    }
    /// Return the address
    pub const fn address(&self) -> Address {
        self.0 .1
    }
    /// Consumes `Self` and returns [`BlockNumber`], [`Address`]
    pub const fn take(self) -> (BlockNumber, Address) {
        (self.0 .0, self.0 .1)
    }
}
impl From<(BlockNumber, Address)> for BlockNumberAddress {
    fn from(tpl: (u64, Address)) -> Self {
        Self(tpl)
    }
}
impl Encode for BlockNumberAddress {
    type Encoded = [u8; 28];
    /// Encodes as `8-byte big-endian block number || 20-byte address`.
    fn encode(self) -> Self::Encoded {
        let (block_number, address) = self.take();
        let mut out = [0u8; 28];
        out[..8].copy_from_slice(&block_number.to_be_bytes());
        out[8..].copy_from_slice(address.as_slice());
        out
    }
}
impl Decode for BlockNumberAddress {
    /// Decodes `8-byte big-endian block number || 20-byte address`.
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        // The encoded form is exactly 28 bytes; check up front so malformed input
        // yields `Decode` instead of a slice-index panic (both `value[..8]` and
        // `Address::from_slice` panic on wrong lengths).
        if value.len() != 28 {
            return Err(DatabaseError::Decode)
        }
        let block_number =
            u64::from_be_bytes(value[..8].try_into().map_err(|_| DatabaseError::Decode)?);
        // Renamed from `hash`: the trailing 20 bytes are an address, not a hash.
        let address = Address::from_slice(&value[8..]);
        Ok(Self((block_number, address)))
    }
}
/// [`Address`] concatenated with [`StorageKey`]. Used by `reth_etl` and history stages.
///
/// Since it's used as a key, it isn't compressed when encoding it.
#[derive(
    Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Ord, PartialOrd, Hash,
)]
pub struct AddressStorageKey(pub (Address, StorageKey));
impl Encode for AddressStorageKey {
    type Encoded = [u8; 52];
    /// Encodes as `20-byte address || 32-byte storage key`.
    fn encode(self) -> Self::Encoded {
        let Self((address, storage_key)) = self;
        let mut out = [0u8; 52];
        out[..20].copy_from_slice(address.as_slice());
        out[20..].copy_from_slice(storage_key.as_slice());
        out
    }
}
impl Decode for AddressStorageKey {
fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
let address = Address::from_slice(&value[..20]);
let storage_key = StorageKey::from_slice(&value[20..]);
Ok(Self((address, storage_key)))
}
}
// Fixed-width `Arbitrary` impls: the widths must match the `Encode` array sizes above.
impl_fixed_arbitrary!((BlockNumberAddress, 28), (AddressStorageKey, 52));
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::address;
    use rand::{rng, Rng};
    // NOTE(review): `Arbitrary`/`Unstructured` are not imported in this module;
    // presumably the `impl_fixed_arbitrary!` expansion brings them into scope — confirm.
    #[test]
    fn test_block_number_address() {
        // Encode must produce `block number (BE) || address`, and decode must invert it.
        let num = 1u64;
        let hash = address!("0xba5e000000000000000000000000000000000000");
        let key = BlockNumberAddress((num, hash));
        let mut bytes = [0u8; 28];
        bytes[..8].copy_from_slice(&num.to_be_bytes());
        bytes[8..].copy_from_slice(hash.as_slice());
        let encoded = Encode::encode(key);
        assert_eq!(encoded, bytes);
        let decoded: BlockNumberAddress = Decode::decode(&encoded).unwrap();
        assert_eq!(decoded, key);
    }
    #[test]
    fn test_block_number_address_rand() {
        // `Arbitrary` must be the identity on raw bytes: re-encoding reproduces them.
        let mut bytes = [0u8; 28];
        rng().fill(bytes.as_mut_slice());
        let key = BlockNumberAddress::arbitrary(&mut Unstructured::new(&bytes)).unwrap();
        assert_eq!(bytes, Encode::encode(key));
    }
    #[test]
    fn test_address_storage_key() {
        // Encode must produce `address || storage key`, and decode must invert it.
        let storage_key = StorageKey::random();
        let address = address!("0xba5e000000000000000000000000000000000000");
        let key = AddressStorageKey((address, storage_key));
        let mut bytes = [0u8; 52];
        bytes[..20].copy_from_slice(address.as_slice());
        bytes[20..].copy_from_slice(storage_key.as_slice());
        let encoded = Encode::encode(key);
        assert_eq!(encoded, bytes);
        let decoded: AddressStorageKey = Decode::decode(&encoded).unwrap();
        assert_eq!(decoded, key);
    }
    #[test]
    fn test_address_storage_key_rand() {
        let mut bytes = [0u8; 52];
        rng().fill(bytes.as_mut_slice());
        let key = AddressStorageKey::arbitrary(&mut Unstructured::new(&bytes)).unwrap();
        assert_eq!(bytes, Encode::encode(key));
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/tables/raw.rs | crates/storage/db-api/src/tables/raw.rs | use crate::{
table::{Compress, Decode, Decompress, DupSort, Encode, Key, Table, Value},
DatabaseError,
};
use serde::{Deserialize, Serialize};
/// Tuple with `RawKey<T::Key>` and `RawValue<T::Value>`.
pub type TableRawRow<T> = (RawKey<<T as Table>::Key>, RawValue<<T as Table>::Value>);
/// Raw table that can be used to access any table and its data in raw mode.
/// This is useful for delayed decoding/encoding of data.
#[derive(Default, Copy, Clone, Debug)]
pub struct RawTable<T: Table> {
    // Zero-sized marker tying the raw view to the underlying table type.
    phantom: std::marker::PhantomData<T>,
}
impl<T: Table> Table for RawTable<T> {
    // Mirrors the wrapped table's name so it maps onto the same database table.
    const NAME: &'static str = T::NAME;
    const DUPSORT: bool = false;
    type Key = RawKey<T::Key>;
    type Value = RawValue<T::Value>;
}
/// Raw `DupSort` table that can be used to access any table and its data in raw mode.
/// This is useful for delayed decoding/encoding of data.
#[derive(Default, Copy, Clone, Debug)]
pub struct RawDupSort<T: DupSort> {
    phantom: std::marker::PhantomData<T>,
}
impl<T: DupSort> Table for RawDupSort<T> {
    const NAME: &'static str = T::NAME;
    const DUPSORT: bool = true;
    type Key = RawKey<T::Key>;
    type Value = RawValue<T::Value>;
}
impl<T: DupSort> DupSort for RawDupSort<T> {
    // Sub-keys are wrapped the same way as primary keys.
    type SubKey = RawKey<T::SubKey>;
}
/// Raw table key.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct RawKey<K: Key> {
    /// Inner encoded key
    key: Vec<u8>,
    _phantom: std::marker::PhantomData<K>,
}
impl<K: Key> RawKey<K> {
    /// Create new raw key.
    pub fn new(key: K) -> Self {
        // Encode eagerly; the typed key can be recovered later via `key()`.
        Self { key: K::encode(key).into(), _phantom: std::marker::PhantomData }
    }
    /// Creates a raw key from an existing `Vec`. Useful when we already have the encoded
    /// key.
    pub const fn from_vec(vec: Vec<u8>) -> Self {
        Self { key: vec, _phantom: std::marker::PhantomData }
    }
    /// Returns the decoded value.
    pub fn key(&self) -> Result<K, DatabaseError> {
        K::decode(&self.key)
    }
    /// Returns the raw key as seen on the database.
    pub const fn raw_key(&self) -> &Vec<u8> {
        &self.key
    }
    /// Consumes [`Self`] and returns the inner raw key.
    pub fn into_key(self) -> Vec<u8> {
        self.key
    }
}
impl<K: Key> From<K> for RawKey<K> {
    /// Encodes `key` eagerly and stores the raw bytes.
    fn from(key: K) -> Self {
        Self::new(key)
    }
}
impl AsRef<[u8]> for RawKey<Vec<u8>> {
    fn as_ref(&self) -> &[u8] {
        self.key.as_slice()
    }
}
/// Encoding is a no-op: the key bytes are already in their database form.
impl<K: Key> Encode for RawKey<K> {
    type Encoded = Vec<u8>;
    fn encode(self) -> Self::Encoded {
        self.into_key()
    }
}
/// Decoding is a no-op: the raw bytes are stored verbatim for later, lazy decoding.
impl<K: Key> Decode for RawKey<K> {
    fn decode(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self::from_vec(value.to_vec()))
    }
    fn decode_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Ok(Self::from_vec(value))
    }
}
/// Raw table value.
// NOTE(review): `Serialize` is derived but `Deserialize` is not — confirm raw values
// are never deserialized via serde.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Serialize, Ord, Hash)]
pub struct RawValue<V: Value> {
    /// Inner compressed value
    value: Vec<u8>,
    // Marker only; explicitly skipped during serialization.
    #[serde(skip)]
    _phantom: std::marker::PhantomData<V>,
}
impl<V: Value> RawValue<V> {
    /// Create new raw value.
    pub fn new(value: V) -> Self {
        // Compress eagerly; the typed value can be recovered later via `value()`.
        Self { value: V::compress(value).into(), _phantom: std::marker::PhantomData }
    }
    /// Creates a raw value from an existing `Vec`. Useful when we already have the encoded
    /// value.
    pub const fn from_vec(vec: Vec<u8>) -> Self {
        Self { value: vec, _phantom: std::marker::PhantomData }
    }
    /// Returns the decompressed value.
    pub fn value(&self) -> Result<V, DatabaseError> {
        V::decompress(&self.value)
    }
    /// Returns the raw value as seen on the database.
    pub fn raw_value(&self) -> &[u8] {
        &self.value
    }
    /// Consumes [`Self`] and returns the inner raw value.
    pub fn into_value(self) -> Vec<u8> {
        self.value
    }
}
impl<V: Value> From<V> for RawValue<V> {
    /// Compresses `value` eagerly and stores the raw bytes.
    fn from(value: V) -> Self {
        Self::new(value)
    }
}
impl AsRef<[u8]> for RawValue<Vec<u8>> {
    fn as_ref(&self) -> &[u8] {
        self.value.as_slice()
    }
}
/// Compression is a no-op: the stored bytes are already in their on-disk form.
impl<V: Value> Compress for RawValue<V> {
    type Compressed = Vec<u8>;
    fn uncompressable_ref(&self) -> Option<&[u8]> {
        // The payload is kept pre-compressed, so it can be written out as-is.
        Some(self.value.as_slice())
    }
    fn compress(self) -> Self::Compressed {
        self.into_value()
    }
    fn compress_to_buf<B: bytes::BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
        buf.put_slice(&self.value)
    }
}
/// Decompression is a no-op: the raw bytes are kept verbatim for later, lazy decompression.
impl<V: Value> Decompress for RawValue<V> {
    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
        Ok(Self::from_vec(value.to_vec()))
    }
    fn decompress_owned(value: Vec<u8>) -> Result<Self, DatabaseError> {
        Ok(Self::from_vec(value))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/tables/mod.rs | crates/storage/db-api/src/tables/mod.rs | //! Tables and data models.
//!
//! # Overview
//!
//! This module defines the tables in reth, as well as some table-related abstractions:
//!
//! - [`codecs`] integrates different codecs into [`Encode`] and [`Decode`]
//! - [`models`](crate::models) defines the values written to tables
//!
//! # Database Tour
//!
//! TODO(onbjerg): Find appropriate format for this...
pub mod codecs;
mod raw;
pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow};
use crate::{
models::{
accounts::BlockNumberAddress,
blocks::{HeaderHash, StoredBlockOmmers},
storage_sharded_key::StorageShardedKey,
AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey,
StoredBlockBodyIndices, StoredBlockWithdrawals,
},
table::{Decode, DupSort, Encode, Table, TableInfo},
};
use alloy_consensus::Header;
use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256};
use reth_ethereum_primitives::{Receipt, TransactionSigned};
use reth_primitives_traits::{Account, Bytecode, StorageEntry};
use reth_prune_types::{PruneCheckpoint, PruneSegment};
use reth_stages_types::StageCheckpoint;
use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey};
use serde::{Deserialize, Serialize};
use std::fmt;
/// Enum for the types of tables present in libmdbx.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum TableType {
/// Plain key-value table: each key maps to a single value.
Table,
/// `DUPSORT` key-value table: a single key may map to multiple sorted values.
DupSort,
}
/// The general purpose of this is to use with a combination of Tables enum,
/// by implementing a `TableViewer` trait you can operate on db tables in an abstract way.
///
/// # Example
///
/// ```
/// use reth_db_api::{
///     table::{DupSort, Table},
///     TableViewer, Tables,
/// };
///
/// struct MyTableViewer;
///
/// impl TableViewer<()> for MyTableViewer {
///     type Error = &'static str;
///
///     fn view<T: Table>(&self) -> Result<(), Self::Error> {
///         // operate on table in a generic way
///         Ok(())
///     }
///
///     fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error> {
///         // operate on a dupsort table in a generic way
///         Ok(())
///     }
/// }
///
/// let viewer = MyTableViewer {};
///
/// let _ = Tables::Headers.view(&viewer);
/// let _ = Tables::Transactions.view(&viewer);
/// ```
pub trait TableViewer<R> {
/// The error type returned by the viewer.
type Error;
/// Calls `view` with the correct table type, dispatching on the runtime
/// [`Tables`] value.
fn view_rt(&self, table: Tables) -> Result<R, Self::Error> {
table.view(self)
}
/// Operate on the table in a generic way.
fn view<T: Table>(&self) -> Result<R, Self::Error>;
/// Operate on the dupsort table in a generic way.
///
/// By default, the `view` function is invoked unless overridden.
fn view_dupsort<T: DupSort>(&self) -> Result<R, Self::Error> {
self.view::<T>()
}
}
/// General trait for defining the set of tables in a database.
/// Used to initialize the database schema.
pub trait TableSet {
/// Returns an iterator over (type-erased) descriptors of the tables in the set.
fn tables() -> Box<dyn Iterator<Item = Box<dyn TableInfo>>>;
}
/// Defines all the tables in the database.
///
/// For every `table` declaration this generates a marker type, the `Table`
/// (and optionally `DupSort`) trait impls, a variant in the `Tables` enum,
/// and the `tables_to_generic!` dispatch macro.
#[macro_export]
macro_rules! tables {
// Internal `@`-prefixed helper rules; not part of the public grammar.
// @bool: expands to `true` iff any tokens were passed (detects a SubKey).
(@bool) => { false };
(@bool $($t:tt)+) => { true };
// @view: dispatches to `view` or `view_dupsort` depending on SubKey presence.
(@view $name:ident $v:ident) => { $v.view::<$name>() };
(@view $name:ident $v:ident $_subkey:ty) => { $v.view_dupsort::<$name>() };
// @value_doc: rustdoc link for the value type (plain code text when generic).
(@value_doc $key:ty, $value:ty) => {
concat!("[`", stringify!($value), "`]")
};
// Don't generate links if we have generics
(@value_doc $key:ty, $value:ty, $($generic:ident),*) => {
concat!("`", stringify!($value), "`")
};
// Main rule: each `table` declaration yields a marker type, trait impls,
// and a `Tables` variant.
($($(#[$attr:meta])* table $name:ident$(<$($generic:ident $(= $default:ty)?),*>)? { type Key = $key:ty; type Value = $value:ty; $(type SubKey = $subkey:ty;)? } )*) => {
// Table marker types.
$(
$(#[$attr])*
///
#[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to ", tables!(@value_doc $key, $value, $($($generic),*)?), ".")]
$(
#[doc = concat!("\n\nThis table's `DUPSORT` subkey is [`", stringify!($subkey), "`].")]
)?
pub struct $name$(<$($generic $( = $default)?),*>)? {
_private: std::marker::PhantomData<($($($generic,)*)?)>,
}
// Ideally this implementation wouldn't exist, but it is necessary to derive `Debug`
// when a type is generic over `T: Table`. See: https://github.com/rust-lang/rust/issues/26925
impl$(<$($generic),*>)? fmt::Debug for $name$(<$($generic),*>)? {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
unreachable!("this type cannot be instantiated")
}
}
impl$(<$($generic),*>)? $crate::table::Table for $name$(<$($generic),*>)?
where
$value: $crate::table::Value + 'static
$($(,$generic: Send + Sync)*)?
{
const NAME: &'static str = table_names::$name;
const DUPSORT: bool = tables!(@bool $($subkey)?);
type Key = $key;
type Value = $value;
}
$(
impl DupSort for $name {
type SubKey = $subkey;
}
)?
)*
// Tables enum.
/// A table in the database.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum Tables {
$(
#[doc = concat!("The [`", stringify!($name), "`] database table.")]
$name,
)*
}
impl Tables {
/// All the tables in the database.
pub const ALL: &'static [Self] = &[$(Self::$name,)*];
/// The number of tables in the database.
pub const COUNT: usize = Self::ALL.len();
/// Returns the name of the table as a string.
pub const fn name(&self) -> &'static str {
match self {
$(
Self::$name => table_names::$name,
)*
}
}
/// Returns `true` if the table is a `DUPSORT` table.
pub const fn is_dupsort(&self) -> bool {
match self {
$(
Self::$name => tables!(@bool $($subkey)?),
)*
}
}
/// The type of the given table in database.
pub const fn table_type(&self) -> TableType {
if self.is_dupsort() {
TableType::DupSort
} else {
TableType::Table
}
}
/// Allows to operate on specific table type
pub fn view<T, R>(&self, visitor: &T) -> Result<R, T::Error>
where
T: ?Sized + TableViewer<R>,
{
match self {
$(
Self::$name => tables!(@view $name visitor $($subkey)?),
)*
}
}
}
impl fmt::Debug for Tables {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.name())
}
}
impl fmt::Display for Tables {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.name().fmt(f)
}
}
impl std::str::FromStr for Tables {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
$(
table_names::$name => Ok(Self::$name),
)*
s => Err(format!("unknown table: {s:?}")),
}
}
}
impl TableInfo for Tables {
fn name(&self) -> &'static str {
self.name()
}
fn is_dupsort(&self) -> bool {
self.is_dupsort()
}
}
impl TableSet for Tables {
fn tables() -> Box<dyn Iterator<Item = Box<dyn TableInfo>>> {
Box::new(Self::ALL.iter().map(|table| Box::new(*table) as Box<dyn TableInfo>))
}
}
// Need constants to match on in the `FromStr` implementation.
#[expect(non_upper_case_globals)]
mod table_names {
$(
pub(super) const $name: &'static str = stringify!($name);
)*
}
/// Maps a run-time [`Tables`] enum value to its corresponding compile-time [`Table`] type.
///
/// This is a simpler alternative to [`TableViewer`].
///
/// # Examples
///
/// ```
/// use reth_db_api::{table::Table, Tables, tables_to_generic};
///
/// let table = Tables::Headers;
/// let result = tables_to_generic!(table, |GenericTable| <GenericTable as Table>::NAME);
/// assert_eq!(result, table.name());
/// ```
#[macro_export]
macro_rules! tables_to_generic {
($table:expr, |$generic_name:ident| $e:expr) => {
match $table {
$(
Tables::$name => {
use $crate::tables::$name as $generic_name;
$e
},
)*
}
};
}
};
}
tables! {
/// Stores the header hashes belonging to the canonical chain.
table CanonicalHeaders {
type Key = BlockNumber;
type Value = HeaderHash;
}
/// Stores the total difficulty from a block header.
table HeaderTerminalDifficulties {
type Key = BlockNumber;
type Value = CompactU256;
}
/// Stores the block number corresponding to a header.
table HeaderNumbers {
type Key = BlockHash;
type Value = BlockNumber;
}
/// Stores header bodies.
table Headers<H = Header> {
type Key = BlockNumber;
type Value = H;
}
/// Stores block indices that contain the indexes of transactions and their count.
///
/// More information about stored indices can be found in the [`StoredBlockBodyIndices`] struct.
table BlockBodyIndices {
type Key = BlockNumber;
type Value = StoredBlockBodyIndices;
}
/// Stores the uncles/ommers of the block.
table BlockOmmers<H = Header> {
type Key = BlockNumber;
type Value = StoredBlockOmmers<H>;
}
/// Stores the block withdrawals.
table BlockWithdrawals {
type Key = BlockNumber;
type Value = StoredBlockWithdrawals;
}
/// Canonical only. Stores the transaction body for canonical transactions.
table Transactions<T = TransactionSigned> {
type Key = TxNumber;
type Value = T;
}
/// Stores the mapping of the transaction hash to the transaction number.
table TransactionHashNumbers {
type Key = TxHash;
type Value = TxNumber;
}
/// Stores the mapping of transaction number to the blocks number.
///
/// The key is the highest transaction ID in the block.
table TransactionBlocks {
type Key = TxNumber;
type Value = BlockNumber;
}
/// Canonical only. Stores transaction receipts.
table Receipts<R = Receipt> {
type Key = TxNumber;
type Value = R;
}
/// Stores all smart contract bytecodes.
/// Multiple accounts may share the same bytecode,
/// so a reference counter would need to be introduced.
/// This will be a small optimization on state.
table Bytecodes {
type Key = B256;
type Value = Bytecode;
}
/// Stores the current state of an [`Account`].
table PlainAccountState {
type Key = Address;
type Value = Account;
}
/// Stores the current value of a storage key.
table PlainStorageState {
type Key = Address;
type Value = StorageEntry;
type SubKey = B256;
}
/// Stores pointers to block changeset with changes for each account key.
///
/// The last shard key of the storage will contain `u64::MAX` `BlockNumber`,
/// which allows a small optimization on db access when the change is in the plain state.
///
/// Imagine having shards as:
/// * `Address | 100`
/// * `Address | u64::MAX`
///
/// What we need to find is the number that is one greater than N. The db `seek` function allows
/// us to fetch the shard that is equal to or greater than the asked key. For example:
/// * For N=50 we would get the first shard.
/// * For N=150 we would get the second shard.
/// * If max block number is 200 and we ask for N=250 we would fetch last shard and know that needed entry is in `AccountPlainState`.
/// * If there were no shard we would get `None` entry or entry of different storage key.
///
/// Code example can be found in `reth_provider::HistoricalStateProviderRef`
table AccountsHistory {
type Key = ShardedKey<Address>;
type Value = BlockNumberList;
}
/// Stores pointers to block number changeset with changes for each storage key.
///
/// The last shard key of the storage will contain `u64::MAX` `BlockNumber`,
/// which allows a small optimization on db access when the change is in the plain state.
///
/// Imagine having shards as:
/// * `Address | StorageKey | 100`
/// * `Address | StorageKey | u64::MAX`
///
/// What we need to find is the number that is one greater than N. The db `seek` function allows
/// us to fetch the shard that is equal to or greater than the asked key. For example:
/// * For N=50 we would get the first shard.
/// * For N=150 we would get the second shard.
/// * If max block number is 200 and we ask for N=250 we would fetch last shard and know that needed entry is in `StoragePlainState`.
/// * If there were no shard we would get `None` entry or entry of different storage key.
///
/// Code example can be found in `reth_provider::HistoricalStateProviderRef`
table StoragesHistory {
type Key = StorageShardedKey;
type Value = BlockNumberList;
}
/// Stores the state of an account before a certain transaction changed it.
/// Change on state can be: account is created, selfdestructed, touched while empty
/// or changed balance, nonce.
table AccountChangeSets {
type Key = BlockNumber;
type Value = AccountBeforeTx;
type SubKey = Address;
}
/// Stores the state of a storage key before a certain transaction changed it.
/// If [`StorageEntry::value`] is zero, this means storage was not existing
/// and needs to be removed.
table StorageChangeSets {
type Key = BlockNumberAddress;
type Value = StorageEntry;
type SubKey = B256;
}
/// Stores the current state of an [`Account`] indexed by its hashed address (`keccak256(address)`).
/// This table is in preparation for merklization and calculation of state root.
/// We are saving whole account data as it is needed for partial update when
/// part of storage is changed. Benefit for merklization is that hashed addresses are sorted.
table HashedAccounts {
type Key = B256;
type Value = Account;
}
/// Stores the current storage values indexed by the hashed address (`keccak256(address)`) and
/// the hash of the storage key (`keccak256(key)`).
/// This table is in preparation for merklization and calculation of state root.
/// Benefit for merklization is that hashed addresses/keys are sorted.
table HashedStorages {
type Key = B256;
type Value = StorageEntry;
type SubKey = B256;
}
/// Stores the current state's Merkle Patricia Tree.
table AccountsTrie {
type Key = StoredNibbles;
type Value = BranchNodeCompact;
}
/// From `HashedAddress` => `NibblesSubKey` => Intermediate value
table StoragesTrie {
type Key = B256;
type Value = StorageTrieEntry;
type SubKey = StoredNibblesSubKey;
}
/// Stores the transaction sender for each canonical transaction.
/// It is needed to speed up the execution stage and allows fetching the signer
/// without performing signed-transaction recovery.
table TransactionSenders {
type Key = TxNumber;
type Value = Address;
}
/// Stores the highest synced block number and stage-specific checkpoint of each stage.
table StageCheckpoints {
type Key = StageId;
type Value = StageCheckpoint;
}
/// Stores arbitrary data to keep track of a stage first-sync progress.
table StageCheckpointProgresses {
type Key = StageId;
type Value = Vec<u8>;
}
/// Stores the highest pruned block number and prune mode of each prune segment.
table PruneCheckpoints {
type Key = PruneSegment;
type Value = PruneCheckpoint;
}
/// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds.
table VersionHistory {
type Key = u64;
type Value = ClientVersion;
}
/// Stores generic chain state info, like the last finalized block.
table ChainState {
type Key = ChainStateKey;
type Value = BlockNumber;
}
}
/// Keys for the `ChainState` table.
#[derive(Ord, Clone, Eq, PartialOrd, PartialEq, Debug, Deserialize, Serialize, Hash)]
pub enum ChainStateKey {
/// Last finalized block key
LastFinalizedBlock,
/// Last safe block key
// NOTE(review): the doubled "Block" in the variant name looks like a typo,
// but the name is public API (and appears in serde output), so renaming
// it would break callers — kept as-is.
LastSafeBlockBlock,
}
impl Encode for ChainStateKey {
    type Encoded = [u8; 1];

    /// Encodes the key as a single discriminant byte:
    /// `0` = last finalized block, `1` = last safe block.
    fn encode(self) -> Self::Encoded {
        let discriminant: u8 = match self {
            Self::LastFinalizedBlock => 0,
            Self::LastSafeBlockBlock => 1,
        };
        [discriminant]
    }
}
impl Decode for ChainStateKey {
    /// Decodes a key from its single-byte encoding; anything other than
    /// exactly one byte of value `0` or `1` is a decode error.
    fn decode(value: &[u8]) -> Result<Self, crate::DatabaseError> {
        if let [discriminant] = value {
            match *discriminant {
                0 => return Ok(Self::LastFinalizedBlock),
                1 => return Ok(Self::LastSafeBlockBlock),
                _ => {}
            }
        }
        Err(crate::DatabaseError::Decode)
    }
}
// Alias types.
/// List with transaction numbers.
pub type BlockNumberList = IntegerList;
/// Encoded stage id.
///
/// Stage identifiers are stored as their human-readable string names.
pub type StageId = String;
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;

    /// Round-trips every table through its `Debug`/`Display` name and `FromStr`.
    #[test]
    fn parse_table_from_str() {
        for table in Tables::ALL.iter().copied() {
            let name = table.name();
            assert_eq!(format!("{table:?}"), name);
            assert_eq!(table.to_string(), name);
            assert_eq!(Tables::from_str(name).unwrap(), table);
        }
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/tables/codecs/mod.rs | crates/storage/db-api/src/tables/codecs/mod.rs | //! Integrates different codecs into `table::Encode` and `table::Decode`.
pub mod fuzz;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/tables/codecs/fuzz/inputs.rs | crates/storage/db-api/src/tables/codecs/fuzz/inputs.rs | //! Curates the input coming from the fuzzer for certain types.
use crate::models::IntegerList;
use serde::{Deserialize, Serialize};
/// Fuzzer input wrapper for `u64` lists.
///
/// Conversion into [`IntegerList`] sorts the values so that the pre-sorted
/// constructor's invariant holds. (Emptiness is not enforced here.)
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
pub struct IntegerListInput(pub Vec<u64>);
impl From<IntegerListInput> for IntegerList {
fn from(list: IntegerListInput) -> Self {
let mut v = list.0;
v.sort_unstable();
Self::new_pre_sorted(v)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs | crates/storage/db-api/src/tables/codecs/fuzz/mod.rs | //! Implements fuzzing targets to be used by test-fuzz
mod inputs;
/// Fuzzer generates a random instance of the object and proceeds to encode and decode it. It then
/// makes sure that it matches the original object.
///
/// Some types like [`IntegerList`] might have some restrictions on how they're fuzzed. For example,
/// the list is assumed to be sorted before creating the object.
// Arguments per tuple: (type under test, fuzzer input type, encode trait,
// encode method, decode trait, decode method).
macro_rules! impl_fuzzer_with_input {
($(($name:tt, $input_type:tt, $encode:tt, $encode_method:tt, $decode:tt, $decode_method:tt)),+) => {
$(
/// Macro generated module to be used by test-fuzz and `bench` if it applies.
#[expect(non_snake_case)]
#[cfg(any(test, feature = "bench"))]
pub mod $name {
use crate::table;
#[expect(unused_imports)]
use reth_primitives_traits::*;
#[allow(unused_imports)]
use super::inputs::*;
use crate::models::*;
/// Encodes and decodes table types returning its encoded size and the decoded object.
/// This method is used for benchmarking, so its parameter should be the actual type that is being tested.
pub fn encode_and_decode(obj: $name) -> (usize, $name) {
let data = table::$encode::$encode_method(obj);
let size = data.len();
// Some `data` might be a fixed array.
(size, table::$decode::$decode_method(&data).expect("failed to decode"))
}
#[cfg(test)]
#[expect(missing_docs)]
#[test_fuzz::test_fuzz]
pub fn fuzz(obj: $input_type) {
let obj: $name = obj.into();
assert!(encode_and_decode(obj.clone()).1 == obj );
}
#[test]
#[expect(missing_docs)]
pub fn test() {
fuzz($input_type::default())
}
}
)+
};
}
/// Fuzzer generates a random instance of the object and proceeds to encode and decode it. It then
/// makes sure that it matches the original object.
///
/// Shorthand for key types: fuzzes via `Encode`/`Decode` with the type serving
/// as its own fuzz input.
macro_rules! impl_fuzzer_key {
($($name:tt),+) => {
$(
impl_fuzzer_with_input!(($name, $name, Encode, encode, Decode, decode));
)+
};
}
/// Fuzzer generates a random instance of the object and proceeds to compress and decompress it. It
/// then makes sure that it matches the original object.
///
/// Currently unused (see `#[expect(unused_macros)]`); kept as a shorthand over
/// [`impl_fuzzer_value_with_input`] for types that are their own fuzz input.
#[expect(unused_macros)]
macro_rules! impl_fuzzer_value {
    ($($name:tt),+) => {
        $(
            // `impl_fuzzer_value_with_input!` only matches parenthesized pairs
            // (`(($name, $input)),+`), so the pair must be wrapped in parens;
            // the previous bare `$name, $name` form could never have matched
            // and would have failed to compile on first use.
            impl_fuzzer_value_with_input!(($name, $name));
        )+
    };
}
/// Fuzzer generates a random instance of the object and proceeds to compress and decompress it. It
/// then makes sure that it matches the original object. It supports being fed a different kind of
/// input, as long as it supports `Into<T>`.
// Delegates to `impl_fuzzer_with_input` with the `Compress`/`Decompress` traits.
macro_rules! impl_fuzzer_value_with_input {
($(($name:tt, $input:tt)),+) => {
$(
impl_fuzzer_with_input!(($name, $input, Compress, compress, Decompress, decompress));
)+
};
}
impl_fuzzer_key!(BlockNumberAddress);
impl_fuzzer_value_with_input!((IntegerList, IntegerListInput));
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/errors/src/db.rs | crates/storage/errors/src/db.rs | use alloc::{
boxed::Box,
format,
string::{String, ToString},
vec::Vec,
};
use core::{
fmt::{Debug, Display},
str::FromStr,
};
/// Database error type.
#[derive(Clone, Debug, PartialEq, Eq, thiserror::Error)]
pub enum DatabaseError {
/// Failed to open the database.
#[error("failed to open the database: {_0}")]
Open(DatabaseErrorInfo),
/// Failed to create a table in the database.
#[error("failed to create a table: {_0}")]
CreateTable(DatabaseErrorInfo),
/// Failed to write a value into a table.
///
/// The detailed context (operation, table, key) lives in the boxed
/// [`DatabaseWriteError`].
#[error(transparent)]
Write(Box<DatabaseWriteError>),
/// Failed to read a value from a table.
#[error("failed to read a value from a database table: {_0}")]
Read(DatabaseErrorInfo),
/// Failed to delete a `(key, value)` pair from a table.
#[error("database delete error code: {_0}")]
Delete(DatabaseErrorInfo),
/// Failed to commit transaction changes into the database.
#[error("failed to commit transaction changes: {_0}")]
Commit(DatabaseErrorInfo),
/// Failed to initiate a transaction.
#[error("failed to initialize a transaction: {_0}")]
InitTx(DatabaseErrorInfo),
/// Failed to initialize a cursor.
#[error("failed to initialize a cursor: {_0}")]
InitCursor(DatabaseErrorInfo),
/// Failed to decode a key from a table.
#[error("failed to decode a key from a table")]
Decode,
/// Failed to get database stats.
#[error("failed to get stats: {_0}")]
Stats(DatabaseErrorInfo),
/// Failed to use the specified log level, as it's not available.
#[error("log level {_0:?} is not available")]
LogLevelUnavailable(LogLevel),
/// Other unspecified error.
#[error("{_0}")]
Other(String),
}
/// Common error struct to propagate implementation-specific error information.
#[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)]
#[display("{message} ({code})")]
pub struct DatabaseErrorInfo {
/// Human-readable error message.
pub message: Box<str>,
/// Backend-specific numeric error code.
pub code: i32,
}
impl<E> From<E> for DatabaseErrorInfo
where
    E: Display + Into<i32>,
{
    /// Builds the info pair by rendering the error's message before
    /// consuming the error for its numeric code.
    #[inline]
    fn from(error: E) -> Self {
        let message = error.to_string().into_boxed_str();
        let code = error.into();
        Self { message, code }
    }
}
impl From<DatabaseWriteError> for DatabaseError {
    /// Boxes the write error and wraps it in the `Write` variant.
    #[inline]
    fn from(error: DatabaseWriteError) -> Self {
        Self::Write(error.into())
    }
}
/// Database write error.
#[error("write operation {:?} failed for key \"{}\" in table {}: {}",
self.operation,
alloy_primitives::hex::encode(&self.key),
self.table_name,
self.info)]
pub struct DatabaseWriteError {
/// The error code and message.
pub info: DatabaseErrorInfo,
/// The write operation type.
pub operation: DatabaseWriteOperation,
/// The table name.
pub table_name: &'static str,
/// The write key (raw bytes; hex-encoded in the error message).
pub key: Vec<u8>,
}
/// Database write operation type.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DatabaseWriteOperation {
/// Append via cursor.
CursorAppend,
/// Upsert via cursor.
CursorUpsert,
/// Insert via cursor.
CursorInsert,
/// Append a duplicate value via cursor (`DUPSORT` tables).
CursorAppendDup,
/// Plain (non-cursor) put.
Put,
}
/// Database log level.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LogLevel {
    /// Enables logging for critical conditions, i.e. assertion failures.
    Fatal,
    /// Enables logging for error conditions.
    Error,
    /// Enables logging for warning conditions.
    Warn,
    /// Enables logging for normal but significant condition.
    Notice,
    /// Enables logging for verbose informational.
    Verbose,
    /// Enables logging for debug-level messages.
    Debug,
    /// Enables logging for trace debug-level messages.
    Trace,
    /// Enables logging for extra debug-level messages.
    Extra,
}
impl LogLevel {
    /// All possible variants of the `LogLevel` enum, ordered from least to most verbose.
    pub const fn value_variants() -> &'static [Self] {
        const VARIANTS: &[LogLevel] = &[
            LogLevel::Fatal,
            LogLevel::Error,
            LogLevel::Warn,
            LogLevel::Notice,
            LogLevel::Verbose,
            LogLevel::Debug,
            LogLevel::Trace,
            LogLevel::Extra,
        ];
        VARIANTS
    }
    /// Static str reference to `LogLevel` enum, required for `Clap::Builder::PossibleValue::new()`.
    ///
    /// These names are also what [`FromStr`] accepts (case-insensitively).
    pub const fn variant_name(&self) -> &'static str {
        match self {
            Self::Fatal => "fatal",
            Self::Error => "error",
            Self::Warn => "warn",
            Self::Notice => "notice",
            Self::Verbose => "verbose",
            Self::Debug => "debug",
            Self::Trace => "trace",
            Self::Extra => "extra",
        }
    }
    /// Returns the human-readable description for each variant.
    pub const fn help_message(&self) -> &'static str {
        match self {
            Self::Fatal => "Enables logging for critical conditions, i.e. assertion failures",
            Self::Error => "Enables logging for error conditions",
            Self::Warn => "Enables logging for warning conditions",
            Self::Notice => "Enables logging for normal but significant condition",
            Self::Verbose => "Enables logging for verbose informational",
            Self::Debug => "Enables logging for debug-level messages",
            Self::Trace => "Enables logging for trace debug-level messages",
            Self::Extra => "Enables logging for extra debug-level messages",
        }
    }
}
impl FromStr for LogLevel {
    type Err = String;

    /// Parses a level from its case-insensitive [`LogLevel::variant_name`].
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let needle = s.to_lowercase();
        Self::value_variants()
            .iter()
            .copied()
            .find(|level| level.variant_name() == needle)
            .ok_or_else(|| format!("Invalid log level: {s}"))
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/errors/src/lockfile.rs | crates/storage/errors/src/lockfile.rs | use alloc::string::{String, ToString};
/// Storage lock error.
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
pub enum StorageLockError {
/// Write lock taken by another process; the payload is that process's PID.
#[error("storage directory is currently in use as read-write by another process: PID {_0}")]
Taken(usize),
/// Indicates other unspecified errors.
#[error("{_0}")]
Other(String),
}
impl StorageLockError {
/// Converts any error into the `Other` variant of `StorageLockError`.
///
/// Only the error's `Display` rendering is kept; the concrete error type is lost.
pub fn other<E: core::error::Error>(err: E) -> Self {
Self::Other(err.to_string())
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/errors/src/lib.rs | crates/storage/errors/src/lib.rs | //! Commonly used error types used when interacting with storage.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
/// Database error
pub mod db;
/// Lockfile error
pub mod lockfile;
/// Provider error
pub mod provider;
pub use provider::{ProviderError, ProviderResult};
/// Writer error
pub mod writer;
/// Any error
pub mod any;
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/errors/src/any.rs | crates/storage/errors/src/any.rs | use alloc::sync::Arc;
use core::{error::Error, fmt};
/// A thread-safe cloneable wrapper for any error type.
#[derive(Clone)]
pub struct AnyError {
    inner: Arc<dyn Error + Send + Sync + 'static>,
}

impl AnyError {
    /// Wraps `error` behind a reference-counted, type-erased handle,
    /// making it cheaply cloneable.
    pub fn new<E>(error: E) -> Self
    where
        E: Error + Send + Sync + 'static,
    {
        let inner: Arc<dyn Error + Send + Sync + 'static> = Arc::new(error);
        Self { inner }
    }

    /// Returns a reference to the underlying error value.
    pub fn as_error(&self) -> &(dyn Error + Send + Sync + 'static) {
        &*self.inner
    }
}

impl fmt::Debug for AnyError {
    /// Delegates to the wrapped error's `Debug` output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.inner)
    }
}

impl fmt::Display for AnyError {
    /// Delegates to the wrapped error's `Display` output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.inner)
    }
}

impl Error for AnyError {
    /// Delegates to the wrapped error's source chain.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Error::source(self.inner.as_ref())
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/errors/src/writer.rs | crates/storage/errors/src/writer.rs | use crate::db::DatabaseError;
use reth_static_file_types::StaticFileSegment;
/// `UnifiedStorageWriter` related errors
#[derive(Clone, Debug, derive_more::Display, PartialEq, Eq, derive_more::Error)]
pub enum UnifiedStorageWriterError {
/// Database writer is missing
#[display("Database writer is missing")]
MissingDatabaseWriter,
/// Static file writer is missing
#[display("Static file writer is missing")]
MissingStaticFileWriter,
/// Static file writer is of wrong segment; payload is `(got, expected)`.
#[display("Static file writer is of wrong segment: got {_0}, expected {_1}")]
IncorrectStaticFileWriter(StaticFileSegment, StaticFileSegment),
/// Database-related errors.
Database(DatabaseError),
}
impl From<DatabaseError> for UnifiedStorageWriterError {
/// Wraps a low-level database error in the writer error type.
fn from(error: DatabaseError) -> Self {
Self::Database(error)
}
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/errors/src/provider.rs | crates/storage/errors/src/provider.rs | use crate::{any::AnyError, db::DatabaseError, writer::UnifiedStorageWriterError};
use alloc::{boxed::Box, string::String};
use alloy_eips::{BlockHashOrNumber, HashOrNumber};
use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256};
use derive_more::Display;
use reth_primitives_traits::{transaction::signed::RecoveryError, GotExpected};
use reth_prune_types::PruneSegmentError;
use reth_static_file_types::StaticFileSegment;
use revm_database_interface::DBErrorMarker;
/// Provider result type.
///
/// Shorthand for `Result` with [`ProviderError`] as the error type.
pub type ProviderResult<Ok> = Result<Ok, ProviderError>;
/// Bundled errors variants thrown by various providers.
#[derive(Clone, Debug, thiserror::Error)]
pub enum ProviderError {
/// Database error.
#[error(transparent)]
Database(#[from] DatabaseError),
/// Pruning error.
#[error(transparent)]
Pruning(#[from] PruneSegmentError),
/// RLP error.
#[error("{_0}")]
Rlp(alloy_rlp::Error),
/// Trie witness error.
#[error("trie witness error: {_0}")]
TrieWitnessError(String),
/// Error when recovering the sender for a transaction
#[error("failed to recover sender for transaction")]
SenderRecoveryError,
/// The header number was not found for the given block hash.
#[error("block hash {_0} does not exist in Headers table")]
BlockHashNotFound(BlockHash),
/// A block body is missing.
#[error("block meta not found for block #{_0}")]
BlockBodyIndicesNotFound(BlockNumber),
/// The transition ID was found for the given address and storage key, but the changeset was
/// not found.
#[error(
"storage change set for address {address} and key {storage_key} at block #{block_number} does not exist"
)]
StorageChangesetNotFound {
/// The block number found for the address and storage key.
block_number: BlockNumber,
/// The account address.
address: Address,
/// The storage key.
// NOTE: This is a Box only because otherwise this variant is 16 bytes larger than the
// second largest (which uses `BlockHashOrNumber`).
storage_key: Box<B256>,
},
/// The block number was found for the given address, but the changeset was not found.
#[error("account change set for address {address} at block #{block_number} does not exist")]
AccountChangesetNotFound {
/// Block number found for the address.
block_number: BlockNumber,
/// The account address.
address: Address,
},
/// The total difficulty for a block is missing.
#[error("total difficulty not found for block #{_0}")]
TotalDifficultyNotFound(BlockNumber),
/// When required header related data was not found but was required.
#[error("no header found for {_0:?}")]
HeaderNotFound(BlockHashOrNumber),
/// The specific transaction identified by hash or id is missing.
#[error("no transaction found for {_0:?}")]
TransactionNotFound(HashOrNumber),
/// The specific receipt for a transaction identified by hash or id is missing
#[error("no receipt found for {_0:?}")]
ReceiptNotFound(HashOrNumber),
/// Unable to find the best block.
#[error("best block does not exist")]
BestBlockNotFound,
/// Unable to find the finalized block.
#[error("finalized block does not exist")]
FinalizedBlockNotFound,
/// Unable to find the safe block.
#[error("safe block does not exist")]
SafeBlockNotFound,
/// Thrown when we failed to lookup a block for the pending state.
#[error("unknown block {_0}")]
UnknownBlockHash(B256),
/// Thrown when we were unable to find a state for a block hash.
#[error("no state found for block {_0}")]
StateForHashNotFound(B256),
/// Thrown when we were unable to find a state for a block number.
#[error("no state found for block number {_0}")]
StateForNumberNotFound(u64),
/// Unable to find the block number for a given transaction index.
#[error("unable to find the block number for a given transaction index")]
BlockNumberForTransactionIndexNotFound,
/// Root mismatch.
#[error("merkle trie {_0}")]
StateRootMismatch(Box<RootMismatch>),
/// Root mismatch during unwind
#[error("unwind merkle trie {_0}")]
UnwindStateRootMismatch(Box<RootMismatch>),
/// State is not available for the given block number because it is pruned.
#[error("state at block #{_0} is pruned")]
StateAtBlockPruned(BlockNumber),
/// Provider does not support this particular request.
#[error("this provider does not support this request")]
UnsupportedProvider,
/// Static File is not found at specified path.
#[cfg(feature = "std")]
#[error("not able to find {_0} static file at {_1:?}")]
MissingStaticFilePath(StaticFileSegment, std::path::PathBuf),
/// Static File is not found for requested block.
#[error("not able to find {_0} static file for block number {_1}")]
MissingStaticFileBlock(StaticFileSegment, BlockNumber),
/// Static File is not found for requested transaction.
#[error("unable to find {_0} static file for transaction id {_1}")]
MissingStaticFileTx(StaticFileSegment, TxNumber),
/// Static File is finalized and cannot be written to.
#[error("unable to write block #{_1} to finalized static file {_0}")]
FinalizedStaticFile(StaticFileSegment, BlockNumber),
/// Trying to insert data from an unexpected block number.
#[error("trying to append data to {_0} as block #{_1} but expected block #{_2}")]
UnexpectedStaticFileBlockNumber(StaticFileSegment, BlockNumber, BlockNumber),
/// Trying to insert data from an unexpected block number.
#[error("trying to append row to {_0} at index #{_1} but expected index #{_2}")]
UnexpectedStaticFileTxNumber(StaticFileSegment, TxNumber, TxNumber),
/// Static File Provider was initialized as read-only.
#[error("cannot get a writer on a read-only environment.")]
ReadOnlyStaticFileAccess,
/// Consistent view error.
#[error("failed to initialize consistent view: {_0}")]
ConsistentView(Box<ConsistentViewError>),
/// Storage writer error.
#[error(transparent)]
UnifiedStorageWriterError(#[from] UnifiedStorageWriterError),
/// Received invalid output from configured storage implementation.
#[error("received invalid output from storage")]
InvalidStorageOutput,
/// Enclave encryptography error.
#[error("enclave error: {_0}")]
EnclaveError(EnclaveError),
/// Missing trie updates.
#[error("missing trie updates for block {0}")]
MissingTrieUpdates(B256),
/// Any other error type wrapped into a cloneable [`AnyError`].
#[error(transparent)]
Other(#[from] AnyError),
}
/// Errors produced by the enclave encryption/decryption layer.
#[derive(Clone, Debug, Eq, PartialEq, Display)]
pub enum EnclaveError {
    /// Enclave encryption failed.
    EncryptionError,
    /// Enclave decryption failed.
    DecryptionError,
    /// Ephemeral keypair generation failed; carries the underlying error message.
    EphRngKeypairGenerationError(String),
    /// Custom error with a static message.
    Custom(&'static str),
}
impl From<EnclaveError> for ProviderError {
fn from(err: EnclaveError) -> Self {
Self::EnclaveError(err)
}
}
impl ProviderError {
    /// Wraps an arbitrary error into the cloneable [`ProviderError::Other`] variant via
    /// [`AnyError`].
    pub fn other<E>(error: E) -> Self
    where
        E: core::error::Error + Send + Sync + 'static,
    {
        Self::Other(AnyError::new(error))
    }
    /// Returns the wrapped arbitrary error if this is a [`ProviderError::Other`].
    pub fn as_other(&self) -> Option<&(dyn core::error::Error + Send + Sync + 'static)> {
        if let Self::Other(err) = self {
            Some(err.as_error())
        } else {
            None
        }
    }
    /// Attempts to downcast the error wrapped by [`ProviderError::Other`] to `T`.
    ///
    /// Returns `None` when this is not an `Other` variant, or when the wrapped error is of
    /// a different type.
    pub fn downcast_other_ref<T: core::error::Error + 'static>(&self) -> Option<&T> {
        self.as_other()?.downcast_ref()
    }
    /// Returns `true` if this is a [`ProviderError::Other`] wrapping an error of type `T`.
    pub fn is_other<T: core::error::Error + 'static>(&self) -> bool {
        self.as_other().map_or(false, |err| err.is::<T>())
    }
}
// Marker impl: allows `ProviderError` to be used where a database error type is expected.
impl DBErrorMarker for ProviderError {}
impl From<alloy_rlp::Error> for ProviderError {
fn from(error: alloy_rlp::Error) -> Self {
Self::Rlp(error)
}
}
impl From<RecoveryError> for ProviderError {
fn from(_: RecoveryError) -> Self {
Self::SenderRecoveryError
}
}
/// A state-root mismatch detected at a specific block.
#[derive(Clone, Debug, PartialEq, Eq, Display)]
#[display("root mismatch at #{block_number} ({block_hash}): {root}")]
pub struct RootMismatch {
    /// The mismatched root: the value we computed vs. the value we expected.
    pub root: GotExpected<B256>,
    /// The number of the block at which the mismatch occurred.
    pub block_number: BlockNumber,
    /// The hash of the block at which the mismatch occurred.
    pub block_hash: BlockHash,
}
/// An error raised while writing to a static file.
#[derive(Debug, thiserror::Error)]
#[error("{message}")]
pub struct StaticFileWriterError {
    /// Human-readable description of the failure; also used as the `Display` output.
    pub message: String,
}
impl StaticFileWriterError {
    /// Builds a [`StaticFileWriterError`] from anything convertible into a `String`.
    pub fn new(message: impl Into<String>) -> Self {
        let message = message.into();
        Self { message }
    }
}
/// Errors that can occur when establishing or validating a consistent database view.
#[derive(Clone, Debug, PartialEq, Eq, Display)]
pub enum ConsistentViewError {
    /// The provider cannot be initialized because the node is still syncing.
    #[display("node is syncing. best block: {best_block:?}")]
    Syncing {
        /// The best block we got vs. the one we expected.
        best_block: GotExpected<BlockNumber>,
    },
    /// The database tip moved, so the view is no longer consistent.
    #[display("inconsistent database state: {tip:?}")]
    Inconsistent {
        /// The tip hash we got vs. the one we expected.
        tip: GotExpected<Option<B256>>,
    },
    /// A block from the previous database view has disappeared (e.g. after a reorg).
    #[display("database view no longer contains block: {block:?}")]
    Reorged {
        /// The block that is no longer present.
        block: B256,
    },
}
impl From<ConsistentViewError> for ProviderError {
fn from(error: ConsistentViewError) -> Self {
Self::ConsistentView(Box::new(error))
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal error type used to exercise the `Other` variant helpers.
    #[derive(thiserror::Error, Debug)]
    #[error("E")]
    struct E;

    #[test]
    fn other_err() {
        // An error wrapped via `other` must be discoverable through the type-based
        // accessors (`is_other`, `downcast_other_ref`).
        let err = ProviderError::other(E);
        assert!(err.is_other::<E>());
        assert!(err.downcast_other_ref::<E>().is_some());
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
SeismicSystems/seismic-reth | https://github.com/SeismicSystems/seismic-reth/blob/62834bd8deb86513778624a3ba33f55f4d6a1471/crates/storage/codecs/src/lib.rs | crates/storage/codecs/src/lib.rs | //! Compact codec.
//!
//! *Warning*: The `Compact` encoding format and its implementations are
//! designed for storing and retrieving data internally. They are not hardened
//! to safely read potentially malicious data.
//!
//! ## Feature Flags
//!
//! - `alloy`: [Compact] implementation for various alloy types.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/SeismicSystems/seismic-reth/issues/"
)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
pub use reth_codecs_derive::*;
use serde as _;
use alloy_primitives::{aliases::U96, Address, Bloom, Bytes, FixedBytes, U256};
use bytes::{Buf, BufMut};
use alloc::{
borrow::{Cow, ToOwned},
vec::Vec,
};
#[cfg(feature = "test-utils")]
pub mod alloy;
#[cfg(not(feature = "test-utils"))]
#[cfg(any(test, feature = "alloy"))]
pub mod alloy;
pub mod txtype;
#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;
// Used by generated code and doc tests. Not public API.
#[doc(hidden)]
#[path = "private.rs"]
pub mod __private;
/// Trait that implements the `Compact` codec.
///
/// When deriving the trait for custom structs, be aware of certain limitations/recommendations:
/// * Works best with structs that only have native types (eg. u64, B256, U256).
/// * Fixed array types (B256, Address, Bloom) are not compacted.
/// * Max size of `T` in `Option<T>` or `Vec<T>` shouldn't exceed `0xffff`.
/// * Any `Bytes` field **should be placed last**.
/// * Any other type which is not known to the derive module **should be placed last** if they
///   contain a `Bytes` field.
///
/// The last two points make it easier to decode the data without saving the length on the
/// `StructFlags`. It will fail compilation if it's not respected. If they're alias to known types,
/// add their definitions to `get_bit_size()` or `known_types` in `generator.rs`.
///
/// Regarding the `specialized_to/from_compact` methods: Mainly used as a workaround for not being
/// able to specialize an impl over certain types like `Vec<T>`/`Option<T>` where `T` is a fixed
/// size array like `Vec<B256>`.
///
/// ## Caution
///
/// Due to the bitfields, every type change on the rust type (e.g. `U256` to `u64`) is a breaking
/// change and will lead to a new, incompatible [`Compact`] implementation. Implementers must take
/// special care when changing or rearranging fields.
pub trait Compact: Sized {
    /// Takes a buffer which can be written to. *Ideally*, it returns the length written to.
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>;
    /// Takes a buffer which can be read from. Returns the object and `buf` with its internal cursor
    /// advanced (eg.`.advance(len)`).
    ///
    /// `len` can either be the `buf` remaining length, or the length of the compacted type.
    ///
    /// It will panic, if `len` is smaller than `buf.len()`.
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]);
    /// "Optional": If there's no good reason to use it, don't.
    #[inline]
    fn specialized_to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        // Default implementation simply forwards to the generic encoding.
        self.to_compact(buf)
    }
    /// "Optional": If there's no good reason to use it, don't.
    #[inline]
    fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        // Default implementation simply forwards to the generic decoding.
        Self::from_compact(buf, len)
    }
}
impl Compact for alloc::string::String {
    /// A string is stored as its raw UTF-8 bytes; the byte-slice impl handles framing.
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        self.as_bytes().to_compact(buf)
    }
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        let (vec, buf) = Vec::<u8>::from_compact(buf, len);
        // Data written by `to_compact` is always valid UTF-8, so a failure here means the
        // buffer was corrupted or produced by a different encoder. `expect` documents that
        // invariant at the panic site (the format is not hardened against malicious data —
        // see the crate docs).
        let string = Self::from_utf8(vec).expect("compact-encoded string is not valid UTF-8");
        (string, buf)
    }
}
impl<T: Compact> Compact for &T {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: BufMut + AsMut<[u8]>,
    {
        // Encoding through a reference just delegates to the referenced value.
        (*self).to_compact(buf)
    }
    // Decoding cannot produce a reference into the buffer; references are encode-only.
    fn from_compact(_: &[u8], _: usize) -> (Self, &[u8]) {
        unimplemented!()
    }
}
/// To be used with `Option<CompactPlaceholder>` to place or replace one bit on the bitflag struct.
pub type CompactPlaceholder = ();
impl Compact for CompactPlaceholder {
    /// A placeholder carries no data, so nothing is written.
    #[inline]
    fn to_compact<B>(&self, _: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        0
    }
    /// Nothing was written, so the buffer is returned untouched.
    #[inline]
    fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) {
        ((), buf)
    }
}
// Implements [`Compact`] for unsigned integers by trimming leading zero bytes:
// only the significant big-endian bytes are written, and the count is returned.
macro_rules! impl_uint_compact {
    ($($name:tt),+) => {
        $(
            impl Compact for $name {
                #[inline]
                fn to_compact<B>(&self, buf: &mut B) -> usize
                    where B: bytes::BufMut + AsMut<[u8]>
                {
                    // One byte can be skipped for every 8 leading zero bits.
                    let leading = self.leading_zeros() as usize / 8;
                    buf.put_slice(&self.to_be_bytes()[leading..]);
                    core::mem::size_of::<$name>() - leading
                }
                #[inline]
                fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
                    // `len == 0` encodes the value zero without consuming any bytes.
                    if len == 0 {
                        return (0, buf);
                    }
                    // Right-align the stored bytes into a full-width array.
                    let mut arr = [0; core::mem::size_of::<$name>()];
                    arr[core::mem::size_of::<$name>() - len..].copy_from_slice(&buf[..len]);
                    buf.advance(len);
                    ($name::from_be_bytes(arr), buf)
                }
            }
        )+
    };
}
impl_uint_compact!(u8, u64, u128);
impl<T> Compact for Vec<T>
where
    T: Compact,
{
    /// Returns 0 since we won't include it in the `StructFlags`.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        // Delegates to the slice impl: varuint element count, then each element
        // prefixed with its own varuint-encoded length.
        self.as_slice().to_compact(buf)
    }
    #[inline]
    fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) {
        // Element count comes first as a varuint.
        let (length, mut buf) = decode_varuint(buf);
        let mut list = Self::with_capacity(length);
        for _ in 0..length {
            // Each element carries its own length prefix; decode within that window
            // and then advance the cursor past it.
            let len;
            (len, buf) = decode_varuint(buf);
            let (element, _) = T::from_compact(&buf[..len], len);
            buf.advance(len);
            list.push(element);
        }
        (list, buf)
    }
    /// To be used by fixed sized types like `Vec<B256>`.
    #[inline]
    fn specialized_to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        self.as_slice().specialized_to_compact(buf)
    }
    /// To be used by fixed sized types like `Vec<B256>`.
    ///
    /// Here `len` is the fixed encoded size of one element, so there are no
    /// per-element length prefixes to read.
    #[inline]
    fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        let (length, mut buf) = decode_varuint(buf);
        let mut list = Self::with_capacity(length);
        for _ in 0..length {
            let element;
            (element, buf) = T::from_compact(buf, len);
            list.push(element);
        }
        (list, buf)
    }
}
impl<T> Compact for &[T]
where
    T: Compact,
{
    /// Returns 0 since we won't include it in the `StructFlags`.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        // Layout: varuint element count, then for each element a varuint length
        // followed by the element's compact encoding.
        encode_varuint(self.len(), buf);
        let mut tmp: Vec<u8> = Vec::with_capacity(64);
        for element in *self {
            tmp.clear();
            // We don't know the length until we compact it
            let length = element.to_compact(&mut tmp);
            encode_varuint(length, buf);
            buf.put_slice(&tmp);
        }
        0
    }
    // A borrowed slice cannot be decoded into; decode via `Vec<T>` instead.
    #[inline]
    fn from_compact(_: &[u8], _: usize) -> (Self, &[u8]) {
        unimplemented!()
    }
    /// To be used by fixed sized types like `&[B256]`.
    ///
    /// Skips the per-element length prefixes since every element has a known size.
    #[inline]
    fn specialized_to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        encode_varuint(self.len(), buf);
        for element in *self {
            element.to_compact(buf);
        }
        0
    }
    // See `from_compact`: decoding into a borrow is not possible.
    #[inline]
    fn specialized_from_compact(_: &[u8], _: usize) -> (Self, &[u8]) {
        unimplemented!()
    }
}
impl<T> Compact for Option<T>
where
    T: Compact,
{
    /// Returns 0 for `None` and 1 for `Some(_)`.
    ///
    /// The returned bit lives in the `StructFlags`; for `Some`, the payload is written as
    /// a varuint length followed by the element's compact encoding.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let Some(element) = self else { return 0 };
        // We don't know the length of the element until we compact it.
        let mut tmp = Vec::with_capacity(64);
        let length = element.to_compact(&mut tmp);
        encode_varuint(length, buf);
        buf.put_slice(&tmp);
        1
    }
    /// `len` carries the flag bit: 0 means `None` (no bytes consumed), non-zero means `Some`.
    #[inline]
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        if len == 0 {
            return (None, buf)
        }
        let (len, mut buf) = decode_varuint(buf);
        let (element, _) = T::from_compact(&buf[..len], len);
        buf.advance(len);
        (Some(element), buf)
    }
    /// To be used by fixed sized types like `Option<B256>`.
    ///
    /// Omits the varuint length prefix since the payload size is fixed.
    #[inline]
    fn specialized_to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        if let Some(element) = self {
            element.to_compact(buf);
            1
        } else {
            0
        }
    }
    /// To be used by fixed sized types like `Option<B256>`.
    #[inline]
    fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        if len == 0 {
            return (None, buf)
        }
        let (element, buf) = T::from_compact(buf, len);
        (Some(element), buf)
    }
}
impl<T: Compact + ToOwned<Owned = T>> Compact for Cow<'_, T> {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        // Encoding only needs a borrow, so both `Borrowed` and `Owned` delegate to `T`.
        self.as_ref().to_compact(buf)
    }
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        // Decoding always produces an owned value.
        let (element, buf) = T::from_compact(buf, len);
        (Cow::Owned(element), buf)
    }
    fn specialized_to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        self.as_ref().specialized_to_compact(buf)
    }
    fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        let (element, buf) = T::specialized_from_compact(buf, len);
        (Cow::Owned(element), buf)
    }
}
impl Compact for U256 {
    /// Writes only the significant big-endian bytes (leading zero bytes are stripped)
    /// and returns how many were written.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let bytes = self.to_be_bytes::<32>();
        let significant = 32 - (self.leading_zeros() / 8);
        buf.put_slice(&bytes[32 - significant..]);
        significant
    }
    /// Rebuilds the value by right-aligning the `len` stored bytes into a 32-byte word.
    /// `len == 0` decodes to zero without touching the buffer.
    #[inline]
    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
        if len == 0 {
            return (Self::ZERO, buf)
        }
        let mut word = [0u8; 32];
        word[32 - len..].copy_from_slice(&buf[..len]);
        buf.advance(len);
        (Self::from_be_bytes(word), buf)
    }
}
impl Compact for U96 {
    /// Same scheme as `U256`: strip leading zero bytes and write the big-endian tail.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let inner = self.to_be_bytes::<12>();
        let size = 12 - (self.leading_zeros() / 8);
        buf.put_slice(&inner[12 - size..]);
        size
    }
    /// Right-aligns the `len` stored bytes into a 12-byte word; `len == 0` decodes to zero.
    #[inline]
    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
        if len == 0 {
            return (Self::ZERO, buf)
        }
        let mut arr = [0; 12];
        arr[(12 - len)..].copy_from_slice(&buf[..len]);
        buf.advance(len);
        (Self::from_be_bytes(arr), buf)
    }
}
impl Compact for Bytes {
    /// Writes the raw bytes as-is and returns their length; the length itself is stored
    /// externally (which is why `Bytes` fields must be placed last in derived structs).
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        let len = self.len();
        buf.put_slice(&self.0);
        len
    }
    /// Takes exactly `len` bytes from the buffer and advances the cursor past them.
    #[inline]
    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
        (buf.copy_to_bytes(len).into(), buf)
    }
}
impl<const N: usize> Compact for [u8; N] {
    /// Fixed-size byte arrays are stored as-is, without compaction.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        buf.put_slice(&self[..]);
        N
    }
    #[inline]
    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
        // `len == 0` signals an absent value (e.g. an unset flag bit) and yields a
        // zero-filled array without consuming the buffer.
        if len == 0 {
            return ([0; N], buf)
        }
        // NOTE: apart from the zero check, `len` is ignored — exactly `N` bytes are read.
        let v = buf[..N].try_into().unwrap();
        buf.advance(N);
        (v, buf)
    }
}
/// Implements the [`Compact`] trait for wrappers over fixed size byte array types.
#[macro_export]
macro_rules! impl_compact_for_wrapped_bytes {
    ($($name:tt),+) => {
        $(
            impl Compact for $name {
                #[inline]
                fn to_compact<B>(&self, buf: &mut B) -> usize
                where
                    B: bytes::BufMut + AsMut<[u8]>
                {
                    // Delegates to the inner fixed-size byte array (written as-is).
                    self.0.to_compact(buf)
                }
                #[inline]
                fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
                    // The wrapper's size determines how many bytes to read back.
                    let (v, buf) = <[u8; core::mem::size_of::<$name>()]>::from_compact(buf, len);
                    (Self::from(v), buf)
                }
            }
        )+
    };
}
impl_compact_for_wrapped_bytes!(Address, Bloom);
impl<const N: usize> Compact for FixedBytes<N> {
    /// Fixed-size byte wrappers are not compacted; the inner array is written as-is.
    #[inline]
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        self.0.to_compact(buf)
    }
    #[inline]
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        let (v, buf) = <[u8; N]>::from_compact(buf, len);
        (Self::from(v), buf)
    }
}
impl Compact for bool {
    /// `bool` values live entirely in the `StructFlags` bitfield; nothing is written to the
    /// buffer, and the returned 0/1 becomes the flag bit.
    #[inline]
    fn to_compact<B>(&self, _: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        usize::from(*self)
    }
    /// The actual value is carried in `len`; the buffer cursor is left untouched.
    #[inline]
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        (len != 0, buf)
    }
}
/// LEB128-style varuint encoder: emits 7 bits per byte, least-significant group first,
/// with the high bit set on every byte except the last.
fn encode_varuint<B>(mut n: usize, buf: &mut B)
where
    B: bytes::BufMut + AsMut<[u8]>,
{
    loop {
        if n < 0x80 {
            // Final byte: high bit clear terminates the sequence.
            buf.put_u8(n as u8);
            return;
        }
        buf.put_u8((n & 0x7F) as u8 | 0x80);
        n >>= 7;
    }
}
/// Decodes a varuint produced by `encode_varuint`, returning the value and the
/// remainder of the buffer past the terminating byte.
///
/// Panics (via `decode_varuint_panic`) if no terminating byte is found within the
/// iteration bound, and via slice indexing if the buffer ends mid-sequence.
fn decode_varuint(buf: &[u8]) -> (usize, &[u8]) {
    let mut acc = 0usize;
    let mut i = 0;
    while i < 33 {
        let byte = buf[i];
        acc |= usize::from(byte & 0x7F) << (i * 7);
        // A clear high bit marks the final byte of the sequence.
        if byte & 0x80 == 0 {
            return (acc, &buf[i + 1..]);
        }
        i += 1;
    }
    decode_varuint_panic();
}
#[inline(never)]
#[cold]
const fn decode_varuint_panic() -> ! {
    panic!("could not decode varuint");
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::B256;
    use serde::{Deserialize, Serialize};

    #[test]
    fn compact_bytes() {
        let arr = [1, 2, 3, 4, 5];
        let list = Bytes::copy_from_slice(&arr);
        let mut buf = Vec::with_capacity(list.len() + 1);
        assert_eq!(list.to_compact(&mut buf), list.len());
        // Add some noise data.
        buf.push(1);
        assert_eq!(&buf[..arr.len()], &arr);
        assert_eq!(Bytes::from_compact(&buf, list.len()), (list, vec![1].as_slice()));
    }

    #[test]
    fn compact_address() {
        let mut buf = Vec::with_capacity(21);
        assert_eq!(Address::ZERO.to_compact(&mut buf), 20);
        assert_eq!(buf, vec![0; 20]);
        // Add some noise data.
        buf.push(1);
        // Address shouldn't care about the len passed, since it's not actually compacted.
        assert_eq!(Address::from_compact(&buf, 1000), (Address::ZERO, vec![1u8].as_slice()));
    }

    #[test]
    fn compact_b256() {
        let mut buf = Vec::with_capacity(32 + 1);
        assert_eq!(B256::ZERO.to_compact(&mut buf), 32);
        assert_eq!(buf, vec![0; 32]);
        // Add some noise data.
        buf.push(1);
        // B256 shouldn't care about the len passed, since it's not actually compacted.
        assert_eq!(B256::from_compact(&buf, 1000), (B256::ZERO, vec![1u8].as_slice()));
    }

    #[test]
    fn compact_bool() {
        let _vtrue = true;
        let mut buf = vec![];
        assert_eq!(true.to_compact(&mut buf), 1);
        // Bool vars go directly to the `StructFlags` and not written to the buf.
        assert_eq!(buf.len(), 0);
        assert_eq!(false.to_compact(&mut buf), 0);
        assert_eq!(buf.len(), 0);
        let buf = vec![100u8];
        // Bool expects the real value to come in `len`, and does not advance the cursor.
        assert_eq!(bool::from_compact(&buf, 1), (true, buf.as_slice()));
        assert_eq!(bool::from_compact(&buf, 0), (false, buf.as_slice()));
    }

    #[test]
    fn compact_option() {
        let opt = Some(B256::ZERO);
        let mut buf = Vec::with_capacity(1 + 32);
        assert_eq!(None::<B256>.to_compact(&mut buf), 0);
        assert_eq!(opt.to_compact(&mut buf), 1);
        assert_eq!(buf.len(), 1 + 32);
        assert_eq!(Option::<B256>::from_compact(&buf, 1), (opt, vec![].as_slice()));
        // If `None`, it returns the slice at the same cursor position.
        assert_eq!(Option::<B256>::from_compact(&buf, 0), (None, buf.as_slice()));
        // The specialized path skips the varuint length prefix for fixed-size payloads.
        let mut buf = Vec::with_capacity(32);
        assert_eq!(opt.specialized_to_compact(&mut buf), 1);
        assert_eq!(buf.len(), 32);
        assert_eq!(Option::<B256>::specialized_from_compact(&buf, 1), (opt, vec![].as_slice()));
    }

    #[test]
    fn compact_vec() {
        let list = vec![B256::ZERO, B256::ZERO];
        let mut buf = vec![];
        // Vec doesn't return a total length
        assert_eq!(list.to_compact(&mut buf), 0);
        // Add some noise data in the end that should be returned by `from_compact`.
        buf.extend([1u8, 2]);
        // Encoded layout: count varuint + (len varuint + 32 bytes) per element.
        let mut remaining_buf = buf.as_slice();
        remaining_buf.advance(1 + 1 + 32 + 1 + 32);
        assert_eq!(Vec::<B256>::from_compact(&buf, 0), (list, remaining_buf));
        assert_eq!(remaining_buf, &[1u8, 2]);
    }

    #[test]
    fn compact_u256() {
        // Zero encodes to no bytes at all.
        let mut buf = vec![];
        assert_eq!(U256::ZERO.to_compact(&mut buf), 0);
        assert!(buf.is_empty());
        assert_eq!(U256::from_compact(&buf, 0), (U256::ZERO, vec![].as_slice()));
        assert_eq!(U256::from(2).to_compact(&mut buf), 1);
        assert_eq!(buf, vec![2u8]);
        assert_eq!(U256::from_compact(&buf, 1), (U256::from(2), vec![].as_slice()));
    }

    #[test]
    fn compact_u64() {
        let mut buf = vec![];
        assert_eq!(0u64.to_compact(&mut buf), 0);
        assert!(buf.is_empty());
        assert_eq!(u64::from_compact(&buf, 0), (0u64, vec![].as_slice()));
        assert_eq!(2u64.to_compact(&mut buf), 1);
        assert_eq!(buf, vec![2u8]);
        assert_eq!(u64::from_compact(&buf, 1), (2u64, vec![].as_slice()));
        // Max value has no leading zero bytes, so all 8 bytes are written.
        let mut buf = Vec::with_capacity(8);
        assert_eq!(0xffffffffffffffffu64.to_compact(&mut buf), 8);
        assert_eq!(&buf, &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);
        assert_eq!(u64::from_compact(&buf, 8), (0xffffffffffffffffu64, vec![].as_slice()));
    }

    #[test]
    fn variable_uint() {
        // Round-trip property: every usize encodes and decodes to itself, consuming
        // exactly the encoded bytes.
        proptest::proptest!(|(val: usize)| {
            let mut buf = vec![];
            encode_varuint(val, &mut buf);
            let (decoded, read_buf) = decode_varuint(&buf);
            assert_eq!(val, decoded);
            assert!(!read_buf.has_remaining());
        });
    }

    #[test]
    fn compact_slice() {
        // The slice impl must produce byte-identical output to the Vec impl.
        let vec_list = vec![B256::ZERO, B256::random(), B256::random(), B256::ZERO];
        // to_compact
        {
            let mut vec_buf = vec![];
            assert_eq!(vec_list.to_compact(&mut vec_buf), 0);
            let mut slice_buf = vec![];
            assert_eq!(vec_list.as_slice().to_compact(&mut slice_buf), 0);
            assert_eq!(vec_buf, slice_buf);
        }
        // specialized_to_compact
        {
            let mut vec_buf = vec![];
            assert_eq!(vec_list.specialized_to_compact(&mut vec_buf), 0);
            let mut slice_buf = vec![];
            assert_eq!(vec_list.as_slice().specialized_to_compact(&mut slice_buf), 0);
            assert_eq!(vec_buf, slice_buf);
        }
    }

    // Exercises the derive across every supported field category (ints, bools,
    // options, vectors); also round-tripped by the derived arbitrary tests.
    #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Compact, arbitrary::Arbitrary)]
    #[add_arbitrary_tests(crate, compact)]
    #[reth_codecs(crate = "crate")]
    struct TestStruct {
        f_u64: u64,
        f_u256: U256,
        f_bool_t: bool,
        f_bool_f: bool,
        f_option_none: Option<B256>,
        f_option_some: Option<B256>,
        f_option_some_u64: Option<u64>,
        f_vec_empty: Vec<Address>,
        f_vec_some: Vec<Address>,
    }

    impl Default for TestStruct {
        fn default() -> Self {
            Self {
                f_u64: 1u64,                                     // 4 bits | 1 byte
                f_u256: U256::from(1u64),                        // 6 bits | 1 byte
                f_bool_f: false,                                 // 1 bit  | 0 bytes
                f_bool_t: true,                                  // 1 bit  | 0 bytes
                f_option_none: None,                             // 1 bit  | 0 bytes
                f_option_some: Some(B256::ZERO),                 // 1 bit  | 32 bytes
                f_option_some_u64: Some(0xffffu64),              // 1 bit  | 1 + 2 bytes
                f_vec_empty: vec![],                             // 0 bits | 1 bytes
                f_vec_some: vec![Address::ZERO, Address::ZERO],  // 0 bits | 1 + 20*2 bytes
            }
        }
    }

    #[test]
    fn compact_test_struct() {
        let test = TestStruct::default();
        // Sizes per field match the byte counts annotated on `Default` above.
        const EXPECTED_SIZE: usize = 2 + // TestStructFlags
            1 + // f_u64
            1 + // f_u256
            // 0 + 0 + 0 + (f_bool_t, f_bool_f, f_option_none: flag bits only)
            32 + // f_option_some
            1 + 2 + // f_option_some_u64 (varuint len + payload)
            1 + // f_vec_empty (varuint element count)
            1 + 20 * 2; // f_vec_some
        let mut buf = Vec::with_capacity(EXPECTED_SIZE);
        assert_eq!(test.to_compact(&mut buf), EXPECTED_SIZE);
        assert_eq!(
            TestStruct::from_compact(&buf, buf.len()),
            (TestStruct::default(), vec![].as_slice())
        );
    }

    // Exercises the derive for enums: unit, struct-payload, and int-payload variants.
    #[derive(
        Debug, PartialEq, Clone, Default, Serialize, Deserialize, Compact, arbitrary::Arbitrary,
    )]
    #[add_arbitrary_tests(crate, compact)]
    #[reth_codecs(crate = "crate")]
    enum TestEnum {
        #[default]
        Var0,
        Var1(TestStruct),
        Var2(u64),
    }

    #[cfg(test)]
    #[test_fuzz::test_fuzz]
    fn compact_test_enum_all_variants(var0: TestEnum, var1: TestEnum, var2: TestEnum) {
        // Each variant must round-trip through the compact encoding unchanged.
        let mut buf = vec![];
        var0.to_compact(&mut buf);
        assert_eq!(TestEnum::from_compact(&buf, buf.len()).0, var0);
        let mut buf = vec![];
        var1.to_compact(&mut buf);
        assert_eq!(TestEnum::from_compact(&buf, buf.len()).0, var1);
        let mut buf = vec![];
        var2.to_compact(&mut buf);
        assert_eq!(TestEnum::from_compact(&buf, buf.len()).0, var2);
    }

    #[test]
    fn compact_test_enum() {
        let var0 = TestEnum::Var0;
        let var1 = TestEnum::Var1(TestStruct::default());
        let var2 = TestEnum::Var2(1u64);
        compact_test_enum_all_variants(var0, var1, var2);
    }
}
| rust | Apache-2.0 | 62834bd8deb86513778624a3ba33f55f4d6a1471 | 2026-01-04T20:20:17.218210Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.