repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/fmt/src/state/yul.rs | crates/fmt/src/state/yul.rs | #![allow(clippy::too_many_arguments)]
use super::{
CommentConfig, State,
common::{BlockFormat, ListFormat},
};
use solar::parse::ast::{self, Span, yul};
// Returns a closure extracting the `Span` from a list element:
// `get_span!()` reads a `span` field, `get_span!(())` calls a `span()` method.
#[rustfmt::skip]
macro_rules! get_span {
    () => { |value| value.span };
    (()) => { |value| value.span() };
}
/// Language-specific pretty printing: Yul.
impl<'ast> State<'_, 'ast> {
/// Prints a Yul literal by delegating to the shared literal printer in Yul mode.
fn print_lit_yul(&mut self, lit: &'ast ast::Lit<'ast>) {
    self.print_lit_inner(lit, true);
}
/// Prints a single Yul statement, dispatching on its kind.
///
/// Doc comments are emitted first; if the statement's span is handled by
/// `handle_span` (e.g. an inline-config-disabled region printed verbatim),
/// nothing else is printed.
pub(crate) fn print_yul_stmt(&mut self, stmt: &'ast yul::Stmt<'ast>) {
    let yul::Stmt { ref docs, span, ref kind } = *stmt;
    self.print_docs(docs);
    if self.handle_span(span, false) {
        return;
    }
    match kind {
        yul::StmtKind::Block(stmts) => self.print_yul_block(stmts, span, false, 0),
        // `path := expr`
        yul::StmtKind::AssignSingle(path, expr) => {
            self.print_path(path, false);
            self.word(" := ");
            self.neverbreak();
            self.cursor.advance_to(expr.span.lo(), self.cursor.enabled);
            self.print_yul_expr(expr);
        }
        // `a, b, c := call(...)`
        yul::StmtKind::AssignMulti(paths, expr_call) => {
            self.ibox(0);
            self.commasep(
                paths,
                stmt.span.lo(),
                stmt.span.hi(),
                |this, path| this.print_path(path, false),
                get_span!(()),
                ListFormat::consistent(),
            );
            self.word(" :=");
            self.space();
            self.s.offset(self.ind);
            self.ibox(0);
            self.print_yul_expr(expr_call);
            self.end();
            self.end();
        }
        yul::StmtKind::Expr(expr_call) => self.print_yul_expr(expr_call),
        yul::StmtKind::If(expr, stmts) => {
            self.print_word("if "); // 3 chars
            self.print_yul_expr(expr);
            self.nbsp(); // 1 char
            // Prefix on this line: `if ` + condition + ` ` = 4 + condition width.
            self.print_yul_block(stmts, span, false, 4 + self.estimate_size(expr.span));
        }
        yul::StmtKind::For(yul::StmtFor { init, cond, step, body }) => {
            self.ibox(0);
            self.print_word("for "); // 4 chars
            self.print_yul_block(init, init.span, false, 4);
            self.space();
            self.print_yul_expr(cond);
            self.space();
            self.print_yul_block(step, step.span, false, 0);
            self.space();
            self.print_yul_block(body, body.span, false, 0);
            self.end();
        }
        yul::StmtKind::Switch(yul::StmtSwitch { selector, cases }) => {
            self.print_word("switch ");
            self.print_yul_expr(selector);
            self.print_trailing_comment(selector.span.hi(), None);
            // Each case starts on its own line: `case <lit> { ... }` or
            // `default { ... }`.
            for yul::StmtSwitchCase { constant, body, span } in cases.iter() {
                self.hardbreak_if_not_bol();
                if let Some(constant) = constant {
                    self.print_comments(
                        constant.span.lo(),
                        CommentConfig::default().mixed_prev_space(),
                    );
                    self.print_word("case ");
                    self.print_lit_yul(constant);
                    self.nbsp();
                } else {
                    self.print_comments(
                        body.span.lo(),
                        CommentConfig::default().mixed_prev_space(),
                    );
                    self.print_word("default ");
                }
                self.print_yul_block(body, *span, false, 0);
                // NOTE(review): this anchors the per-case trailing comment at
                // the *selector*'s end, not the case's end — looks like it was
                // meant to be `span.hi()`; confirm against comment handling.
                self.print_trailing_comment(selector.span.hi(), None);
            }
        }
        yul::StmtKind::Leave => self.print_word("leave"),
        yul::StmtKind::Break => self.print_word("break"),
        yul::StmtKind::Continue => self.print_word("continue"),
        yul::StmtKind::FunctionDef(func) => {
            let yul::Function { name, parameters, returns, body } = func;
            // End position of the parameter list: last parameter, else the
            // first return variable, else the start of the body.
            let params_hi = parameters
                .last()
                .map_or(returns.first().map_or(body.span.lo(), |r| r.span.lo()), |p| {
                    p.span.hi()
                });
            self.cbox(0);
            self.s.ibox(0);
            self.print_word("function ");
            self.print_ident(name);
            self.print_tuple(
                parameters,
                span.lo(),
                params_hi,
                Self::print_ident,
                get_span!(),
                ListFormat::consistent(),
            );
            self.nbsp();
            let has_returns = !returns.is_empty();
            // When there are returns, `ListFormat::yul(.., Some("{"))` below
            // emits the opening brace, so the block must not print its own.
            let skip_opening_brace = has_returns;
            if self.can_yul_header_params_be_inlined(func) {
                self.neverbreak();
            }
            if has_returns {
                self.commasep(
                    returns,
                    returns.first().map_or(params_hi, |ret| ret.span.lo()),
                    returns.last().map_or(body.span.lo(), |ret| ret.span.hi()),
                    Self::print_ident,
                    get_span!(),
                    ListFormat::yul(Some("->"), Some("{")),
                );
            }
            self.end();
            self.print_yul_block(body, span, skip_opening_brace, 0);
            self.end();
        }
        // `let a, b := expr` (the initializer is optional: `let a`)
        yul::StmtKind::VarDecl(idents, expr) => {
            self.s.ibox(self.ind);
            self.print_word("let ");
            self.commasep(
                idents,
                stmt.span.lo(),
                idents.last().map_or(stmt.span.lo(), |i| i.span.hi()),
                Self::print_ident,
                get_span!(),
                ListFormat::consistent(),
            );
            if let Some(expr) = expr {
                self.print_word(" :=");
                self.space();
                self.print_yul_expr(expr);
            }
            self.end();
        }
    }
}
/// Prints a Yul expression: a path, a function call, or a literal.
fn print_yul_expr(&mut self, expr: &'ast yul::Expr<'ast>) {
    let yul::Expr { span, ref kind } = *expr;
    // Inline-config-disabled spans are printed verbatim.
    if self.handle_span(span, false) {
        return;
    }
    match kind {
        yul::ExprKind::Path(path) => self.print_path(path, false),
        yul::ExprKind::Call(yul::ExprCall { name, arguments }) => {
            self.print_ident(name);
            self.print_tuple(
                arguments,
                span.lo(),
                span.hi(),
                |s, arg| s.print_yul_expr(arg),
                get_span!(),
                ListFormat::consistent().break_single(true),
            );
        }
        yul::ExprKind::Lit(lit) => {
            // Address literals are emitted straight from the source span
            // instead of re-rendered.
            if matches!(&lit.kind, ast::LitKind::Address(_)) {
                self.print_span_cold(lit.span);
            } else {
                self.print_lit_yul(lit);
            }
        }
    }
}
/// Prints a Yul block `{ ... }`.
///
/// * `skip_opening_brace` — set when the caller has already emitted the `{`
///   (e.g. a function definition whose return list prints it).
/// * `prefix_len` — width already consumed on the current line before the
///   block, used when deciding whether the block can be inlined.
pub(super) fn print_yul_block(
    &mut self,
    block: &'ast yul::Block<'ast>,
    span: Span,
    skip_opening_brace: bool,
    prefix_len: usize,
) {
    // Inline-config-disabled spans are printed verbatim.
    if self.handle_span(span, false) {
        return;
    }
    if !skip_opening_brace {
        self.print_word("{");
    }
    // A block is inlined as `{ stmt }` only when it has at most one statement,
    // the source block isn't multiline, and its estimated width fits the
    // remaining space on the line.
    let can_inline_block = if block.len() <= 1 && !self.is_multiline_yul_block(block) {
        if self.max_space_left(prefix_len) == 0 {
            self.estimate_size(block.span) + self.config.tab_width < self.space_left()
        } else {
            self.estimate_size(block.span) + prefix_len < self.space_left()
        }
    } else {
        false
    };
    if can_inline_block {
        self.neverbreak();
        self.print_block_inner(
            block,
            BlockFormat::NoBraces(None),
            |s, stmt| {
                // Pad the single statement with spaces: `{ stmt }`, keeping
                // mixed comments attached before the closing brace.
                s.nbsp();
                s.print_yul_stmt(stmt);
                if s.peek_comment_before(stmt.span.hi()).is_none()
                    && s.peek_trailing_comment(stmt.span.hi(), None).is_none()
                {
                    s.nbsp();
                }
                s.print_comments(
                    stmt.span.hi(),
                    CommentConfig::skip_ws().mixed_no_break().mixed_post_nbsp(),
                );
                if !s.last_token_is_space() {
                    s.nbsp();
                }
            },
            |b| b.span,
            span.hi(),
        );
    } else {
        // Multi-line form: one statement per line, with comment handling
        // between consecutive statements.
        let (mut i, n_args) = (0, block.len().saturating_sub(1));
        self.print_block_inner(
            block,
            BlockFormat::NoBraces(Some(self.ind)),
            |s, stmt| {
                s.print_yul_stmt(stmt);
                s.print_comments(stmt.span.hi(), CommentConfig::default());
                if i != n_args {
                    // Not the last statement: flush trailing comments and
                    // break the line before the next one.
                    let next_span = block[i + 1].span;
                    s.print_trailing_comment(stmt.span.hi(), Some(next_span.lo()));
                    if !s.is_bol_or_only_ind() && !s.inline_config.is_disabled(stmt.span) {
                        // when disabling a single line, manually add a nonbreaking line jump so
                        // that the indentation of the disabled line is maintained.
                        if s.inline_config.is_disabled(next_span)
                            && s.peek_comment_before(next_span.lo())
                                .is_none_or(|cmnt| !cmnt.style.is_isolated())
                        {
                            s.word("\n");
                        // otherwise, use a regular hardbreak
                        } else {
                            s.hardbreak_if_not_bol();
                        }
                    }
                    i += 1;
                } else {
                    s.print_trailing_comment(stmt.span.hi(), Some(span.hi()));
                }
            },
            |b| b.span,
            span.hi(),
        );
    }
    self.print_word("}");
    self.print_trailing_comment(span.hi(), None);
}
/// Checks if a block statement `{ ... }` contains more than one line of actual code.
/// Checks if a block statement `{ ... }` contains more than one line of actual code.
fn is_multiline_yul_block(&self, block: &'ast yul::Block<'ast>) -> bool {
    // No statements means no lines of code, regardless of source layout.
    if block.stmts.is_empty() {
        return false;
    }
    if self.sm.is_multiline(block.span)
        && let Ok(snip) = self.sm.span_to_snippet(block.span)
    {
        let code_lines = snip.lines().filter(|line| {
            let trimmed = line.trim();
            // Ignore empty lines and lines with only '{' or '}': a block
            // written as `{\n stmt \n}` still has a single line of code.
            // (Previously only empty lines were filtered, contradicting
            // this function's documented contract.)
            !trimmed.is_empty() && trimmed != "{" && trimmed != "}"
        });
        return code_lines.count() > 1;
    }
    false
}
/// Estimated width of a Yul function header through the `->` arrow:
/// `'function ' + name + ' ' + (params) + ' ->'`.
fn estimate_yul_header_params_size(&mut self, func: &yul::Function<'_>) -> usize {
    // Each parameter contributes its own source width plus two punctuation
    // characters: '(' and ')' around the first one, ', ' before each of the
    // rest. (The original fold's branches both reduced to `acc + 2`.)
    let mut params_width = 0;
    for param in func.parameters.iter() {
        params_width += 2 + self.estimate_size(param.span);
    }
    // 'function ' (9) + name + ' ' (1) + params + ' ->' (3)
    9 + self.estimate_size(func.name.span) + 1 + params_width + 3
}
/// Whether the estimated `function name(params) ->` header fits in the
/// space remaining on the current line.
fn can_yul_header_params_be_inlined(&mut self, func: &yul::Function<'_>) -> bool {
    self.estimate_yul_header_params_size(func) <= self.space_left()
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/fmt/tests/formatter.rs | crates/fmt/tests/formatter.rs | use forge_fmt::FormatterConfig;
use foundry_test_utils::init_tracing;
use snapbox::{Data, assert_data_eq};
use solar::sema::Compiler;
use std::{
fs,
path::{Path, PathBuf},
sync::Arc,
};
/// Formats `source` as the file at `path` using `fmt_config`, panicking on
/// any formatter error.
#[track_caller]
fn format(source: &str, path: &Path, fmt_config: Arc<FormatterConfig>) -> String {
    let session =
        solar::interface::Session::builder().with_buffer_emitter(Default::default()).build();
    let mut compiler = Compiler::new(session);
    forge_fmt::format_source(source, Some(path), fmt_config, &mut compiler)
        .into_result()
        .unwrap_or_else(|e| panic!("failed to format {path:?}: {e}"))
}
/// Asserts that `content` ends with exactly one trailing newline.
#[track_caller]
fn assert_eof(content: &str) {
    // Check for a missing newline first, then for a doubled one, keeping the
    // original diagnostic messages.
    if !content.ends_with('\n') {
        panic!("missing trailing newline");
    }
    if content.ends_with("\n\n") {
        panic!("extra trailing newline");
    }
}
fn tests_dir() -> PathBuf {
Path::new(env!("CARGO_MANIFEST_DIR")).join("testdata")
}
/// Runs the formatter over every `*fmt.sol` expectation file in
/// `testdata/<base_name>`, comparing against `original.sol`.
///
/// Each expectation file may start with `// config: <toml>` header lines that
/// override the default formatter configuration; those headers are parsed
/// here and their total byte length (`comments_end`) is forwarded so they can
/// be re-injected into the snapshot comparison. Each file is checked on its
/// own named thread.
fn test_directory(base_name: &str) {
    init_tracing();
    let dir = tests_dir().join(base_name);
    let mut original = fs::read_to_string(dir.join("original.sol")).unwrap();
    if cfg!(windows) {
        original = original.replace("\r\n", "\n");
    }
    let mut handles = vec![];
    for res in dir.read_dir().unwrap() {
        let entry = res.unwrap();
        let path = entry.path();
        let filename = path.file_name().and_then(|name| name.to_str()).unwrap();
        if filename == "original.sol" {
            continue;
        }
        assert!(path.is_file(), "expected file: {path:?}");
        assert!(filename.ends_with("fmt.sol"), "unknown file: {path:?}");
        let mut expected = fs::read_to_string(&path).unwrap();
        if cfg!(windows) {
            expected = expected
                .replace("\r\n", "\n")
                .replace(r"\'", r"/'")
                .replace(r#"\""#, r#"/""#)
                .replace("\\\n", "/\n");
        }
        // The majority of the tests were written with the assumption that the default value for max
        // line length is `80`. Preserve that to avoid rewriting test logic.
        let default_config = FormatterConfig { line_length: 80, ..Default::default() };
        let mut config = toml::Value::try_from(default_config).unwrap();
        let config_table = config.as_table_mut().unwrap();
        // Consume leading `// config:` header lines, merging their TOML
        // values into the default config.
        let mut comments_end = 0;
        for (i, line) in expected.lines().enumerate() {
            let line_num = i + 1;
            let Some(entry) = line
                .strip_prefix("//")
                .and_then(|line| line.trim().strip_prefix("config:"))
                .map(str::trim)
            else {
                break;
            };
            let values = match toml::from_str::<toml::Value>(entry) {
                Ok(toml::Value::Table(table)) => table,
                // Fix: interpolate the actual file name instead of the
                // literal "(unknown)" placeholder.
                r => panic!("invalid fmt config item in {filename} at {line_num}: {r:?}"),
            };
            config_table.extend(values);
            comments_end += line.len() + 1;
        }
        let config = Arc::new(
            config
                .try_into::<FormatterConfig>()
                .unwrap_or_else(|err| panic!("invalid test config for {filename}: {err}")),
        );
        let original = original.clone();
        // Name the worker thread after the fixture so failures are attributable.
        let tname = format!("{base_name}/{filename}");
        let spawn = move || {
            test_formatter(&path, config.clone(), &original, &expected, comments_end);
        };
        handles.push(std::thread::Builder::new().name(tname).spawn(spawn).unwrap());
    }
    // Join all workers, propagating any panic.
    let results = handles.into_iter().map(|h| h.join()).collect::<Vec<_>>();
    for result in results {
        result.unwrap();
    }
}
/// Formats the sibling `original.sol` with `config` and snapshots the result
/// against `expected_path`; then re-formats the expected file itself to check
/// the formatter is idempotent.
///
/// `comments_end` is the byte length of the leading `// config:` header in
/// `expected_source`; it is prepended to the formatted output so the expected
/// file can be compared verbatim.
fn test_formatter(
    expected_path: &Path,
    config: Arc<FormatterConfig>,
    source: &str,
    expected_source: &str,
    comments_end: usize,
) {
    let path = &*expected_path.with_file_name("original.sol");
    let expected_data = || Data::read_from(expected_path, None).raw();
    let mut source_formatted = format(source, path, config.clone());
    // Inject `expected`'s comments, if any, so we can use the expected file as a snapshot.
    source_formatted.insert_str(0, &expected_source[..comments_end]);
    assert_data_eq!(&source_formatted, expected_data());
    assert_eof(&source_formatted);
    let mut expected_content = std::fs::read_to_string(expected_path).unwrap();
    if cfg!(windows) {
        expected_content = expected_content.replace("\r\n", "\n");
    }
    // Idempotency: formatting already-formatted output must be a no-op.
    let expected_formatted = format(&expected_content, expected_path, config);
    assert_data_eq!(&expected_formatted, expected_data());
    assert_eof(expected_source);
    assert_eof(&expected_formatted);
}
/// Fails if any directory under `testdata/` is missing from the list of test
/// names declared in the `fmt_tests!` macro invocation.
fn test_all_dirs_are_declared(dirs: &[&str]) {
    let undeclared: Vec<String> = tests_dir()
        .read_dir()
        .unwrap()
        .filter_map(Result::ok)
        .filter_map(|actual_dir| {
            let path = actual_dir.path();
            assert!(path.is_dir(), "expected directory: {path:?}");
            let name = path.file_name().unwrap().to_str().unwrap().to_string();
            // Keep only directories that the macro call does not mention.
            (!dirs.contains(&name.as_str())).then_some(name)
        })
        .collect();
    if !undeclared.is_empty() {
        panic!(
            "the following test directories are not declared in the test suite macro call: {undeclared:#?}"
        );
    }
}
/// Declares one `#[test]` per testdata directory name, plus a meta-test
/// (`all_dirs_are_declared`) that fails when a directory on disk is missing
/// from the list. Per-entry attributes (e.g. `#[ignore]`) are forwarded.
macro_rules! fmt_tests {
    ($($(#[$attr:meta])* $dir:ident),+ $(,)?) => {
        #[test]
        fn all_dirs_are_declared() {
            test_all_dirs_are_declared(&[$(stringify!($dir)),*]);
        }
        $(
            #[allow(non_snake_case)]
            #[test]
            $(#[$attr])*
            fn $dir() {
                test_directory(stringify!($dir));
            }
        )+
    };
}
// One test per `testdata/<Name>` directory; names must match the directories
// on disk (checked by `all_dirs_are_declared`).
fmt_tests! {
    #[ignore = "annotations are not valid Solidity"]
    Annotation,
    ArrayExpressions,
    BlockComments,
    BlockCommentsFunction,
    ConditionalOperatorExpression,
    ConstructorDefinition,
    ConstructorModifierStyle,
    ContractDefinition,
    DocComments,
    DoWhileStatement,
    EmitStatement,
    EnumDefinition,
    EnumVariants,
    ErrorDefinition,
    EventDefinition,
    ForStatement,
    FunctionCall,
    FunctionCallArgsStatement,
    FunctionDefinition,
    FunctionDefinitionWithFunctionReturns,
    FunctionType,
    HexUnderscore,
    IfStatement,
    IfStatement2,
    ImportDirective,
    InlineDisable,
    IntTypes,
    LiteralExpression,
    MappingType,
    ModifierDefinition,
    NamedFunctionCallExpression,
    NonKeywords,
    NumberLiteralUnderscore,
    OperatorExpressions,
    PragmaDirective,
    Repros,
    ReprosCalls,
    ReprosFunctionDefs,
    ReturnStatement,
    RevertNamedArgsStatement,
    RevertStatement,
    SimpleComments,
    SortedImports,
    StatementBlock,
    StructDefinition,
    ThisExpression,
    #[ignore = "Solar errors when parsing inputs with trailing commas"]
    TrailingComma,
    TryStatement,
    TypeDefinition,
    UnitExpression,
    UsingDirective,
    VariableAssignment,
    VariableDefinition,
    WhileStatement,
    Yul,
    YulStrings,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/vyper.rs | crates/config/src/vyper.rs | //! Vyper specific configuration types.
use foundry_compilers::artifacts::vyper::VyperOptimizationMode;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Vyper compiler configuration (the `[vyper]` section of `foundry.toml`).
/// All fields are optional and omitted from serialization when unset.
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct VyperConfig {
    /// Vyper optimization mode: "gas", "none" or "codesize".
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub optimize: Option<VyperOptimizationMode>,
    /// Path to the Vyper compiler binary to use, if any.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<PathBuf>,
    /// Optionally enables the experimental Venom pipeline.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub experimental_codegen: Option<bool>,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/lib.rs | crates/config/src/lib.rs | //! # foundry-config
//!
//! Foundry configuration.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate tracing;
use crate::cache::StorageCachingConfig;
use alloy_primitives::{Address, B256, FixedBytes, U256, address, map::AddressHashMap};
use eyre::{ContextCompat, WrapErr};
use figment::{
Error, Figment, Metadata, Profile, Provider,
providers::{Env, Format, Serialized, Toml},
value::{Dict, Map, Value},
};
use filter::GlobMatcher;
use foundry_compilers::{
ArtifactOutput, ConfigurableArtifacts, Graph, Project, ProjectPathsConfig,
RestrictionsWithVersion, VyperLanguage,
artifacts::{
BytecodeHash, DebuggingSettings, EvmVersion, Libraries, ModelCheckerSettings,
ModelCheckerTarget, Optimizer, OptimizerDetails, RevertStrings, Settings, SettingsMetadata,
Severity,
output_selection::{ContractOutputSelection, OutputSelection},
remappings::{RelativeRemapping, Remapping},
serde_helpers,
},
cache::SOLIDITY_FILES_CACHE_FILENAME,
compilers::{
Compiler,
multi::{MultiCompiler, MultiCompilerSettings},
solc::{Solc, SolcCompiler},
vyper::{Vyper, VyperSettings},
},
error::SolcError,
multi::{MultiCompilerParser, MultiCompilerRestrictions},
solc::{CliSettings, SolcLanguage, SolcSettings},
};
use regex::Regex;
use revm::primitives::hardfork::SpecId;
use semver::Version;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de};
use std::{
borrow::Cow,
collections::BTreeMap,
fs,
path::{Path, PathBuf},
str::FromStr,
};
mod macros;
pub mod utils;
pub use utils::*;
mod endpoints;
pub use endpoints::{
ResolvedRpcEndpoint, ResolvedRpcEndpoints, RpcEndpoint, RpcEndpointUrl, RpcEndpoints,
};
mod etherscan;
pub use etherscan::EtherscanConfigError;
use etherscan::{EtherscanConfigs, EtherscanEnvProvider, ResolvedEtherscanConfig};
pub mod resolve;
pub use resolve::UnresolvedEnvVarError;
pub mod cache;
use cache::{Cache, ChainCache};
pub mod fmt;
pub use fmt::FormatterConfig;
pub mod lint;
pub use lint::{LinterConfig, Severity as LintSeverity};
pub mod fs_permissions;
pub use fs_permissions::FsPermissions;
use fs_permissions::PathPermission;
pub mod error;
use error::ExtractConfigError;
pub use error::SolidityErrorCode;
pub mod doc;
pub use doc::DocConfig;
pub mod filter;
pub use filter::SkipBuildFilters;
mod warning;
pub use warning::*;
pub mod fix;
// reexport so cli types can implement `figment::Provider` to easily merge compiler arguments
pub use alloy_chains::{Chain, NamedChain};
pub use figment;
pub mod providers;
pub use providers::Remappings;
use providers::*;
mod fuzz;
pub use fuzz::{FuzzConfig, FuzzCorpusConfig, FuzzDictionaryConfig};
mod invariant;
pub use invariant::InvariantConfig;
mod inline;
pub use inline::{InlineConfig, InlineConfigError, NatSpec};
pub mod soldeer;
use soldeer::{SoldeerConfig, SoldeerDependencyConfig};
mod vyper;
pub use vyper::VyperConfig;
mod bind_json;
use bind_json::BindJsonConfig;
mod compilation;
pub use compilation::{CompilationRestrictions, SettingsOverrides};
pub mod extend;
use extend::Extends;
use foundry_evm_networks::NetworkConfigs;
pub use semver;
/// Foundry configuration
///
/// # Defaults
///
/// All configuration values have a default, documented in the [fields](#fields)
/// section below. [`Config::default()`] returns the default values for
/// the default profile while [`Config::with_root()`] returns the values based on the given
/// directory. [`Config::load()`] starts with the default profile and merges various providers into
/// the config, same for [`Config::load_with_root()`], but there the default values are determined
/// by [`Config::with_root()`]
///
/// # Provider Details
///
/// `Config` is a Figment [`Provider`] with the following characteristics:
///
/// * **Profile**
///
/// The profile is set to the value of the `profile` field.
///
/// * **Metadata**
///
/// This provider is named `Foundry Config`. It does not specify a
/// [`Source`](figment::Source) and uses default interpolation.
///
/// * **Data**
///
/// The data emitted by this provider are the keys and values corresponding
/// to the fields and values of the structure. The dictionary is emitted to
/// the "default" meta-profile.
///
/// Note that these behaviors differ from those of [`Config::figment()`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Config {
/// The selected profile. **(default: _default_ `default`)**
///
/// **Note:** This field is never serialized nor deserialized. When a
/// `Config` is merged into a `Figment` as a `Provider`, this profile is
/// selected on the `Figment`. When a `Config` is extracted, this field is
/// set to the extracting Figment's selected `Profile`.
#[serde(skip)]
pub profile: Profile,
/// The list of all profiles defined in the config.
///
/// See `profile`.
#[serde(skip)]
pub profiles: Vec<Profile>,
/// The root path where the config detection started from, [`Config::with_root`].
// We're skipping serialization here, so it won't be included in the [`Config::to_string()`]
// representation, but will be deserialized from the `Figment` so that forge commands can
// override it.
#[serde(default = "root_default", skip_serializing)]
pub root: PathBuf,
/// Configuration for extending from another foundry.toml (base) file.
///
/// Can be either a string path or an object with path and strategy.
/// Base files cannot extend (inherit) other files.
#[serde(default, skip_serializing)]
pub extends: Option<Extends>,
/// Path of the sources directory.
///
/// Defaults to `src`.
pub src: PathBuf,
/// Path of the tests directory.
pub test: PathBuf,
/// Path of the scripts directory.
pub script: PathBuf,
/// Path to the artifacts directory.
pub out: PathBuf,
/// Paths to all library folders, such as `lib`, or `node_modules`.
pub libs: Vec<PathBuf>,
/// Remappings to use for this repo
pub remappings: Vec<RelativeRemapping>,
/// Whether to autodetect remappings.
pub auto_detect_remappings: bool,
/// Library addresses to link.
pub libraries: Vec<String>,
/// Whether to enable the build cache.
pub cache: bool,
/// The path to the cache store.
pub cache_path: PathBuf,
/// Whether to dynamically link tests.
pub dynamic_test_linking: bool,
/// Where the gas snapshots are stored.
pub snapshots: PathBuf,
/// Whether to check for differences against previously stored gas snapshots.
pub gas_snapshot_check: bool,
/// Whether to emit gas snapshots to disk.
pub gas_snapshot_emit: bool,
/// The path to store broadcast logs at.
pub broadcast: PathBuf,
/// Additional paths passed to `solc --allow-paths`.
pub allow_paths: Vec<PathBuf>,
/// Additional paths passed to `solc --include-path`.
pub include_paths: Vec<PathBuf>,
/// Glob patterns for file paths to skip when building and executing contracts.
pub skip: Vec<GlobMatcher>,
/// Whether to forcefully clean all project artifacts before running commands.
pub force: bool,
/// The EVM version to use when building contracts.
#[serde(with = "from_str_lowercase")]
pub evm_version: EvmVersion,
/// List of contracts to generate gas reports for.
pub gas_reports: Vec<String>,
/// List of contracts to ignore for gas reports.
pub gas_reports_ignore: Vec<String>,
/// Whether to include gas reports for tests.
pub gas_reports_include_tests: bool,
/// The Solc instance to use if any.
///
/// This takes precedence over `auto_detect_solc`, if a version is set then this overrides
/// auto-detection.
///
/// **Note** for backwards compatibility reasons this also accepts solc_version from the toml
/// file, see `BackwardsCompatTomlProvider`.
///
/// Avoid using this field directly; call the related `solc` methods instead.
#[doc(hidden)]
pub solc: Option<SolcReq>,
/// Whether to autodetect the solc compiler version to use.
pub auto_detect_solc: bool,
/// Offline mode, if set, network access (downloading solc) is disallowed.
///
/// Relationship with `auto_detect_solc`:
/// - if `auto_detect_solc = true` and `offline = true`, the required solc version(s) will
/// be auto detected but if the solc version is not installed, it will _not_ try to
/// install it
pub offline: bool,
/// Whether to activate optimizer
pub optimizer: Option<bool>,
/// The number of runs specifies roughly how often each opcode of the deployed code will be
/// executed across the life-time of the contract. This means it is a trade-off parameter
/// between code size (deploy cost) and code execution cost (cost after deployment).
/// An `optimizer_runs` parameter of `1` will produce short but expensive code. In contrast, a
/// larger `optimizer_runs` parameter will produce longer but more gas efficient code. The
/// maximum value of the parameter is `2**32-1`.
///
/// A common misconception is that this parameter specifies the number of iterations of the
/// optimizer. This is not true: The optimizer will always run as many times as it can
/// still improve the code.
pub optimizer_runs: Option<usize>,
/// Switch optimizer components on or off in detail.
/// The "enabled" switch above provides two defaults which can be
/// tweaked here. If "details" is given, "enabled" can be omitted.
pub optimizer_details: Option<OptimizerDetails>,
/// Model checker settings.
pub model_checker: Option<ModelCheckerSettings>,
/// verbosity to use
pub verbosity: u8,
/// url of the rpc server that should be used for any rpc calls
pub eth_rpc_url: Option<String>,
/// Whether to accept invalid certificates for the rpc server.
pub eth_rpc_accept_invalid_certs: bool,
/// JWT secret that should be used for any rpc calls
pub eth_rpc_jwt: Option<String>,
/// Timeout that should be used for any rpc calls
pub eth_rpc_timeout: Option<u64>,
/// Headers that should be used for any rpc calls
///
/// # Example
///
/// rpc_headers = ["x-custom-header:value", "x-another-header:another-value"]
///
/// You can also the ETH_RPC_HEADERS env variable like so:
/// `ETH_RPC_HEADERS="x-custom-header:value x-another-header:another-value"`
pub eth_rpc_headers: Option<Vec<String>>,
/// etherscan API key, or alias for an `EtherscanConfig` in `etherscan` table
pub etherscan_api_key: Option<String>,
/// Multiple etherscan api configs and their aliases
#[serde(default, skip_serializing_if = "EtherscanConfigs::is_empty")]
pub etherscan: EtherscanConfigs,
/// List of solidity error codes to always silence in the compiler output.
pub ignored_error_codes: Vec<SolidityErrorCode>,
/// List of file paths to ignore.
#[serde(rename = "ignored_warnings_from")]
pub ignored_file_paths: Vec<PathBuf>,
/// Diagnostic level (minimum) at which the process should finish with a non-zero exit.
pub deny: DenyLevel,
/// DEPRECATED: use `deny` instead.
#[serde(default, skip_serializing)]
pub deny_warnings: bool,
/// Only run test functions matching the specified regex pattern.
#[serde(rename = "match_test")]
pub test_pattern: Option<RegexWrapper>,
/// Only run test functions that do not match the specified regex pattern.
#[serde(rename = "no_match_test")]
pub test_pattern_inverse: Option<RegexWrapper>,
/// Only run tests in contracts matching the specified regex pattern.
#[serde(rename = "match_contract")]
pub contract_pattern: Option<RegexWrapper>,
/// Only run tests in contracts that do not match the specified regex pattern.
#[serde(rename = "no_match_contract")]
pub contract_pattern_inverse: Option<RegexWrapper>,
/// Only run tests in source files matching the specified glob pattern.
#[serde(rename = "match_path", with = "from_opt_glob")]
pub path_pattern: Option<globset::Glob>,
/// Only run tests in source files that do not match the specified glob pattern.
#[serde(rename = "no_match_path", with = "from_opt_glob")]
pub path_pattern_inverse: Option<globset::Glob>,
/// Only show coverage for files that do not match the specified regex pattern.
#[serde(rename = "no_match_coverage")]
pub coverage_pattern_inverse: Option<RegexWrapper>,
/// Path where last test run failures are recorded.
pub test_failures_file: PathBuf,
/// Max concurrent threads to use.
pub threads: Option<usize>,
/// Whether to show test execution progress.
pub show_progress: bool,
/// Configuration for fuzz testing
pub fuzz: FuzzConfig,
/// Configuration for invariant testing
pub invariant: InvariantConfig,
/// Whether to allow ffi cheatcodes in test
pub ffi: bool,
/// Whether to allow `expectRevert` for internal functions.
pub allow_internal_expect_revert: bool,
/// Use the create 2 factory in all cases including tests and non-broadcasting scripts.
pub always_use_create_2_factory: bool,
/// Sets a timeout in seconds for vm.prompt cheatcodes
pub prompt_timeout: u64,
/// The address which will be executing all tests
pub sender: Address,
/// The tx.origin value during EVM execution
pub tx_origin: Address,
/// the initial balance of each deployed test contract
pub initial_balance: U256,
/// the block.number value during EVM execution
#[serde(
deserialize_with = "crate::deserialize_u64_to_u256",
serialize_with = "crate::serialize_u64_or_u256"
)]
pub block_number: U256,
/// pins the block number for the state fork
pub fork_block_number: Option<u64>,
/// The chain name or EIP-155 chain ID.
#[serde(rename = "chain_id", alias = "chain")]
pub chain: Option<Chain>,
/// Block gas limit.
pub gas_limit: GasLimit,
/// EIP-170: Contract code size limit in bytes. Useful to increase this because of tests.
pub code_size_limit: Option<usize>,
/// `tx.gasprice` value during EVM execution.
///
/// This is an Option, so we can determine in fork mode whether to use the config's gas price
/// (if set by user) or the remote client's gas price.
pub gas_price: Option<u64>,
/// The base fee in a block.
pub block_base_fee_per_gas: u64,
/// The `block.coinbase` value during EVM execution.
pub block_coinbase: Address,
/// The `block.timestamp` value during EVM execution.
#[serde(
deserialize_with = "crate::deserialize_u64_to_u256",
serialize_with = "crate::serialize_u64_or_u256"
)]
pub block_timestamp: U256,
/// The `block.difficulty` value during EVM execution.
pub block_difficulty: u64,
/// Before merge the `block.max_hash`, after merge it is `block.prevrandao`.
pub block_prevrandao: B256,
/// The `block.gaslimit` value during EVM execution.
pub block_gas_limit: Option<GasLimit>,
/// The memory limit per EVM execution in bytes.
/// If this limit is exceeded, a `MemoryLimitOOG` result is thrown.
///
/// The default is 128MiB.
pub memory_limit: u64,
/// Additional output selection for all contracts, such as "ir", "devdoc", "storageLayout",
/// etc.
///
/// See the [Solc Compiler Api](https://docs.soliditylang.org/en/latest/using-the-compiler.html#compiler-api) for more information.
///
/// The following values are always set because they're required by `forge`:
/// ```json
/// {
/// "*": [
/// "abi",
/// "evm.bytecode",
/// "evm.deployedBytecode",
/// "evm.methodIdentifiers"
/// ]
/// }
/// ```
#[serde(default)]
pub extra_output: Vec<ContractOutputSelection>,
/// If set, a separate JSON file will be emitted for every contract depending on the
/// selection, eg. `extra_output_files = ["metadata"]` will create a `metadata.json` for
/// each contract in the project.
///
/// See [Contract Metadata](https://docs.soliditylang.org/en/latest/metadata.html) for more information.
///
/// The difference between `extra_output = ["metadata"]` and
/// `extra_output_files = ["metadata"]` is that the former will include the
/// contract's metadata in the contract's json artifact, whereas the latter will emit the
/// output selection as separate files.
#[serde(default)]
pub extra_output_files: Vec<ContractOutputSelection>,
/// Whether to print the names of the compiled contracts.
pub names: bool,
/// Whether to print the sizes of the compiled contracts.
pub sizes: bool,
/// If set to true, changes compilation pipeline to go through the Yul intermediate
/// representation.
pub via_ir: bool,
/// Whether to include the AST as JSON in the compiler output.
pub ast: bool,
/// RPC storage caching settings determines what chains and endpoints to cache
pub rpc_storage_caching: StorageCachingConfig,
/// Disables storage caching entirely. This overrides any settings made in
/// `rpc_storage_caching`
pub no_storage_caching: bool,
/// Disables rate limiting entirely. This overrides any settings made in
/// `compute_units_per_second`
pub no_rpc_rate_limit: bool,
/// Multiple rpc endpoints and their aliases
#[serde(default, skip_serializing_if = "RpcEndpoints::is_empty")]
pub rpc_endpoints: RpcEndpoints,
/// Whether to store the referenced sources in the metadata as literal data.
pub use_literal_content: bool,
/// Whether to include the metadata hash.
///
/// The metadata hash is machine dependent. By default, this is set to [BytecodeHash::None] to allow for deterministic code, See: <https://docs.soliditylang.org/en/latest/metadata.html>
#[serde(with = "from_str_lowercase")]
pub bytecode_hash: BytecodeHash,
/// Whether to append the metadata hash to the bytecode.
///
/// If this is `false` and the `bytecode_hash` option above is not `None` solc will issue a
/// warning.
pub cbor_metadata: bool,
/// How to treat revert (and require) reason strings.
#[serde(with = "serde_helpers::display_from_str_opt")]
pub revert_strings: Option<RevertStrings>,
/// Whether to compile in sparse mode
///
/// If this option is enabled, only the required contracts/files will be selected to be
/// included in solc's output selection, see also [`OutputSelection`].
pub sparse_mode: bool,
/// Generates additional build info json files for every new build, containing the
/// `CompilerInput` and `CompilerOutput`.
pub build_info: bool,
/// The path to the `build-info` directory that contains the build info json files.
pub build_info_path: Option<PathBuf>,
/// Configuration for `forge fmt`
pub fmt: FormatterConfig,
/// Configuration for `forge lint`
pub lint: LinterConfig,
/// Configuration for `forge doc`
pub doc: DocConfig,
/// Configuration for `forge bind-json`
pub bind_json: BindJsonConfig,
/// Configures the permissions of cheat codes that touch the file system.
///
/// This includes what operations can be executed (read, write)
pub fs_permissions: FsPermissions,
/// Whether to enable call isolation.
///
/// Useful for more correct gas accounting and EVM behavior in general.
pub isolate: bool,
/// Whether to disable the block gas limit checks.
pub disable_block_gas_limit: bool,
/// Whether to enable the tx gas limit checks as imposed by Osaka (EIP-7825).
pub enable_tx_gas_limit: bool,
/// Address labels
pub labels: AddressHashMap<String>,
/// Whether to enable safety checks for `vm.getCode` and `vm.getDeployedCode` invocations.
/// If disabled, it is possible to access artifacts which were not recompiled or cached.
pub unchecked_cheatcode_artifacts: bool,
/// CREATE2 salt to use for the library deployment in scripts.
pub create2_library_salt: B256,
/// The CREATE2 deployer address to use.
pub create2_deployer: Address,
/// Configuration for Vyper compiler
pub vyper: VyperConfig,
/// Soldeer dependencies
pub dependencies: Option<SoldeerDependencyConfig>,
/// Soldeer custom configs
pub soldeer: Option<SoldeerConfig>,
/// Whether failed assertions should revert.
///
/// Note that this only applies to native (cheatcode) assertions, invoked on Vm contract.
pub assertions_revert: bool,
/// Whether `failed()` should be invoked to check if the test have failed.
pub legacy_assertions: bool,
/// Optional additional CLI arguments to pass to `solc` binary.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub extra_args: Vec<String>,
/// Networks with enabled features.
#[serde(flatten)]
pub networks: NetworkConfigs,
/// Timeout for transactions in seconds.
pub transaction_timeout: u64,
/// Warnings gathered when loading the Config. See [`WarningsProvider`] for more information.
#[serde(rename = "__warnings", default, skip_serializing)]
pub warnings: Vec<Warning>,
/// Additional settings profiles to use when compiling.
#[serde(default)]
pub additional_compiler_profiles: Vec<SettingsOverrides>,
/// Restrictions on compilation of certain files.
#[serde(default)]
pub compilation_restrictions: Vec<CompilationRestrictions>,
/// Whether to enable script execution protection.
pub script_execution_protection: bool,
/// PRIVATE: This structure may grow, As such, constructing this structure should
/// _always_ be done using a public constructor or update syntax:
///
/// ```ignore
/// use foundry_config::Config;
///
/// let config = Config { src: "other".into(), ..Default::default() };
/// ```
#[doc(hidden)]
#[serde(skip)]
pub _non_exhaustive: (),
}
/// Diagnostic level (minimum) at which the process should finish with a non-zero exit.
///
/// Serializes to the lowercase variant name. Deserialization is implemented manually
/// (see the `Deserialize` impl below) to be case-insensitive and backwards compatible
/// with the old boolean `deny_warnings` setting.
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum DenyLevel {
    /// Always exit with zero code.
    #[default]
    Never,
    /// Exit with a non-zero code if any warnings are found.
    Warnings,
    /// Exit with a non-zero code if any notes or warnings are found.
    Notes,
}
// Custom deserialization to make `DenyLevel` parsing case-insensitive and backwards compatible with
// booleans.
impl<'de> Deserialize<'de> for DenyLevel {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// Visitor that accepts either a boolean or a string representation.
        struct DenyLevelVisitor;

        impl<'de> de::Visitor<'de> for DenyLevelVisitor {
            type Value = DenyLevel;

            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                formatter.write_str("one of the following strings: `never`, `warnings`, `notes`")
            }

            // Backwards compatibility: the deprecated `deny_warnings` key was a boolean.
            fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                Ok(DenyLevel::from(value))
            }

            // Case-insensitivity is handled inside the `FromStr` impl.
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                DenyLevel::from_str(value).map_err(de::Error::custom)
            }
        }

        // `deserialize_any` lets the visitor dispatch on the source type (bool vs string).
        deserializer.deserialize_any(DenyLevelVisitor)
    }
}
impl FromStr for DenyLevel {
    type Err = String;

    /// Parses a deny level, case-insensitively.
    ///
    /// Accepted values:
    /// - `warnings` / `warning` / `w` / `true` / `t` -> [`DenyLevel::Warnings`]
    /// - `notes` / `note` / `n` -> [`DenyLevel::Notes`]
    /// - `never` / `false` / `f` -> [`DenyLevel::Never`]
    ///
    /// `true`/`false` are accepted for backwards compatibility with the boolean
    /// `deny_warnings` setting, mirroring the `From<bool>` impl for this type.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            // bool-compat: `true` maps to `Warnings`, like `From<bool>`.
            "warnings" | "warning" | "w" | "true" | "t" => Ok(Self::Warnings),
            "notes" | "note" | "n" => Ok(Self::Notes),
            // bool-compat: `false` maps to `Never`.
            "never" | "false" | "f" => Ok(Self::Never),
            _ => Err(format!(
                "unknown variant: found `{s}`, expected one of `never`, `warnings`, `notes`"
            )),
        }
    }
}
impl From<bool> for DenyLevel {
    /// Maps the legacy boolean `deny_warnings` setting onto a level.
    fn from(deny: bool) -> Self {
        match deny {
            true => Self::Warnings,
            false => Self::Never,
        }
    }
}

impl DenyLevel {
    /// Returns `true` if the deny level includes warnings.
    pub fn warnings(&self) -> bool {
        // Every level except `Never` denies warnings.
        !matches!(self, Self::Never)
    }

    /// Returns `true` if the deny level includes notes.
    pub fn notes(&self) -> bool {
        // Only the strictest level denies notes.
        matches!(self, Self::Notes)
    }

    /// Returns `true` if the deny level is set to never (only errors).
    pub fn never(&self) -> bool {
        matches!(self, Self::Never)
    }
}
/// Mapping of fallback standalone sections. See [`FallbackProfileProvider`].
///
/// E.g. `[invariant]` falls back to values from `[fuzz]` for keys it does not set itself.
pub const STANDALONE_FALLBACK_SECTIONS: &[(&str, &str)] = &[("invariant", "fuzz")];

/// Deprecated keys and their replacements.
///
/// See [Warning::DeprecatedKey]
pub const DEPRECATIONS: &[(&str, &str)] =
    &[("cancun", "evm_version = Cancun"), ("deny_warnings", "deny = warnings")];
impl Config {
    /// The default profile: "default"
    pub const DEFAULT_PROFILE: Profile = Profile::Default;

    /// The hardhat profile: "hardhat"
    pub const HARDHAT_PROFILE: Profile = Profile::const_new("hardhat");

    /// TOML section for profiles
    pub const PROFILE_SECTION: &'static str = "profile";

    /// External config sections, ignored from warnings.
    pub const EXTERNAL_SECTION: &'static str = "external";

    /// Standalone sections in the config which get integrated into the selected profile
    ///
    /// These are top-level tables like `[fmt]` rather than `[profile.<name>.fmt]`.
    pub const STANDALONE_SECTIONS: &'static [&'static str] = &[
        "rpc_endpoints",
        "etherscan",
        "fmt",
        "lint",
        "doc",
        "fuzz",
        "invariant",
        "labels",
        "dependencies",
        "soldeer",
        "vyper",
        "bind_json",
    ];
pub(crate) fn is_standalone_section<T: ?Sized + PartialEq<str>>(section: &T) -> bool {
section == Self::PROFILE_SECTION
|| section == Self::EXTERNAL_SECTION
|| Self::STANDALONE_SECTIONS.iter().any(|s| section == *s)
}
    /// File name of config toml file
    pub const FILE_NAME: &'static str = "foundry.toml";

    /// The name of the directory foundry reserves for itself under the user's home directory: `~`
    /// (i.e. `~/.foundry`)
    pub const FOUNDRY_DIR_NAME: &'static str = ".foundry";

    /// Default address for tx.origin
    ///
    /// `0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38`
    pub const DEFAULT_SENDER: Address = address!("0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38");

    /// Default salt for create2 library deployments
    pub const DEFAULT_CREATE2_LIBRARY_SALT: FixedBytes<32> = FixedBytes::<32>::ZERO;

    /// Default create2 deployer
    pub const DEFAULT_CREATE2_DEPLOYER: Address =
        address!("0x4e59b44847b379578588920ca78fbf26c0b4956c");
    /// Loads the `Config` from the current directory.
    ///
    /// See [`figment`](Self::figment) for more details.
    ///
    /// # Errors
    ///
    /// Returns an [`ExtractConfigError`] if the merged providers fail to extract a config.
    pub fn load() -> Result<Self, ExtractConfigError> {
        Self::from_provider(Self::figment())
    }

    /// Loads the `Config` with the given `providers` preset.
    ///
    /// See [`figment`](Self::figment) for more details.
    pub fn load_with_providers(providers: FigmentProviders) -> Result<Self, ExtractConfigError> {
        Self::from_provider(Self::default().to_figment(providers))
    }

    /// Loads the `Config` from the given root directory.
    ///
    /// See [`figment_with_root`](Self::figment_with_root) for more details.
    #[track_caller]
    pub fn load_with_root(root: impl AsRef<Path>) -> Result<Self, ExtractConfigError> {
        Self::from_provider(Self::figment_with_root(root.as_ref()))
    }

    /// Attempts to extract a `Config` from `provider`, returning the result.
    ///
    /// # Example
    ///
    /// ```rust
    /// use figment::providers::{Env, Format, Toml};
    /// use foundry_config::Config;
    ///
    /// // Use foundry's default `Figment`, but allow values from `other.toml`
    /// // to supersede its values.
    /// let figment = Config::figment().merge(Toml::file("other.toml").nested());
    ///
    /// let config = Config::from_provider(figment);
    /// ```
    #[doc(alias = "try_from")]
    pub fn from_provider<T: Provider>(provider: T) -> Result<Self, ExtractConfigError> {
        trace!("load config with provider: {:?}", provider.metadata());
        Self::from_figment(Figment::from(provider))
    }

    /// Deprecated alias for [`Self::from_provider`].
    #[doc(hidden)]
    #[deprecated(note = "use `Config::from_provider` instead")]
    pub fn try_from<T: Provider>(provider: T) -> Result<Self, ExtractConfigError> {
        Self::from_provider(provider)
    }
    /// Extracts the config from the given [`Figment`] and records all known profiles.
    fn from_figment(figment: Figment) -> Result<Self, ExtractConfigError> {
        let mut config = figment.extract::<Self>().map_err(ExtractConfigError::new)?;
        // Remember which profile this config was extracted with.
        config.profile = figment.profile().clone();

        // Records a profile name exactly once.
        let mut add_profile = |profile: &Profile| {
            if !config.profiles.contains(profile) {
                config.profiles.push(profile.clone());
            }
        };

        // The `"profile"` profile contains all the profiles as keys.
        let figment = figment.select(Self::PROFILE_SECTION);
        if let Ok(data) = figment.data()
            && let Some(profiles) = data.get(&Profile::new(Self::PROFILE_SECTION))
        {
            for profile in profiles.keys() {
                add_profile(&Profile::new(profile));
            }
        }
        // The default profile and the selected profile are always considered present.
        add_profile(&Self::DEFAULT_PROFILE);
        add_profile(&config.profile);

        config.normalize_optimizer_settings();

        Ok(config)
    }
/// Returns the populated [Figment] using the requested [FigmentProviders] preset.
///
/// This will merge various providers, such as env,toml,remappings into the figment if
/// requested.
pub fn to_figment(&self, providers: FigmentProviders) -> Figment {
// Note that `Figment::from` here is a method on `Figment` rather than the `From` impl below
if providers.is_none() {
return Figment::from(self);
}
let root = self.root.as_path();
let profile = Self::selected_profile();
let mut figment = Figment::default().merge(DappHardhatDirProvider(root));
// merge global foundry.toml file
if let Some(global_toml) = Self::foundry_dir_toml().filter(|p| p.exists()) {
figment = Self::merge_toml_provider(
figment,
TomlFileProvider::new(None, global_toml),
profile.clone(),
);
}
// merge local foundry.toml file
figment = Self::merge_toml_provider(
figment,
TomlFileProvider::new(Some("FOUNDRY_CONFIG"), root.join(Self::FILE_NAME)),
profile.clone(),
);
// merge environment variables
figment = figment
.merge(
Env::prefixed("DAPP_")
.ignore(&["REMAPPINGS", "LIBRARIES", "FFI", "FS_PERMISSIONS"])
.global(),
)
.merge(
Env::prefixed("DAPP_TEST_")
.ignore(&["CACHE", "FUZZ_RUNS", "DEPTH", "FFI", "FS_PERMISSIONS"])
.global(),
)
.merge(DappEnvCompatProvider)
.merge(EtherscanEnvProvider::default())
.merge(
Env::prefixed("FOUNDRY_")
.ignore(&["PROFILE", "REMAPPINGS", "LIBRARIES", "FFI", "FS_PERMISSIONS"])
.map(|key| {
let key = key.as_str();
if Self::STANDALONE_SECTIONS.iter().any(|section| {
key.starts_with(&format!("{}_", section.to_ascii_uppercase()))
}) {
key.replacen('_', ".", 1).into()
} else {
key.into()
}
})
.global(),
)
.select(profile.clone());
// only resolve remappings if all providers are requested
if providers.is_all() {
// we try to merge remappings after we've merged all other providers, this prevents
// redundant fs lookups to determine the default remappings that are eventually updated
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/endpoints.rs | crates/config/src/endpoints.rs | //! Support for multiple RPC-endpoints
use crate::resolve::{RE_PLACEHOLDER, UnresolvedEnvVarError, interpolate};
use serde::{Deserialize, Deserializer, Serialize, Serializer, ser::SerializeMap};
use std::{
collections::BTreeMap,
fmt,
ops::{Deref, DerefMut},
};
/// Container type for API endpoints, like various RPC endpoints
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct RpcEndpoints {
    /// Mapping of endpoint alias -> endpoint, sorted by alias.
    endpoints: BTreeMap<String, RpcEndpoint>,
}

impl RpcEndpoints {
    /// Creates a new list of endpoints
    pub fn new(
        endpoints: impl IntoIterator<Item = (impl Into<String>, impl Into<RpcEndpointType>)>,
    ) -> Self {
        Self {
            endpoints: endpoints
                .into_iter()
                .map(|(name, e)| match e.into() {
                    // A bare url string is promoted to a config with default settings.
                    RpcEndpointType::String(url) => (name.into(), RpcEndpoint::new(url)),
                    RpcEndpointType::Config(config) => (name.into(), config),
                })
                .collect(),
        }
    }

    /// Returns `true` if this type doesn't contain any endpoints
    pub fn is_empty(&self) -> bool {
        self.endpoints.is_empty()
    }

    /// Returns all (alias -> rpc_endpoint) pairs
    ///
    /// Note: consumes `self` and resolves env var placeholders in the process.
    pub fn resolved(self) -> ResolvedRpcEndpoints {
        ResolvedRpcEndpoints {
            endpoints: self.endpoints.into_iter().map(|(name, e)| (name, e.resolve())).collect(),
        }
    }
}

impl Deref for RpcEndpoints {
    type Target = BTreeMap<String, RpcEndpoint>;

    fn deref(&self) -> &Self::Target {
        &self.endpoints
    }
}
/// RPC endpoint wrapper type
///
/// Deserialized untagged: a plain string becomes [`RpcEndpointType::String`], a table becomes
/// [`RpcEndpointType::Config`].
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
#[serde(untagged)]
pub enum RpcEndpointType {
    /// Raw Endpoint url string
    String(RpcEndpointUrl),
    /// Config object
    Config(RpcEndpoint),
}
impl RpcEndpointType {
    /// Returns the string variant
    pub fn as_endpoint_string(&self) -> Option<&RpcEndpointUrl> {
        if let Self::String(url) = self { Some(url) } else { None }
    }

    /// Returns the config variant
    pub fn as_endpoint_config(&self) -> Option<&RpcEndpoint> {
        if let Self::Config(config) = self { Some(config) } else { None }
    }

    /// Returns the url or config this type holds
    ///
    /// # Error
    ///
    /// Returns an error if the type holds a reference to an env var and the env var is not set
    pub fn resolve(self) -> Result<String, UnresolvedEnvVarError> {
        // Both variants ultimately resolve through the wrapped url.
        let endpoint = match self {
            Self::String(url) => url,
            Self::Config(config) => config.endpoint,
        };
        endpoint.resolve()
    }
}
impl fmt::Display for RpcEndpointType {
    // Delegates to the wrapped value's `Display` impl.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::String(url) => url.fmt(f),
            Self::Config(config) => config.fmt(f),
        }
    }
}
impl TryFrom<RpcEndpointType> for String {
type Error = UnresolvedEnvVarError;
fn try_from(value: RpcEndpointType) -> Result<Self, Self::Error> {
match value {
RpcEndpointType::String(url) => url.resolve(),
RpcEndpointType::Config(config) => config.endpoint.resolve(),
}
}
}
/// Represents a single endpoint
///
/// This type preserves the value as it's stored in the config. If the value is a reference to an
/// env var, then the `Endpoint::Env` var will hold the reference (`${MAIN_NET}`) and _not_ the
/// value of the env var itself.
/// In other words, this type does not resolve env vars when it's being deserialized;
/// use [`resolve`](Self::resolve) to obtain the final url.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RpcEndpointUrl {
    /// A raw Url (ws, http)
    Url(String),
    /// An endpoint that contains at least one `${ENV_VAR}` placeholder
    ///
    /// **Note:** this contains the endpoint as is, like `https://eth-mainnet.alchemyapi.io/v2/${API_KEY}` or `${EPC_ENV_VAR}`
    Env(String),
}
impl RpcEndpointUrl {
    /// Returns the url variant
    pub fn as_url(&self) -> Option<&str> {
        if let Self::Url(url) = self { Some(url) } else { None }
    }

    /// Returns the env variant
    pub fn as_env(&self) -> Option<&str> {
        if let Self::Env(val) = self { Some(val) } else { None }
    }

    /// Returns the url this type holds
    ///
    /// # Error
    ///
    /// Returns an error if the type holds a reference to an env var and the env var is not set
    pub fn resolve(self) -> Result<String, UnresolvedEnvVarError> {
        match self {
            // A raw url needs no resolution.
            Self::Url(url) => Ok(url),
            // Substitute every `${VAR}` placeholder from the environment.
            Self::Env(val) => interpolate(&val),
        }
    }
}
impl fmt::Display for RpcEndpointUrl {
    // Writes the raw string as stored; env placeholders are _not_ resolved here.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Url(url) => url.fmt(f),
            Self::Env(var) => var.fmt(f),
        }
    }
}

impl TryFrom<RpcEndpointUrl> for String {
    type Error = UnresolvedEnvVarError;

    // Resolving env placeholders is the only fallible step of the conversion.
    fn try_from(value: RpcEndpointUrl) -> Result<Self, Self::Error> {
        value.resolve()
    }
}
impl Serialize for RpcEndpointUrl {
    /// Serializes the raw string as stored (placeholders unresolved), matching `Display`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Both variants wrap the raw string; serialize it directly instead of going
        // through `Display` + `to_string`, which would allocate an intermediate `String`.
        match self {
            Self::Url(s) | Self::Env(s) => serializer.serialize_str(s),
        }
    }
}
impl<'de> Deserialize<'de> for RpcEndpointUrl {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let val = String::deserialize(deserializer)?;
        // Classify by presence of a `${VAR}` placeholder; env vars are _not_ resolved here.
        let endpoint = if RE_PLACEHOLDER.is_match(&val) { Self::Env(val) } else { Self::Url(val) };
        Ok(endpoint)
    }
}

impl From<RpcEndpointUrl> for RpcEndpointType {
    fn from(endpoint: RpcEndpointUrl) -> Self {
        Self::String(endpoint)
    }
}

impl From<RpcEndpointUrl> for RpcEndpoint {
    // Wraps the url with default auth and retry settings.
    fn from(endpoint: RpcEndpointUrl) -> Self {
        Self { endpoint, ..Default::default() }
    }
}
/// The auth token to be used for RPC endpoints
/// It works in the same way as the `RpcEndpoint` type, where it can be a raw string or a reference
/// to an env var.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RpcAuth {
    /// A raw auth token, used as-is.
    Raw(String),
    /// An auth value containing at least one `${ENV_VAR}` placeholder, kept unresolved.
    Env(String),
}

impl RpcAuth {
    /// Returns the auth token this type holds
    ///
    /// # Error
    ///
    /// Returns an error if the type holds a reference to an env var and the env var is not set
    pub fn resolve(self) -> Result<String, UnresolvedEnvVarError> {
        match self {
            Self::Raw(raw_auth) => Ok(raw_auth),
            Self::Env(var) => interpolate(&var),
        }
    }
}

impl fmt::Display for RpcAuth {
    // Writes the value as stored; env placeholders are _not_ resolved here.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Raw(url) => url.fmt(f),
            Self::Env(var) => var.fmt(f),
        }
    }
}
impl Serialize for RpcAuth {
    /// Serializes the raw value as stored (placeholders unresolved), matching `Display`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Both variants wrap the raw string; serialize it directly instead of going
        // through `Display` + `to_string`, which would allocate an intermediate `String`.
        match self {
            Self::Raw(s) | Self::Env(s) => serializer.serialize_str(s),
        }
    }
}
impl<'de> Deserialize<'de> for RpcAuth {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let val = String::deserialize(deserializer)?;
        // Same classification as `RpcEndpointUrl`: `${VAR}` placeholders stay unresolved.
        let auth = if RE_PLACEHOLDER.is_match(&val) { Self::Env(val) } else { Self::Raw(val) };
        Ok(auth)
    }
}
/// Rpc endpoint configuration
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct RpcEndpointConfig {
    /// The number of retries.
    pub retries: Option<u32>,
    /// Initial retry backoff.
    pub retry_backoff: Option<u64>,
    /// The available compute units per second.
    ///
    /// See also <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
    pub compute_units_per_second: Option<u64>,
}

impl fmt::Display for RpcEndpointConfig {
    // Each set field is written with a leading `, ` because this output is appended
    // after the endpoint url in `RpcEndpoint`'s `Display` impl.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { retries, retry_backoff, compute_units_per_second } = self;

        if let Some(retries) = retries {
            write!(f, ", retries={retries}")?;
        }

        if let Some(retry_backoff) = retry_backoff {
            write!(f, ", retry_backoff={retry_backoff}")?;
        }

        if let Some(compute_units_per_second) = compute_units_per_second {
            write!(f, ", compute_units_per_second={compute_units_per_second}")?;
        }

        Ok(())
    }
}

/// Rpc endpoint configuration variant
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RpcEndpoint {
    /// endpoint url or env
    pub endpoint: RpcEndpointUrl,
    /// Token to be used as authentication
    pub auth: Option<RpcAuth>,
    /// additional configuration
    pub config: RpcEndpointConfig,
}

impl RpcEndpoint {
    /// Creates a new endpoint with default auth and retry settings.
    pub fn new(endpoint: RpcEndpointUrl) -> Self {
        Self { endpoint, ..Default::default() }
    }

    /// Resolves environment variables in fields into their raw values
    pub fn resolve(self) -> ResolvedRpcEndpoint {
        ResolvedRpcEndpoint {
            endpoint: self.endpoint.resolve(),
            auth: self.auth.map(|auth| auth.resolve()),
            config: self.config,
        }
    }
}

impl fmt::Display for RpcEndpoint {
    // Format: `<endpoint>[, retries=..][, retry_backoff=..][, compute_units_per_second=..][, auth=..]`
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self { endpoint, auth, config } = self;
        write!(f, "{endpoint}")?;
        write!(f, "{config}")?;
        if let Some(auth) = auth {
            write!(f, ", auth={auth}")?;
        }
        Ok(())
    }
}
impl Serialize for RpcEndpoint {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        if self.config.retries.is_none()
            && self.config.retry_backoff.is_none()
            && self.config.compute_units_per_second.is_none()
            && self.auth.is_none()
        {
            // serialize as endpoint if there's no additional config
            self.endpoint.serialize(serializer)
        } else {
            // NOTE: all five entries are always emitted, including `None` values; the
            // length hint matches the number of `serialize_entry` calls below.
            let mut map = serializer.serialize_map(Some(5))?;
            map.serialize_entry("endpoint", &self.endpoint)?;
            map.serialize_entry("retries", &self.config.retries)?;
            map.serialize_entry("retry_backoff", &self.config.retry_backoff)?;
            map.serialize_entry("compute_units_per_second", &self.config.compute_units_per_second)?;
            map.serialize_entry("auth", &self.auth)?;
            map.end()
        }
    }
}
impl<'de> Deserialize<'de> for RpcEndpoint {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Buffer into a `Value` first so both the string form and the table form can be handled.
        let value = serde_json::Value::deserialize(deserializer)?;
        if value.is_string() {
            // Plain string: just the endpoint url/env reference, defaults for everything else.
            return Ok(Self {
                endpoint: serde_json::from_value(value).map_err(serde::de::Error::custom)?,
                ..Default::default()
            });
        }

        // Table form; `url` is accepted as an alias for `endpoint`.
        #[derive(Deserialize)]
        struct RpcEndpointConfigInner {
            #[serde(alias = "url")]
            endpoint: RpcEndpointUrl,
            retries: Option<u32>,
            retry_backoff: Option<u64>,
            compute_units_per_second: Option<u64>,
            auth: Option<RpcAuth>,
        }

        let RpcEndpointConfigInner {
            endpoint,
            retries,
            retry_backoff,
            compute_units_per_second,
            auth,
        } = serde_json::from_value(value).map_err(serde::de::Error::custom)?;

        Ok(Self {
            endpoint,
            auth,
            config: RpcEndpointConfig { retries, retry_backoff, compute_units_per_second },
        })
    }
}
impl From<RpcEndpoint> for RpcEndpointType {
    fn from(config: RpcEndpoint) -> Self {
        Self::Config(config)
    }
}

impl Default for RpcEndpoint {
    // Defaults to a local node url with no auth and no retry configuration.
    fn default() -> Self {
        Self {
            endpoint: RpcEndpointUrl::Url("http://localhost:8545".to_string()),
            config: RpcEndpointConfig::default(),
            auth: None,
        }
    }
}
/// Rpc endpoint with environment variables resolved to values, see [`RpcEndpoint::resolve`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ResolvedRpcEndpoint {
    /// The resolved endpoint url, or the resolution error.
    pub endpoint: Result<String, UnresolvedEnvVarError>,
    /// The resolved auth token, if any was configured.
    pub auth: Option<Result<String, UnresolvedEnvVarError>>,
    /// Additional endpoint configuration, carried over unchanged.
    pub config: RpcEndpointConfig,
}

impl ResolvedRpcEndpoint {
    /// Returns the url this type holds, see [`RpcEndpoint::resolve`]
    pub fn url(&self) -> Result<String, UnresolvedEnvVarError> {
        self.endpoint.clone()
    }

    // Returns true if any environment variable failed to resolve, for the endpoint
    // itself or its auth token.
    pub fn is_unresolved(&self) -> bool {
        let endpoint_err = self.endpoint.is_err();
        let auth_err = self.auth.as_ref().map(|auth| auth.is_err()).unwrap_or(false);
        endpoint_err || auth_err
    }

    // Attempts to resolve unresolved environment variables into a new instance
    pub fn try_resolve(mut self) -> Self {
        // Fast path: nothing left to resolve.
        if !self.is_unresolved() {
            return self;
        }
        if let Err(err) = self.endpoint {
            self.endpoint = err.try_resolve()
        }
        if let Some(Err(err)) = self.auth {
            self.auth = Some(err.try_resolve())
        }
        self
    }
}
/// Container type for _resolved_ endpoints.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct ResolvedRpcEndpoints {
    /// Mapping of endpoint alias -> resolved endpoint.
    endpoints: BTreeMap<String, ResolvedRpcEndpoint>,
}

impl ResolvedRpcEndpoints {
    /// Returns true if there's an endpoint that couldn't be resolved
    pub fn has_unresolved(&self) -> bool {
        self.endpoints.values().any(|e| e.is_unresolved())
    }
}

impl Deref for ResolvedRpcEndpoints {
    type Target = BTreeMap<String, ResolvedRpcEndpoint>;

    fn deref(&self) -> &Self::Target {
        &self.endpoints
    }
}

impl DerefMut for ResolvedRpcEndpoints {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.endpoints
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trips both accepted input shapes: the full table form and the bare string form.
    #[test]
    fn serde_rpc_config() {
        // Table form with all optional fields set.
        let s = r#"{
"endpoint": "http://localhost:8545",
"retries": 5,
"retry_backoff": 250,
"compute_units_per_second": 100,
"auth": "Bearer 123"
}"#;
        let config: RpcEndpoint = serde_json::from_str(s).unwrap();
        assert_eq!(
            config,
            RpcEndpoint {
                endpoint: RpcEndpointUrl::Url("http://localhost:8545".to_string()),
                config: RpcEndpointConfig {
                    retries: Some(5),
                    retry_backoff: Some(250),
                    compute_units_per_second: Some(100),
                },
                auth: Some(RpcAuth::Raw("Bearer 123".to_string())),
            }
        );

        // Bare string form: defaults for everything but the endpoint.
        let s = "\"http://localhost:8545\"";
        let config: RpcEndpoint = serde_json::from_str(s).unwrap();
        assert_eq!(
            config,
            RpcEndpoint {
                endpoint: RpcEndpointUrl::Url("http://localhost:8545".to_string()),
                config: RpcEndpointConfig {
                    retries: None,
                    retry_backoff: None,
                    compute_units_per_second: None,
                },
                auth: None,
            }
        );
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/soldeer.rs | crates/config/src/soldeer.rs | //! Configuration specific to the `forge soldeer` command and the `forge_soldeer` package
use serde::{Deserialize, Serialize};
pub use soldeer_core::config::SoldeerConfig;
use std::collections::BTreeMap;
/// Soldeer dependencies config structure when it's defined as a map
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct MapDependency {
    /// The version of the dependency
    pub version: String,
    /// The url from where the dependency was retrieved
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    /// The git URL for the source repo
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub git: Option<String>,
    /// The commit in case git is used as dependency source
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub rev: Option<String>,
    /// The branch in case git is used as dependency source
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub branch: Option<String>,
    /// The git tag in case git is used as dependency source
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tag: Option<String>,
    /// An optional relative path to the project's root within the repository
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub project_root: Option<String>,
}

/// Type for Soldeer configs, under dependencies tag in the foundry.toml
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SoldeerDependencyConfig(pub BTreeMap<String, SoldeerDependencyValue>);

impl AsRef<Self> for SoldeerDependencyConfig {
    fn as_ref(&self) -> &Self {
        self
    }
}

/// Enum to cover both available formats for defining a dependency
/// `dep = { version = "1.1", url = "https://my-dependency" }`
/// or
/// `dep = "1.1"`
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum SoldeerDependencyValue {
    /// Table form: version plus source information.
    Map(MapDependency),
    /// Shorthand form: just the version string.
    Str(String),
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/fix.rs | crates/config/src/fix.rs | //! Helpers to automatically fix configuration warnings.
use crate::{Config, Warning};
use figment::providers::Env;
use std::{
fs, io,
ops::{Deref, DerefMut},
path::{Path, PathBuf},
};
/// A convenience wrapper around a TOML document and the path it was read from
struct TomlFile {
    /// The parsed, format-preserving TOML document.
    doc: toml_edit::DocumentMut,
    /// The file the document was read from and is written back to.
    path: PathBuf,
}

impl TomlFile {
    /// Reads and parses the TOML file at `path`.
    fn open(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref().to_owned();
        let doc = fs::read_to_string(&path)?.parse()?;
        Ok(Self { doc, path })
    }

    fn doc(&self) -> &toml_edit::DocumentMut {
        &self.doc
    }

    fn doc_mut(&mut self) -> &mut toml_edit::DocumentMut {
        &mut self.doc
    }

    fn path(&self) -> &Path {
        self.path.as_ref()
    }

    /// Writes the (possibly modified) document back to its original path.
    fn save(&self) -> io::Result<()> {
        fs::write(self.path(), self.doc().to_string())
    }
}

impl Deref for TomlFile {
    type Target = toml_edit::DocumentMut;

    fn deref(&self) -> &Self::Target {
        self.doc()
    }
}

impl DerefMut for TomlFile {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.doc_mut()
    }
}
/// The error emitted when failing to insert into a profile.
#[derive(Debug)]
struct InsertProfileError {
    /// Human-readable reason the insertion failed.
    pub message: String,
    /// The value that could not be inserted, handed back so the caller can restore it.
    pub value: toml_edit::Item,
}

impl std::fmt::Display for InsertProfileError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.message)
    }
}

impl std::error::Error for InsertProfileError {}
impl TomlFile {
    /// Insert a name as `[profile.name]`. Creating the `[profile]` table where necessary and
    /// throwing an error if there exists a conflict
    ///
    /// On failure the rejected `value` is handed back inside [`InsertProfileError`].
    #[expect(clippy::result_large_err)]
    fn insert_profile(
        &mut self,
        profile_str: &str,
        value: toml_edit::Item,
    ) -> Result<(), InsertProfileError> {
        // only table-like items can become a `[profile.<name>]` section
        if !value.is_table_like() {
            return Err(InsertProfileError {
                message: format!("Expected [{profile_str}] to be a Table"),
                value,
            });
        }
        // get or create the profile section
        let profile_map = if let Some(map) = self.get_mut(Config::PROFILE_SECTION) {
            map
        } else {
            // insert profile section at the beginning of the map
            let mut profile_section = toml_edit::Table::new();
            profile_section.set_position(Some(0));
            // implicit: only `[profile.<name>]` headers are emitted, not a bare `[profile]`
            profile_section.set_implicit(true);
            self.insert(Config::PROFILE_SECTION, toml_edit::Item::Table(profile_section));
            self.get_mut(Config::PROFILE_SECTION).expect("exists per above")
        };
        // ensure the profile section is a table
        let profile_map = if let Some(table) = profile_map.as_table_like_mut() {
            table
        } else {
            return Err(InsertProfileError {
                message: format!("Expected [{}] to be a Table", Config::PROFILE_SECTION),
                value,
            });
        };
        // check the profile map for structure and existing keys
        if let Some(profile) = profile_map.get(profile_str) {
            if let Some(profile_table) = profile.as_table_like() {
                // refuse to overwrite a non-empty, pre-existing `[profile.<name>]`
                if !profile_table.is_empty() {
                    return Err(InsertProfileError {
                        message: format!(
                            "[{}.{}] already exists",
                            Config::PROFILE_SECTION,
                            profile_str
                        ),
                        value,
                    });
                }
            } else {
                return Err(InsertProfileError {
                    message: format!(
                        "Expected [{}.{}] to be a Table",
                        Config::PROFILE_SECTION,
                        profile_str
                    ),
                    value,
                });
            }
        }
        // insert the profile
        profile_map.insert(profile_str, value);
        Ok(())
    }
}
/// Making sure any implicit profile `[name]` becomes `[profile.name]` for the given file and
/// returns the implicit profiles and the result of editing them
fn fix_toml_non_strict_profiles(
    toml_file: &mut TomlFile,
) -> Vec<(String, Result<(), InsertProfileError>)> {
    let mut results = vec![];

    // get any non root level keys that need to be inserted into [profile]
    let profiles = toml_file
        .as_table()
        .iter()
        .map(|(k, _)| k.to_string())
        .filter(|k| !Config::is_standalone_section(k))
        .collect::<Vec<_>>();

    // remove each profile and insert into [profile] section
    for profile in profiles {
        if let Some(value) = toml_file.remove(&profile) {
            let res = toml_file.insert_profile(&profile, value);
            if let Err(err) = res.as_ref() {
                // restore the original table so a failed fix doesn't lose data
                toml_file.insert(&profile, err.value.clone());
            }
            results.push((profile, res))
        }
    }

    results
}
/// Fix foundry.toml files. Making sure any implicit profile `[name]` becomes
/// `[profile.name]`. Return any warnings
pub fn fix_tomls() -> Vec<Warning> {
    let mut warnings = vec![];

    // Collect the global `~/.foundry` toml (if any) and the local config file.
    let tomls = {
        let mut tomls = vec![];
        if let Some(global_toml) = Config::foundry_dir_toml().filter(|p| p.exists()) {
            tomls.push(global_toml);
        }
        // `FOUNDRY_CONFIG` overrides the default `foundry.toml` location.
        let local_toml = PathBuf::from(
            Env::var("FOUNDRY_CONFIG").unwrap_or_else(|| Config::FILE_NAME.to_string()),
        );
        if local_toml.exists() {
            tomls.push(local_toml);
        } else {
            warnings.push(Warning::NoLocalToml(local_toml));
        }
        tomls
    };

    for toml in tomls {
        let mut toml_file = match TomlFile::open(&toml) {
            Ok(toml_file) => toml_file,
            Err(err) => {
                // Unreadable/unparsable file: warn and move on to the next one.
                warnings.push(Warning::CouldNotReadToml { path: toml, err: err.to_string() });
                continue;
            }
        };

        let results = fix_toml_non_strict_profiles(&mut toml_file);
        // Only write back if at least one profile was actually moved.
        let was_edited = results.iter().any(|(_, res)| res.is_ok());
        for (profile, err) in results
            .into_iter()
            .filter_map(|(profile, res)| res.err().map(|err| (profile, err.message)))
        {
            warnings.push(Warning::CouldNotFixProfile {
                path: toml_file.path().into(),
                profile,
                err,
            })
        }

        if was_edited && let Err(err) = toml_file.save() {
            warnings.push(Warning::CouldNotWriteToml {
                path: toml_file.path().into(),
                err: err.to_string(),
            });
        }
    }

    warnings
}
#[cfg(test)]
mod tests {
    use super::*;
    use figment::Jail;
    use similar_asserts::assert_eq;
    // Runs a test body inside a figment `Jail` with a mocked `$HOME` containing
    // an empty `.foundry` directory, so the global TOML lookup is hermetic.
    macro_rules! fix_test {
        ($(#[$attr:meta])* $name:ident, $fun:expr) => {
            #[test]
            $(#[$attr])*
            fn $name() {
                Jail::expect_with(|jail| {
                    // setup home directory,
                    // **Note** this only has an effect on unix, as [`dirs::home_dir()`] on windows uses `FOLDERID_Profile`
                    jail.set_env("HOME", jail.directory().display().to_string());
                    std::fs::create_dir(jail.directory().join(".foundry")).unwrap();
                    // define function type to allow implicit params / return
                    let f: Box<dyn FnOnce(&mut Jail) -> Result<(), figment::Error>> = Box::new($fun);
                    f(jail)?;
                    Ok(())
                });
            }
        };
    }
    // Implicit `[default]`/`[other]` sections become `[profile.*]`; comments survive.
    fix_test!(test_implicit_profile_name_changed, |jail| {
        jail.create_file(
            "foundry.toml",
            r#"
[default]
src = "src"
# comment
[other]
src = "other-src"
"#,
        )?;
        fix_tomls();
        assert_eq!(
            fs::read_to_string("foundry.toml").unwrap(),
            r#"
[profile.default]
src = "src"
# comment
[profile.other]
src = "other-src"
"#
        );
        Ok(())
    });
    // Standalone sections like `[fmt]` and `[rpc_endpoints]` must not be moved.
    fix_test!(test_leave_standalone_sections_alone, |jail| {
        jail.create_file(
            "foundry.toml",
            r#"
[default]
src = "src"
[fmt]
line_length = 100
[rpc_endpoints]
optimism = "https://example.com/"
"#,
        )?;
        fix_tomls();
        assert_eq!(
            fs::read_to_string("foundry.toml").unwrap(),
            r#"
[profile.default]
src = "src"
[fmt]
line_length = 100
[rpc_endpoints]
optimism = "https://example.com/"
"#
        );
        Ok(())
    });
    // The global `~/.foundry/foundry.toml` is fixed in addition to the local file.
    // mocking the `$HOME` has no effect on windows, see [`dirs::home_dir()`]
    fix_test!(
        #[cfg(not(windows))]
        test_global_toml_is_edited,
        |jail| {
            jail.create_file(
                "foundry.toml",
                r#"
[other]
src = "other-src"
"#,
            )?;
            jail.create_file(
                ".foundry/foundry.toml",
                r#"
[default]
src = "src"
"#,
            )?;
            fix_tomls();
            assert_eq!(
                fs::read_to_string("foundry.toml").unwrap(),
                r#"
[profile.other]
src = "other-src"
"#
            );
            assert_eq!(
                fs::read_to_string(".foundry/foundry.toml").unwrap(),
                r#"
[profile.default]
src = "src"
"#
            );
            Ok(())
        }
    );
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/filter.rs | crates/config/src/filter.rs | //! Helpers for constructing and using [FileFilter]s.
use core::fmt;
use foundry_compilers::FileFilter;
use serde::{Deserialize, Serialize};
use std::{
convert::Infallible,
path::{Path, PathBuf},
str::FromStr,
};
/// Expand globs with a root path.
pub fn expand_globs(
    root: &Path,
    patterns: impl IntoIterator<Item = impl AsRef<str>>,
) -> eyre::Result<Vec<PathBuf>> {
    let mut expanded = Vec::new();
    for pattern in patterns {
        // Anchor the pattern at `root` before matching against the filesystem.
        let full_pattern = root.join(pattern.as_ref()).display().to_string();
        for entry in glob::glob(&full_pattern)? {
            expanded.push(entry?);
        }
    }
    Ok(expanded)
}
/// A `globset::Glob` that creates its `globset::GlobMatcher` when its created, so it doesn't need
/// to be compiled when the filter functions `TestFilter` functions are called.
#[derive(Clone, Debug)]
pub struct GlobMatcher {
    /// The compiled glob
    // Compiled once at construction time; see `GlobMatcher::new`.
    pub matcher: globset::GlobMatcher,
}
impl GlobMatcher {
    /// Creates a new `GlobMatcher` from a `globset::Glob`.
    pub fn new(glob: globset::Glob) -> Self {
        Self { matcher: glob.compile_matcher() }
    }
    /// Tests whether the given path matches this pattern or not.
    ///
    /// The glob `./test/*` won't match absolute paths like `test/Contract.sol`, which is common
    /// format here, so we also handle this case here
    pub fn is_match(&self, path: &Path) -> bool {
        // Fast path: direct glob match.
        if self.matcher.is_match(path) {
            return true;
        }
        // Fallback 1: treat the pattern as a plain substring of the file name,
        // so e.g. pattern "A.t" matches "A.t.sol".
        if let Some(file_name) = path.file_name().and_then(|n| n.to_str())
            && file_name.contains(self.as_str())
        {
            return true;
        }
        // Fallback 2: a `./`-prefixed pattern won't match an unprefixed relative
        // path, so retry with the prefix added to the path.
        if !path.starts_with("./") && self.as_str().starts_with("./") {
            return self.matcher.is_match(format!("./{}", path.display()));
        }
        // Fallback 3: absolute pattern vs relative path — canonicalize the path
        // and retry; an uncanonicalizable path cannot match.
        if path.is_relative() && Path::new(self.glob().glob()).is_absolute() {
            if let Ok(canonicalized_path) = dunce::canonicalize(path) {
                return self.matcher.is_match(&canonicalized_path);
            } else {
                return false;
            }
        }
        false
    }
    /// Matches file only if the filter does not apply.
    ///
    /// This returns the inverse of `self.is_match(file)`.
    fn is_match_exclude(&self, path: &Path) -> bool {
        !self.is_match(path)
    }
    /// Returns the `globset::Glob`.
    pub fn glob(&self) -> &globset::Glob {
        self.matcher.glob()
    }
    /// Returns the `Glob` string used to compile this matcher.
    pub fn as_str(&self) -> &str {
        self.glob().glob()
    }
}
impl fmt::Display for GlobMatcher {
    /// Displays the underlying glob pattern.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.glob(), f)
    }
}
impl FromStr for GlobMatcher {
    type Err = globset::Error;
    /// Parses a glob pattern and compiles it into a matcher.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let glob = s.parse::<globset::Glob>()?;
        Ok(Self::new(glob))
    }
}
impl From<globset::Glob> for GlobMatcher {
    fn from(glob: globset::Glob) -> Self {
        Self::new(glob)
    }
}
impl Serialize for GlobMatcher {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
self.glob().glob().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for GlobMatcher {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let s = String::deserialize(deserializer)?;
s.parse().map_err(serde::de::Error::custom)
}
}
impl PartialEq for GlobMatcher {
fn eq(&self, other: &Self) -> bool {
self.as_str() == other.as_str()
}
}
impl Eq for GlobMatcher {}
/// Bundles multiple `SkipBuildFilter` into a single `FileFilter`
#[derive(Clone, Debug)]
pub struct SkipBuildFilters {
    /// All provided filters.
    pub matchers: Vec<GlobMatcher>,
    /// Root of the project.
    // Used to also test paths relative to the project root in `is_match`.
    pub project_root: PathBuf,
}
impl FileFilter for SkipBuildFilters {
    /// Only returns a match if _no_ exclusion filter matches
    fn is_match(&self, file: &Path) -> bool {
        self.matchers.iter().all(|matcher| {
            // The file must survive the exclusion filter both as given and,
            // when possible, relative to the project root.
            matcher.is_match_exclude(file)
                && file
                    .strip_prefix(&self.project_root)
                    .map_or(true, |stripped| matcher.is_match_exclude(stripped))
        })
    }
}
impl SkipBuildFilters {
    /// Creates a new `SkipBuildFilters` from multiple `SkipBuildFilter`.
    pub fn new<G: Into<GlobMatcher>>(
        filters: impl IntoIterator<Item = G>,
        project_root: PathBuf,
    ) -> Self {
        Self { matchers: filters.into_iter().map(Into::into).collect(), project_root }
    }
}
/// A filter that excludes matching contracts from the build
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum SkipBuildFilter {
    /// Exclude all `.t.sol` contracts
    Tests,
    /// Exclude all `.s.sol` contracts
    Scripts,
    /// Exclude if the file matches
    // Holds the raw user-provided pattern; see `file_pattern`.
    Custom(String),
}
impl SkipBuildFilter {
    /// Maps the well-known aliases to their variants; anything else is a custom pattern.
    fn new(s: &str) -> Self {
        match s {
            "test" | "tests" => Self::Tests,
            "script" | "scripts" => Self::Scripts,
            other => Self::Custom(other.to_string()),
        }
    }
    /// Returns the pattern to match against a file
    pub fn file_pattern(&self) -> &str {
        match self {
            Self::Tests => ".t.sol",
            Self::Scripts => ".s.sol",
            Self::Custom(s) => s,
        }
    }
}
impl FromStr for SkipBuildFilter {
    // Parsing cannot fail: unrecognized input becomes `Custom`.
    type Err = Infallible;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self::new(s))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_build_filter() {
        let tests = GlobMatcher::from_str(SkipBuildFilter::Tests.file_pattern()).unwrap();
        let scripts = GlobMatcher::from_str(SkipBuildFilter::Scripts.file_pattern()).unwrap();
        let custom = |s| GlobMatcher::from_str(s).unwrap();
        // `.t.sol` files are excluded by the tests filter but pass the scripts filter.
        let file = Path::new("A.t.sol");
        assert!(!tests.is_match_exclude(file));
        assert!(scripts.is_match_exclude(file));
        assert!(!custom("A.t").is_match_exclude(file));
        // `.s.sol` files are the mirror case.
        let file = Path::new("A.s.sol");
        assert!(tests.is_match_exclude(file));
        assert!(!scripts.is_match_exclude(file));
        assert!(!custom("A.s").is_match_exclude(file));
        // Directory globs also exclude matching absolute paths.
        let file = Path::new("/home/test/Foo.sol");
        assert!(!custom("*/test/**").is_match_exclude(file));
        let file = Path::new("/home/script/Contract.sol");
        assert!(!custom("*/script/**").is_match_exclude(file));
    }
    #[test]
    fn can_match_relative_glob_paths() {
        let matcher: GlobMatcher = "./test/*".parse().unwrap();
        // Absolute path that should match the pattern
        assert!(matcher.is_match(Path::new("test/Contract.t.sol")));
        // Relative path that should match the pattern
        assert!(matcher.is_match(Path::new("./test/Contract.t.sol")));
    }
    #[test]
    fn can_match_absolute_glob_paths() {
        let matcher: GlobMatcher = "/home/user/projects/project/test/*".parse().unwrap();
        // Absolute path that should match the pattern
        assert!(matcher.is_match(Path::new("/home/user/projects/project/test/Contract.t.sol")));
        // Absolute path that should not match the pattern
        assert!(!matcher.is_match(Path::new("/home/user/other/project/test/Contract.t.sol")));
        // Relative path that should not match an absolute pattern
        assert!(!matcher.is_match(Path::new("projects/project/test/Contract.t.sol")));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/doc.rs | crates/config/src/doc.rs | //! Configuration specific to the `forge doc` command and the `forge_doc` package
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Contains the config for parsing and rendering docs
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct DocConfig {
    /// Doc output path.
    pub out: PathBuf,
    /// The documentation title.
    pub title: String,
    /// Path to user provided `book.toml`.
    pub book: PathBuf,
    /// Path to user provided welcome markdown.
    ///
    /// If none is provided, it defaults to `README.md`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub homepage: Option<PathBuf>,
    /// The repository url.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub repository: Option<String>,
    /// The path to source code (e.g. `tree/main/packages/contracts`).
    /// Useful for monorepos or for projects with source code located in specific directories.
    // Appended to `repository` when building source links.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    /// Globs to ignore
    pub ignore: Vec<String>,
}
impl Default for DocConfig {
fn default() -> Self {
Self {
out: PathBuf::from("docs"),
book: PathBuf::from("book.toml"),
homepage: Some(PathBuf::from("README.md")),
title: String::default(),
repository: None,
path: None,
ignore: Vec::default(),
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/fuzz.rs | crates/config/src/fuzz.rs | //! Configuration for fuzz testing.
use alloy_primitives::U256;
use foundry_compilers::utils::canonicalized;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Contains for fuzz testing
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct FuzzConfig {
    /// The number of test cases that must execute for each property test
    pub runs: u32,
    /// Fails the fuzzed test if a revert occurs.
    pub fail_on_revert: bool,
    /// The maximum number of test case rejections allowed by proptest, to be
    /// encountered during usage of `vm.assume` cheatcode. This will be used
    /// to set the `max_global_rejects` value in proptest test runner config.
    /// `max_local_rejects` option isn't exposed here since we're not using
    /// `prop_filter`.
    pub max_test_rejects: u32,
    /// Optional seed for the fuzzing RNG algorithm
    pub seed: Option<U256>,
    /// The fuzz dictionary configuration
    // Flattened: dictionary keys live at the same TOML level as the rest.
    #[serde(flatten)]
    pub dictionary: FuzzDictionaryConfig,
    /// Number of runs to execute and include in the gas report.
    pub gas_report_samples: u32,
    /// The fuzz corpus configuration.
    // Also flattened into this section.
    #[serde(flatten)]
    pub corpus: FuzzCorpusConfig,
    /// Path where fuzz failures are recorded and replayed.
    pub failure_persist_dir: Option<PathBuf>,
    /// show `console.log` in fuzz test, defaults to `false`
    pub show_logs: bool,
    /// Optional timeout (in seconds) for each property test
    pub timeout: Option<u32>,
}
impl Default for FuzzConfig {
fn default() -> Self {
Self {
runs: 256,
fail_on_revert: true,
max_test_rejects: 65536,
seed: None,
dictionary: FuzzDictionaryConfig::default(),
gas_report_samples: 256,
corpus: FuzzCorpusConfig::default(),
failure_persist_dir: None,
show_logs: false,
timeout: None,
}
}
}
impl FuzzConfig {
    /// Creates fuzz configuration to write failures in `{PROJECT_ROOT}/cache/fuzz` dir.
    pub fn new(cache_dir: PathBuf) -> Self {
        let mut config = Self::default();
        config.failure_persist_dir = Some(cache_dir);
        config
    }
}
/// Contains for fuzz testing
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct FuzzDictionaryConfig {
    /// The weight of the dictionary
    // Parsed from either a number or a "40%"-style string.
    #[serde(deserialize_with = "crate::deserialize_stringified_percent")]
    pub dictionary_weight: u32,
    /// The flag indicating whether to include values from storage
    pub include_storage: bool,
    /// The flag indicating whether to include push bytes values
    pub include_push_bytes: bool,
    /// How many addresses to record at most.
    /// Once the fuzzer exceeds this limit, it will start evicting random entries
    ///
    /// This limit is put in place to prevent memory blowup.
    // Accepts "max" as an alias for `usize::MAX`.
    #[serde(deserialize_with = "crate::deserialize_usize_or_max")]
    pub max_fuzz_dictionary_addresses: usize,
    /// How many values to record at most.
    /// Once the fuzzer exceeds this limit, it will start evicting random entries
    #[serde(deserialize_with = "crate::deserialize_usize_or_max")]
    pub max_fuzz_dictionary_values: usize,
    /// How many literal values to seed from the AST, at most.
    ///
    /// This value is independent from the max amount of addresses and values.
    #[serde(deserialize_with = "crate::deserialize_usize_or_max")]
    pub max_fuzz_dictionary_literals: usize,
}
impl Default for FuzzDictionaryConfig {
    /// Defaults size the caches by a memory budget: entry limits are expressed as
    /// `budget_bytes / bytes_per_entry` (20 bytes per address, 32 per word).
    fn default() -> Self {
        // One mebibyte in bytes.
        const MB: usize = 1 << 20;
        Self {
            dictionary_weight: 40,
            include_storage: true,
            include_push_bytes: true,
            max_fuzz_dictionary_addresses: 300 * MB / 20,
            max_fuzz_dictionary_values: 300 * MB / 32,
            max_fuzz_dictionary_literals: 200 * MB / 32,
        }
    }
}
/// Contains the corpus (coverage guided fuzzing) configuration.
// NOTE: the field comments below were previously plain `//` comments, which made
// them invisible in rustdoc; they are doc comments now.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct FuzzCorpusConfig {
    /// Path to corpus directory, enables coverage guided fuzzing mode.
    /// If not set then sequences producing new coverage are not persisted and mutated.
    pub corpus_dir: Option<PathBuf>,
    /// Whether corpus should use gzip file compression and decompression.
    pub corpus_gzip: bool,
    /// Number of mutations until an entry is marked as eligible to be flushed from the
    /// in-memory corpus. Mutations will be performed at least `corpus_min_mutations` times.
    pub corpus_min_mutations: usize,
    /// Number of corpus entries that won't be evicted from memory.
    pub corpus_min_size: usize,
    /// Whether to collect and display edge coverage metrics.
    pub show_edge_coverage: bool,
}
impl FuzzCorpusConfig {
    /// Narrows the corpus directory to a per-`{contract}/{test}` subdirectory.
    pub fn with_test(&mut self, contract: &str, test: &str) {
        if let Some(corpus_dir) = &self.corpus_dir {
            let per_test = corpus_dir.join(contract).join(test);
            self.corpus_dir = Some(canonicalized(per_test));
        }
    }
    /// Whether edge coverage should be collected and displayed.
    pub fn collect_edge_coverage(&self) -> bool {
        self.show_edge_coverage || self.corpus_dir.is_some()
    }
    /// Whether coverage guided fuzzing is enabled.
    pub fn is_coverage_guided(&self) -> bool {
        self.corpus_dir.is_some()
    }
}
impl Default for FuzzCorpusConfig {
    /// Coverage guided mode off by default (no corpus dir); gzip on,
    /// 5 minimum mutations, no retained minimum size, no edge-coverage display.
    fn default() -> Self {
        Self {
            corpus_dir: None,
            corpus_gzip: true,
            corpus_min_mutations: 5,
            corpus_min_size: 0,
            show_edge_coverage: false,
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/warning.rs | crates/config/src/warning.rs | use figment::Profile;
use serde::{Deserialize, Serialize};
use std::{fmt, path::PathBuf};
/// Warnings emitted during loading or managing Configuration
// Serialized with an internal "type" tag so consumers can discriminate variants.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum Warning {
    /// An unknown section was encountered in a TOML file
    UnknownSection {
        /// The unknown key
        unknown_section: Profile,
        /// The source where the key was found
        source: Option<String>,
    },
    /// No local TOML file found, with location tried
    NoLocalToml(PathBuf),
    /// Could not read TOML
    CouldNotReadToml {
        /// The path of the TOML file
        path: PathBuf,
        /// The error message that occurred
        err: String,
    },
    /// Could not write TOML
    CouldNotWriteToml {
        /// The path of the TOML file
        path: PathBuf,
        /// The error message that occurred
        err: String,
    },
    /// Invalid profile. Profile should be a table
    CouldNotFixProfile {
        /// The path of the TOML file
        path: PathBuf,
        /// The profile to be fixed
        profile: String,
        /// The error message that occurred
        err: String,
    },
    /// Deprecated key.
    DeprecatedKey {
        /// The key being deprecated
        old: String,
        /// The new key replacing the deprecated one if not empty, otherwise, meaning the old one
        /// is being removed completely without replacement
        new: String,
    },
    /// An unknown key was encountered in a profile in a TOML file
    UnknownKey {
        /// The unknown key name
        key: String,
        /// The profile where the key was found, if applicable
        profile: String,
        /// The config file where the key was found
        source: String,
    },
}
// Human-readable rendering of each warning; these strings are user-facing.
impl fmt::Display for Warning {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::UnknownSection { unknown_section, source } => {
                // Only mention the source file when one is known.
                let source = source.as_ref().map(|src| format!(" in {src}")).unwrap_or_default();
                write!(
                    f,
                    "Found unknown config section{source}: [{unknown_section}]\n\
                     This notation for profiles has been deprecated and may result in the profile \
                     not being registered in future versions.\n\
                     Please use [profile.{unknown_section}] instead or run `forge config --fix`."
                )
            }
            Self::NoLocalToml(path) => write!(
                f,
                "No local TOML found to fix at {}.\n\
                 Change the current directory to a project path or set the foundry.toml path with \
                 the `FOUNDRY_CONFIG` environment variable",
                path.display()
            ),
            Self::CouldNotReadToml { path, err } => {
                write!(f, "Could not read TOML at {}: {err}", path.display())
            }
            Self::CouldNotWriteToml { path, err } => {
                write!(f, "Could not write TOML to {}: {err}", path.display())
            }
            Self::CouldNotFixProfile { path, profile, err } => {
                write!(f, "Could not fix [{profile}] in TOML at {}: {err}", path.display())
            }
            // Removal without replacement: `new` is empty.
            Self::DeprecatedKey { old, new } if new.is_empty() => {
                write!(f, "Key `{old}` is being deprecated and will be removed in future versions.")
            }
            Self::DeprecatedKey { old, new } => {
                write!(
                    f,
                    "Key `{old}` is being deprecated in favor of `{new}`. It will be removed in future versions."
                )
            }
            Self::UnknownKey { key, profile, source } => {
                write!(
                    f,
                    "Found unknown `{key}` config for profile `{profile}` defined in {source}."
                )
            }
        }
    }
}
impl std::error::Error for Warning {}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/lint.rs | crates/config/src/lint.rs | //! Configuration specific to the `forge lint` command and the `forge_lint` package
use clap::ValueEnum;
use core::fmt;
use serde::{Deserialize, Deserializer, Serialize};
use solar::interface::diagnostics::Level;
use std::str::FromStr;
use yansi::Paint;
/// Contains the config and rule set.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct LinterConfig {
    /// Specifies which lints to run based on severity.
    ///
    /// If uninformed, all severities are checked.
    pub severity: Vec<Severity>,
    /// Deny specific lints based on their ID (e.g. "mixed-case-function").
    pub exclude_lints: Vec<String>,
    /// Globs to ignore.
    pub ignore: Vec<String>,
    /// Whether to run linting during `forge build`.
    ///
    /// Defaults to true. Set to false to disable automatic linting during builds.
    pub lint_on_build: bool,
    /// Configurable patterns that should be excluded when performing `mixedCase` lint checks.
    ///
    /// Default's to ["ERC", "URI"] to allow common names like `rescueERC20`, `ERC721TokenReceiver`
    /// or `tokenURI`.
    pub mixed_case_exceptions: Vec<String>,
}
impl Default for LinterConfig {
    /// High/Med/Low lints enabled, lint-on-build on, no excludes/ignores,
    /// and the standard mixedCase exceptions (`ERC`, `URI`).
    fn default() -> Self {
        Self {
            severity: vec![Severity::High, Severity::Med, Severity::Low],
            exclude_lints: Vec::new(),
            ignore: Vec::new(),
            lint_on_build: true,
            mixed_case_exceptions: vec!["ERC".to_string(), "URI".to_string()],
        }
    }
}
/// Severity of a lint.
// Order here determines the CLI `ValueEnum` listing order.
#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
pub enum Severity {
    High,
    Med,
    Low,
    Info,
    Gas,
    CodeSize,
}
impl Severity {
    /// Human-readable label, as shown in lint output.
    fn to_str(self) -> &'static str {
        match self {
            Self::High => "High",
            Self::Med => "Med",
            Self::Low => "Low",
            Self::Info => "Info",
            Self::Gas => "Gas",
            Self::CodeSize => "CodeSize",
        }
    }
    /// Kebab-case label, used for (de)serialization.
    fn to_str_kebab(self) -> &'static str {
        match self {
            Self::High => "high",
            Self::Med => "medium",
            Self::Low => "low",
            Self::Info => "info",
            Self::Gas => "gas",
            Self::CodeSize => "code-size",
        }
    }
    /// Wraps `message` in the ANSI color associated with this severity.
    pub fn color(&self, message: &str) -> String {
        let painted = match self {
            Self::High => Paint::red(message),
            Self::Med => Paint::rgb(message, 255, 135, 61),
            Self::Low => Paint::yellow(message),
            Self::Info => Paint::cyan(message),
            // Gas and code-size lints share the same color.
            Self::Gas | Self::CodeSize => Paint::green(message),
        };
        painted.bold().to_string()
    }
}
impl From<Severity> for Level {
    fn from(severity: Severity) -> Self {
        match severity {
            // High/medium/low lints surface as compiler warnings.
            Severity::High | Severity::Med | Severity::Low => Self::Warning,
            // Informational lints surface as notes.
            Severity::Info | Severity::Gas | Severity::CodeSize => Self::Note,
        }
    }
}
impl fmt::Display for Severity {
    /// Displays the colored severity label.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.color(self.to_str()))
    }
}
// Serialized as the kebab-case label (e.g. "code-size").
impl Serialize for Severity {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.to_str_kebab().serialize(serializer)
    }
}
// Custom deserialization to make `Severity` parsing case-insensitive
impl<'de> Deserialize<'de> for Severity {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        // Delegates to `FromStr`, which lowercases the input.
        FromStr::from_str(&s).map_err(serde::de::Error::custom)
    }
}
impl FromStr for Severity {
    type Err = String;
    /// Parses a severity case-insensitively, accepting common aliases
    /// (`medium`, `size`, `codesize`, `code-size`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_lowercase().as_str() {
            "high" => Ok(Self::High),
            "med" | "medium" => Ok(Self::Med),
            "low" => Ok(Self::Low),
            "info" => Ok(Self::Info),
            "gas" => Ok(Self::Gas),
            "size" | "codesize" | "code-size" => Ok(Self::CodeSize),
            // Fix: the previous message contained a stray unbalanced backtick
            // before "one of".
            _ => Err(format!(
                "unknown variant: found `{s}`, expected one of `High`, `Med`, `Low`, `Info`, `Gas`, `CodeSize`"
            )),
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/etherscan.rs | crates/config/src/etherscan.rs | //! Support for multiple Etherscan keys.
use crate::{
Chain, Config, NamedChain,
resolve::{RE_PLACEHOLDER, UnresolvedEnvVarError, interpolate},
};
use figment::{
Error, Metadata, Profile, Provider,
providers::Env,
value::{Dict, Map},
};
use heck::ToKebabCase;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{
collections::BTreeMap,
fmt,
ops::{Deref, DerefMut},
time::Duration,
};
/// The user agent to use when querying the etherscan API.
pub const ETHERSCAN_USER_AGENT: &str = concat!("foundry/", env!("CARGO_PKG_VERSION"));
/// A [Provider] that provides Etherscan API key from the environment if it's not empty.
///
/// This prevents `ETHERSCAN_API_KEY=""` if it's set but empty
// Unit struct; behavior lives entirely in its `Provider` impl.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[non_exhaustive]
pub(crate) struct EtherscanEnvProvider;
impl Provider for EtherscanEnvProvider {
    fn metadata(&self) -> Metadata {
        Env::raw().metadata()
    }
    fn data(&self) -> Result<Map<Profile, Dict>, Error> {
        let mut dict = Dict::default();
        // Only the `ETHERSCAN_API_KEY` env var is considered.
        let env_provider = Env::raw().only(&["ETHERSCAN_API_KEY"]);
        // Skip whitespace-only values so an empty env var doesn't shadow config.
        if let Some((key, value)) = env_provider.iter().next()
            && !value.trim().is_empty()
        {
            dict.insert(key.as_str().to_string(), value.into());
        }
        // Attach the (possibly empty) dict to the currently selected profile.
        Ok(Map::from([(Config::selected_profile(), dict)]))
    }
}
/// Errors that can occur when creating an `EtherscanConfig`
#[derive(Clone, Debug, PartialEq, Eq, thiserror::Error)]
pub enum EtherscanConfigError {
    // Env-var placeholder could not be resolved.
    #[error(transparent)]
    Unresolved(#[from] UnresolvedEnvVarError),
    // Format args: 0 = alias context ("for `name`"), 1 = the offending chain.
    #[error(
        "No known Etherscan API URL for chain `{1}`. To fix this, please:\n\
         1. Specify a `url` {0}\n\
         2. Verify the chain `{1}` is correct"
    )]
    UnknownChain(String, Chain),
    // Format arg: alias context suffix, possibly empty.
    #[error("At least one of `url` or `chain` must be present{0}")]
    MissingUrlOrChain(String),
}
/// Container type for Etherscan API keys and URLs.
// Transparent: (de)serializes as the inner alias -> config map.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct EtherscanConfigs {
    configs: BTreeMap<String, EtherscanConfig>,
}
impl EtherscanConfigs {
    /// Creates a new list of etherscan configs
    pub fn new(configs: impl IntoIterator<Item = (impl Into<String>, EtherscanConfig)>) -> Self {
        let configs = configs.into_iter().map(|(name, config)| (name.into(), config)).collect();
        Self { configs }
    }
    /// Returns `true` if this type doesn't contain any configs
    pub fn is_empty(&self) -> bool {
        self.configs.is_empty()
    }
    /// Returns the first config that matches the chain
    pub fn find_chain(&self, chain: Chain) -> Option<&EtherscanConfig> {
        self.configs.values().find(|config| config.chain == Some(chain))
    }
    /// Returns all (alias -> url) pairs
    pub fn resolved(self) -> ResolvedEtherscanConfigs {
        // Resolve each entry with its alias as context; keep per-entry errors.
        let configs = self
            .configs
            .into_iter()
            .map(|(name, config)| {
                let resolved = config.resolve(Some(&name));
                (name, resolved)
            })
            .collect();
        ResolvedEtherscanConfigs { configs }
    }
}
// Expose the inner map read-only via `Deref`...
impl Deref for EtherscanConfigs {
    type Target = BTreeMap<String, EtherscanConfig>;
    fn deref(&self) -> &Self::Target {
        &self.configs
    }
}
// ...and mutably via `DerefMut`.
impl DerefMut for EtherscanConfigs {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.configs
    }
}
/// Container type for _resolved_ etherscan keys, see [`EtherscanConfigs::resolved`].
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct ResolvedEtherscanConfigs {
    /// contains all named `ResolvedEtherscanConfig` or an error if we failed to resolve the env
    /// var alias
    configs: BTreeMap<String, Result<ResolvedEtherscanConfig, EtherscanConfigError>>,
}
impl ResolvedEtherscanConfigs {
    /// Creates a new list of resolved etherscan configs
    pub fn new(
        configs: impl IntoIterator<Item = (impl Into<String>, ResolvedEtherscanConfig)>,
    ) -> Self {
        let configs = configs.into_iter().map(|(name, c)| (name.into(), Ok(c))).collect();
        Self { configs }
    }
    /// Returns the first config that matches the chain
    pub fn find_chain(
        self,
        chain: Chain,
    ) -> Option<Result<ResolvedEtherscanConfig, EtherscanConfigError>> {
        // Walk the entries in order: yield the first chain match, but also stop
        // at the first unresolved entry and surface its error.
        self.configs.into_values().find_map(|config| match config {
            Ok(c) if c.chain == Some(chain) => Some(Ok(c)),
            Err(e) => Some(Err(e)),
            Ok(_) => None,
        })
    }
    /// Returns true if there's a config that couldn't be resolved
    pub fn has_unresolved(&self) -> bool {
        self.configs.values().any(Result::is_err)
    }
}
// Read-only access to the inner alias -> result map.
impl Deref for ResolvedEtherscanConfigs {
    type Target = BTreeMap<String, Result<ResolvedEtherscanConfig, EtherscanConfigError>>;
    fn deref(&self) -> &Self::Target {
        &self.configs
    }
}
// Mutable access to the inner map.
impl DerefMut for ResolvedEtherscanConfigs {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.configs
    }
}
/// Represents all info required to create an etherscan client
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct EtherscanConfig {
    /// The chain name or EIP-155 chain ID used to derive the API URL.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub chain: Option<Chain>,
    /// Etherscan API URL
    // May contain `${ENV_VAR}` placeholders; resolved in `resolve`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    /// The etherscan API KEY that's required to make requests
    pub key: EtherscanApiKey,
}
impl EtherscanConfig {
    /// Returns the etherscan config required to create a client.
    ///
    /// # Errors
    ///
    /// Returns an error if the type holds a reference to an env var and the env var is not set or
    /// no chain or url is configured
    pub fn resolve(
        self,
        alias: Option<&str>,
    ) -> Result<ResolvedEtherscanConfig, EtherscanConfigError> {
        let Self { chain, mut url, key } = self;
        // Resolve `${ENV_VAR}` placeholders in the URL, if any.
        if let Some(url) = &mut url {
            *url = interpolate(url)?;
        }
        // Derive whichever of (chain, alias) is missing from the other.
        let (chain, alias) = match (chain, alias) {
            // fill one with the other
            (Some(chain), None) => (Some(chain), Some(chain.to_string())),
            (None, Some(alias)) => {
                // alloy chain is parsed as kebab case
                (
                    alias.to_kebab_case().parse().ok().or_else(|| {
                        // if this didn't work try to parse as json because the deserialize impl
                        // supports more aliases
                        serde_json::from_str::<NamedChain>(&format!("\"{alias}\""))
                            .map(Into::into)
                            .ok()
                    }),
                    Some(alias.into()),
                )
            }
            // leave as is
            (Some(chain), Some(alias)) => (Some(chain), Some(alias.into())),
            (None, None) => (None, None),
        };
        let key = key.resolve()?;
        match (chain, url) {
            // Explicit URL wins; browser URL is still derived from the chain.
            (Some(chain), Some(api_url)) => Ok(ResolvedEtherscanConfig {
                api_url,
                browser_url: chain.etherscan_urls().map(|(_, url)| url.to_string()),
                key,
                chain: Some(chain),
            }),
            // No URL: derive everything from the chain, or fail for unknown chains.
            (Some(chain), None) => ResolvedEtherscanConfig::create(key, chain).ok_or_else(|| {
                let msg = alias.map(|a| format!("for `{a}`")).unwrap_or_default();
                EtherscanConfigError::UnknownChain(msg, chain)
            }),
            // URL only: usable, but no browser URL or chain metadata.
            (None, Some(api_url)) => {
                Ok(ResolvedEtherscanConfig { api_url, browser_url: None, key, chain: None })
            }
            (None, None) => {
                let msg = alias
                    .map(|a| format!(" for Etherscan config with unknown alias `{a}`"))
                    .unwrap_or_default();
                Err(EtherscanConfigError::MissingUrlOrChain(msg))
            }
        }
    }
}
/// Contains required url + api key to set up an etherscan client
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ResolvedEtherscanConfig {
    /// Etherscan API URL.
    // Serialized under the shorter "url" key.
    #[serde(rename = "url")]
    pub api_url: String,
    /// Optional browser URL.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub browser_url: Option<String>,
    /// The resolved API key.
    // Plain string: env-var placeholders have already been interpolated.
    pub key: String,
    /// The chain name or EIP-155 chain ID.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub chain: Option<Chain>,
}
impl ResolvedEtherscanConfig {
    /// Creates a new instance using the api key and chain
    ///
    /// Returns `None` when no Etherscan URLs are known for the chain.
    pub fn create(api_key: impl Into<String>, chain: impl Into<Chain>) -> Option<Self> {
        let chain = chain.into();
        let (api_url, browser_url) = chain.etherscan_urls()?;
        Some(Self {
            api_url: api_url.to_string(),
            browser_url: Some(browser_url.to_string()),
            key: api_key.into(),
            chain: Some(chain),
        })
    }
    /// Sets the chain value and consumes the type
    ///
    /// This is only used to set derive the appropriate Cache path for the etherscan client
    pub fn with_chain(mut self, chain: impl Into<Chain>) -> Self {
        self.set_chain(chain);
        self
    }
    /// Sets the chain value
    pub fn set_chain(&mut self, chain: impl Into<Chain>) -> &mut Self {
        let chain = chain.into();
        // Known chains also overwrite the API/browser URLs.
        if let Some((api, browser)) = chain.etherscan_urls() {
            self.api_url = api.to_string();
            self.browser_url = Some(browser.to_string());
        }
        self.chain = Some(chain);
        self
    }
    /// Returns the corresponding `foundry_block_explorers::Client`, configured with the `api_url`,
    /// `api_key` and cache
    pub fn into_client(
        self,
    ) -> Result<foundry_block_explorers::Client, foundry_block_explorers::errors::EtherscanError>
    {
        let Self { api_url, browser_url, key: api_key, chain } = self;
        let chain = chain.unwrap_or_default();
        let cache = Config::foundry_etherscan_chain_cache_dir(chain);
        if let Some(cache_path) = &cache {
            // we also create the `sources` sub dir here
            // Best-effort: a failed mkdir only logs a warning.
            if let Err(err) = std::fs::create_dir_all(cache_path.join("sources")) {
                warn!("could not create etherscan cache dir: {:?}", err);
            }
        }
        let api_url = into_url(&api_url)?;
        let client = reqwest::Client::builder()
            .user_agent(ETHERSCAN_USER_AGENT)
            // Only load built-in root certs for https endpoints.
            .tls_built_in_root_certs(api_url.scheme() == "https")
            .build()?;
        // Cache responses for 24h.
        let mut client_builder = foundry_block_explorers::Client::builder()
            .with_client(client)
            .with_api_key(api_key)
            .with_cache(cache, Duration::from_secs(24 * 60 * 60));
        if let Some(browser_url) = browser_url {
            client_builder = client_builder.with_url(browser_url)?;
        }
        client_builder.chain(chain)?.build()
    }
}
/// Represents a single etherscan API key
///
/// This type preserves the value as it's stored in the config. If the value is a reference to an
/// env var, then the `EtherscanApiKey::Env` variant will hold the reference (`${MAIN_NET}`) and
/// _not_ the value of the env var itself.
/// In other words, this type does not resolve env vars when it's being deserialized; resolution
/// happens lazily via `resolve`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EtherscanApiKey {
    /// A raw key
    Key(String),
    /// A value that contains at least one `${ENV_VAR}` placeholder
    ///
    /// **Note:** this contains the key or `${ETHERSCAN_KEY}`
    Env(String),
}
impl EtherscanApiKey {
    /// Returns the raw key if this is the `Key` variant.
    pub fn as_key(&self) -> Option<&str> {
        if let Self::Key(key) = self { Some(key) } else { None }
    }

    /// Returns the unresolved env placeholder if this is the `Env` variant.
    pub fn as_env(&self) -> Option<&str> {
        if let Self::Env(env) = self { Some(env) } else { None }
    }

    /// Returns the key this type holds.
    ///
    /// # Error
    ///
    /// Returns an error if the type holds a reference to an env var and the env var is not set.
    pub fn resolve(self) -> Result<String, UnresolvedEnvVarError> {
        match self {
            Self::Key(key) => Ok(key),
            Self::Env(val) => interpolate(&val),
        }
    }
}
impl Serialize for EtherscanApiKey {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Round-trips through `Display`, which yields the inner string for both variants.
        let raw = self.to_string();
        serializer.serialize_str(&raw)
    }
}
impl<'de> Deserialize<'de> for EtherscanApiKey {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let raw = String::deserialize(deserializer)?;
        // Values containing a `${...}` placeholder stay unresolved as `Env`.
        Ok(if RE_PLACEHOLDER.is_match(&raw) { Self::Env(raw) } else { Self::Key(raw) })
    }
}
impl fmt::Display for EtherscanApiKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Both variants render as their inner string, unresolved.
        let (Self::Key(inner) | Self::Env(inner)) = self;
        inner.fmt(f)
    }
}
/// Converts `url` into a parsed [`reqwest::Url`].
///
/// This is a hack to work around `IntoUrl`'s sealed private functions, which can't be called
/// normally.
#[inline]
fn into_url(url: impl reqwest::IntoUrl) -> std::result::Result<reqwest::Url, reqwest::Error> {
    url.into_url()
}
#[cfg(test)]
mod tests {
    use super::*;
    use NamedChain::Mainnet;

    // Chain-only config: the client URL is derived from the chain's default endpoints.
    #[test]
    fn can_create_client_via_chain() {
        let mut configs = EtherscanConfigs::default();
        configs.insert(
            "mainnet".to_string(),
            EtherscanConfig {
                chain: Some(Mainnet.into()),
                url: None,
                key: EtherscanApiKey::Key("ABCDEFG".to_string()),
            },
        );
        let mut resolved = configs.resolved();
        let config = resolved.remove("mainnet").unwrap().unwrap();
        let client = config.into_client().unwrap();
        assert_eq!(
            client.etherscan_api_url().as_str(),
            "https://api.etherscan.io/v2/api?chainid=1"
        );
    }

    // Explicit URL plus chain: client creation must still succeed.
    #[test]
    fn can_create_client_via_url_and_chain() {
        let mut configs = EtherscanConfigs::default();
        configs.insert(
            "mainnet".to_string(),
            EtherscanConfig {
                chain: Some(Mainnet.into()),
                url: Some("https://api.etherscan.io/api".to_string()),
                key: EtherscanApiKey::Key("ABCDEFG".to_string()),
            },
        );
        let mut resolved = configs.resolved();
        let config = resolved.remove("mainnet").unwrap().unwrap();
        let _ = config.into_client().unwrap();
    }

    // Env-var key: resolution fails until the var is set, then resolves to its value.
    #[test]
    fn can_create_client_via_url_and_chain_env_var() {
        let mut configs = EtherscanConfigs::default();
        let env = "_CONFIG_ETHERSCAN_API_KEY";
        configs.insert(
            "mainnet".to_string(),
            EtherscanConfig {
                chain: Some(Mainnet.into()),
                url: Some("https://api.etherscan.io/api".to_string()),
                key: EtherscanApiKey::Env(format!("${{{env}}}")),
            },
        );
        // Var not set yet: resolution must error.
        let mut resolved = configs.clone().resolved();
        let config = resolved.remove("mainnet").unwrap();
        assert!(config.is_err());
        // NOTE(review): mutating the process environment is unsound if other threads read it
        // concurrently — assumed safe in this test binary; confirm test runner threading.
        unsafe {
            std::env::set_var(env, "ABCDEFG");
        }
        let mut resolved = configs.resolved();
        let config = resolved.remove("mainnet").unwrap().unwrap();
        assert_eq!(config.key, "ABCDEFG");
        let client = config.into_client().unwrap();
        assert_eq!(
            client.etherscan_api_url().as_str(),
            "https://api.etherscan.io/v2/api?chainid=1"
        );
        // Clean up so other tests don't observe the var.
        unsafe {
            std::env::remove_var(env);
        }
    }

    // A recognized chain alias used as the config key resolves to the matching chain.
    #[test]
    fn resolve_etherscan_alias_config() {
        let mut configs = EtherscanConfigs::default();
        configs.insert(
            "blast_sepolia".to_string(),
            EtherscanConfig {
                chain: None,
                url: Some("https://api.etherscan.io/api".to_string()),
                key: EtherscanApiKey::Key("ABCDEFG".to_string()),
            },
        );
        let mut resolved = configs.clone().resolved();
        let config = resolved.remove("blast_sepolia").unwrap().unwrap();
        assert_eq!(config.chain, Some(Chain::blast_sepolia()));
    }

    // Aliases resolve with either underscore or hyphen separators.
    #[test]
    fn resolve_etherscan_alias() {
        let config = EtherscanConfig {
            chain: None,
            url: Some("https://api.etherscan.io/api".to_string()),
            key: EtherscanApiKey::Key("ABCDEFG".to_string()),
        };
        let resolved = config.clone().resolve(Some("base_sepolia")).unwrap();
        assert_eq!(resolved.chain, Some(Chain::base_sepolia()));
        let resolved = config.resolve(Some("base-sepolia")).unwrap();
        assert_eq!(resolved.chain, Some(Chain::base_sepolia()));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/error.rs | crates/config/src/error.rs | //! error handling and solc error codes
use alloy_primitives::map::HashSet;
use figment::providers::{Format, Toml};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{error::Error, fmt, str::FromStr};
/// Represents a failed attempt to extract `Config` from a `Figment`
///
/// Wraps the underlying [`figment::Error`]; `Display` renders a deduplicated, per-line report.
#[derive(Clone, PartialEq)]
pub struct ExtractConfigError {
    /// error thrown when extracting the `Config`
    pub(crate) error: figment::Error,
}
impl ExtractConfigError {
/// Wraps the figment error
pub fn new(error: figment::Error) -> Self {
Self { error }
}
}
impl fmt::Display for ExtractConfigError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut unique_errors = Vec::with_capacity(self.error.count());
let mut unique = HashSet::with_capacity(self.error.count());
for err in self.error.clone().into_iter() {
let err = if err
.metadata
.as_ref()
.map(|meta| meta.name.contains(Toml::NAME))
.unwrap_or_default()
{
FoundryConfigError::Toml(err)
} else {
FoundryConfigError::Other(err)
};
if unique.insert(err.to_string()) {
unique_errors.push(err);
}
}
writeln!(f, "failed to extract foundry config:")?;
for err in unique_errors {
writeln!(f, "{err}")?;
}
Ok(())
}
}
impl fmt::Debug for ExtractConfigError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Debug output intentionally matches `Display` for readable error reports.
        <Self as fmt::Display>::fmt(self, f)
    }
}
impl Error for ExtractConfigError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // Forward to the wrapped figment error's source chain.
        self.error.source()
    }
}
/// Represents an error that can occur when constructing the `Config`
///
/// The variant only determines the `Display` prefix; both wrap a [`figment::Error`].
#[derive(Clone, Debug, PartialEq)]
pub enum FoundryConfigError {
    /// An error thrown during toml parsing
    Toml(figment::Error),
    /// Any other error thrown when constructing the config's figment
    Other(figment::Error),
}
impl fmt::Display for FoundryConfigError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let fmt_err = |err: &figment::Error, f: &mut fmt::Formatter<'_>| {
write!(f, "{err}")?;
if !err.path.is_empty() {
// the path will contain the setting value like `["etherscan_api_key"]`
write!(f, " for setting `{}`", err.path.join("."))?;
}
Ok(())
};
match self {
Self::Toml(err) => {
f.write_str("foundry.toml error: ")?;
fmt_err(err, f)
}
Self::Other(err) => {
f.write_str("foundry config error: ")?;
fmt_err(err, f)
}
}
}
}
impl Error for FoundryConfigError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // Both variants wrap a figment error; forward its source chain.
        let (Self::Toml(error) | Self::Other(error)) = self;
        Error::source(error)
    }
}
/// A non-exhaustive list of solidity error codes
///
/// Each named variant corresponds to a numeric solc error ID; see the `From<u64>` /
/// `From<SolidityErrorCode> for u64` impls below for the exact mapping.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SolidityErrorCode {
    /// Warning that SPDX license identifier not provided in source file
    SpdxLicenseNotProvided,
    /// Warning: Visibility for constructor is ignored. If you want the contract to be
    /// non-deployable, making it "abstract" is sufficient
    VisibilityForConstructorIsIgnored,
    /// Warning that contract code size exceeds 24576 bytes (a limit introduced in Spurious
    /// Dragon).
    ContractExceeds24576Bytes,
    /// Warning after shanghai if init code size exceeds 49152 bytes
    ContractInitCodeSizeExceeds49152Bytes,
    /// Warning that Function state mutability can be restricted to view/pure.
    FunctionStateMutabilityCanBeRestricted,
    /// Warning: Unused local variable
    UnusedLocalVariable,
    /// Warning: Unused function parameter. Remove or comment out the variable name to silence this
    /// warning.
    UnusedFunctionParameter,
    /// Warning: Return value of low-level calls not used.
    ReturnValueOfCallsNotUsed,
    /// Warning: Interface functions are implicitly "virtual"
    InterfacesExplicitlyVirtual,
    /// Warning: This contract has a payable fallback function, but no receive ether function.
    /// Consider adding a receive ether function.
    PayableNoReceiveEther,
    /// Warning: This declaration shadows an existing declaration.
    ShadowsExistingDeclaration,
    /// This declaration has the same name as another declaration.
    DeclarationSameNameAsAnother,
    /// Unnamed return variable can remain unassigned
    UnnamedReturnVariable,
    /// Unreachable code
    Unreachable,
    /// Missing pragma solidity
    PragmaSolidity,
    /// Uses transient opcodes
    TransientStorageUsed,
    /// There are more than 256 warnings. Ignoring the rest.
    TooManyWarnings,
    /// All other error codes
    Other(u64),
}
impl SolidityErrorCode {
    /// The textual identifier for this error
    ///
    /// Returns `Err(code)` if unknown error
    ///
    /// NOTE: keep in sync with the `FromStr` impl below — every alias must round-trip.
    pub fn as_str(&self) -> Result<&'static str, u64> {
        let s = match self {
            Self::SpdxLicenseNotProvided => "license",
            Self::VisibilityForConstructorIsIgnored => "constructor-visibility",
            Self::ContractExceeds24576Bytes => "code-size",
            Self::ContractInitCodeSizeExceeds49152Bytes => "init-code-size",
            Self::FunctionStateMutabilityCanBeRestricted => "func-mutability",
            Self::UnusedLocalVariable => "unused-var",
            Self::UnusedFunctionParameter => "unused-param",
            Self::ReturnValueOfCallsNotUsed => "unused-return",
            Self::InterfacesExplicitlyVirtual => "virtual-interfaces",
            Self::PayableNoReceiveEther => "missing-receive-ether",
            Self::ShadowsExistingDeclaration => "shadowing",
            Self::DeclarationSameNameAsAnother => "same-varname",
            Self::UnnamedReturnVariable => "unnamed-return",
            Self::Unreachable => "unreachable",
            Self::PragmaSolidity => "pragma-solidity",
            Self::TransientStorageUsed => "transient-storage",
            Self::TooManyWarnings => "too-many-warnings",
            Self::Other(code) => return Err(*code),
        };
        Ok(s)
    }
}
impl From<SolidityErrorCode> for u64 {
    /// Returns the numeric solc error ID for this variant.
    ///
    /// NOTE: keep in sync with the `From<u64>` impl below — every pair must round-trip.
    fn from(code: SolidityErrorCode) -> Self {
        match code {
            SolidityErrorCode::SpdxLicenseNotProvided => 1878,
            SolidityErrorCode::VisibilityForConstructorIsIgnored => 2462,
            SolidityErrorCode::ContractExceeds24576Bytes => 5574,
            SolidityErrorCode::ContractInitCodeSizeExceeds49152Bytes => 3860,
            SolidityErrorCode::FunctionStateMutabilityCanBeRestricted => 2018,
            SolidityErrorCode::UnusedLocalVariable => 2072,
            SolidityErrorCode::UnusedFunctionParameter => 5667,
            SolidityErrorCode::ReturnValueOfCallsNotUsed => 9302,
            SolidityErrorCode::InterfacesExplicitlyVirtual => 5815,
            SolidityErrorCode::PayableNoReceiveEther => 3628,
            SolidityErrorCode::ShadowsExistingDeclaration => 2519,
            SolidityErrorCode::DeclarationSameNameAsAnother => 8760,
            SolidityErrorCode::UnnamedReturnVariable => 6321,
            SolidityErrorCode::Unreachable => 5740,
            SolidityErrorCode::PragmaSolidity => 3420,
            SolidityErrorCode::TransientStorageUsed => 2394,
            SolidityErrorCode::TooManyWarnings => 4591,
            SolidityErrorCode::Other(code) => code,
        }
    }
}
impl fmt::Display for SolidityErrorCode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Known codes render as their alias, unknown ones as the raw number.
        match self.as_str() {
            Ok(alias) => alias.fmt(f),
            Err(raw) => raw.fmt(f),
        }
    }
}
impl FromStr for SolidityErrorCode {
    type Err = String;

    /// Parses a known alias into its variant.
    ///
    /// NOTE: keep in sync with `as_str` above — every alias must round-trip.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let code = match s {
            "license" => Self::SpdxLicenseNotProvided,
            "constructor-visibility" => Self::VisibilityForConstructorIsIgnored,
            "code-size" => Self::ContractExceeds24576Bytes,
            "init-code-size" => Self::ContractInitCodeSizeExceeds49152Bytes,
            "func-mutability" => Self::FunctionStateMutabilityCanBeRestricted,
            "unused-var" => Self::UnusedLocalVariable,
            "unused-param" => Self::UnusedFunctionParameter,
            "unused-return" => Self::ReturnValueOfCallsNotUsed,
            "virtual-interfaces" => Self::InterfacesExplicitlyVirtual,
            "missing-receive-ether" => Self::PayableNoReceiveEther,
            "shadowing" => Self::ShadowsExistingDeclaration,
            "same-varname" => Self::DeclarationSameNameAsAnother,
            "unnamed-return" => Self::UnnamedReturnVariable,
            "unreachable" => Self::Unreachable,
            "pragma-solidity" => Self::PragmaSolidity,
            "transient-storage" => Self::TransientStorageUsed,
            "too-many-warnings" => Self::TooManyWarnings,
            _ => return Err(format!("Unknown variant {s}")),
        };
        Ok(code)
    }
}
impl From<u64> for SolidityErrorCode {
    /// Maps a numeric solc error ID to its named variant, falling back to `Other`.
    ///
    /// NOTE: keep in sync with the `From<SolidityErrorCode> for u64` impl above.
    fn from(code: u64) -> Self {
        match code {
            1878 => Self::SpdxLicenseNotProvided,
            2462 => Self::VisibilityForConstructorIsIgnored,
            5574 => Self::ContractExceeds24576Bytes,
            3860 => Self::ContractInitCodeSizeExceeds49152Bytes,
            2018 => Self::FunctionStateMutabilityCanBeRestricted,
            2072 => Self::UnusedLocalVariable,
            5667 => Self::UnusedFunctionParameter,
            9302 => Self::ReturnValueOfCallsNotUsed,
            5815 => Self::InterfacesExplicitlyVirtual,
            3628 => Self::PayableNoReceiveEther,
            2519 => Self::ShadowsExistingDeclaration,
            8760 => Self::DeclarationSameNameAsAnother,
            6321 => Self::UnnamedReturnVariable,
            5740 => Self::Unreachable,
            3420 => Self::PragmaSolidity,
            2394 => Self::TransientStorageUsed,
            4591 => Self::TooManyWarnings,
            other => Self::Other(other),
        }
    }
}
impl Serialize for SolidityErrorCode {
    /// Serializes as the alias string when known, otherwise as the raw numeric code.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self.as_str() {
            Ok(alias) => serializer.serialize_str(alias),
            Err(code) => serializer.serialize_u64(code),
        }
    }
}
impl<'de> Deserialize<'de> for SolidityErrorCode {
    /// Accepts either an alias string (parsed via `FromStr`) or a raw numeric code.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        /// Helper deserializer for error codes as names and codes
        #[derive(Deserialize)]
        #[serde(untagged)]
        enum SolCode {
            Name(String),
            Code(u64),
        }
        match SolCode::deserialize(deserializer)? {
            SolCode::Code(code) => Ok(code.into()),
            SolCode::Name(name) => name.parse().map_err(serde::de::Error::custom),
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/extend.rs | crates/config/src/extend.rs | use std::collections::HashMap;
use serde::{Deserialize, Serialize};
/// Strategy for extending configuration from a base file.
///
/// Serialized in kebab-case, e.g. `extend-arrays`, `no-collision`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum ExtendStrategy {
    /// Uses `admerge` figment strategy.
    /// Arrays are concatenated (base elements + local elements).
    /// Other values are replaced (local values override base values).
    #[default]
    ExtendArrays,
    /// Uses `merge` figment strategy.
    /// Arrays are replaced entirely (local arrays replace base arrays).
    /// Other values are replaced (local values override base values).
    ReplaceArrays,
    /// Throws an error if any of the keys in the inherited toml file are also in `foundry.toml`.
    NoCollision,
}
/// Configuration for extending from a base file.
///
/// Supports two formats (deserialized untagged):
/// - String: `extends = "base.toml"`
/// - Object: `extends = { path = "base.toml", strategy = "no-collision" }`
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Extends {
    /// Simple string path to base file
    Path(String),
    /// Detailed configuration with path and strategy
    Config(ExtendConfig),
}
/// Detailed `extends` configuration: a base file path plus an optional merge strategy.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExtendConfig {
    /// Path to the base TOML file to extend from.
    pub path: String,
    /// Merge strategy; `None` falls back to [`ExtendStrategy::default`].
    #[serde(default)]
    pub strategy: Option<ExtendStrategy>,
}
impl Extends {
    /// Returns the path of the base file to extend from.
    pub fn path(&self) -> &str {
        match self {
            Self::Path(p) => p.as_str(),
            Self::Config(c) => c.path.as_str(),
        }
    }

    /// Returns the effective merge strategy, falling back to the default.
    pub fn strategy(&self) -> ExtendStrategy {
        if let Self::Config(config) = self {
            config.strategy.unwrap_or_default()
        } else {
            ExtendStrategy::default()
        }
    }
}
// -- HELPERS -----------------------------------------------------------------

// Minimal deserialization targets that extract only the `extends` field (and its strategy)
// from each `[profile.*]` table, without touching the rest of the config.
#[derive(Deserialize, Default)]
pub(crate) struct ExtendsPartialConfig {
    // Maps profile name -> its (possibly absent) `extends` entry.
    #[serde(default)]
    pub profile: Option<HashMap<String, ExtendsHelper>>,
}

#[derive(Deserialize, Default)]
pub(crate) struct ExtendsHelper {
    // The raw `extends` value of a single profile, if present.
    #[serde(default)]
    pub extends: Option<Extends>,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/utils.rs | crates/config/src/utils.rs | //! Utility functions
use crate::Config;
use alloy_primitives::U256;
use figment::value::Value;
use foundry_compilers::artifacts::{
EvmVersion,
remappings::{Remapping, RemappingError},
};
use revm::primitives::hardfork::SpecId;
use serde::{Deserialize, Deserializer, Serializer, de::Error};
use std::{
io,
path::{Path, PathBuf},
str::FromStr,
};
// TODO: Why do these exist separately from `Config::load`?
/// Loads the config for the current project workspace.
///
/// Equivalent to [`load_config_with_root`] with root auto-discovery.
pub fn load_config() -> eyre::Result<Config> {
    load_config_with_root(None)
}
/// Loads the config for the current project workspace or the provided root path.
pub fn load_config_with_root(root: Option<&Path>) -> eyre::Result<Config> {
    // When no root is given, discover it by walking up from the current directory.
    let root = match root {
        Some(root) => root.to_path_buf(),
        None => find_project_root(None)?,
    };
    Ok(Config::load_with_root(&root)?.sanitized())
}
/// Returns the path of the top-level directory of the working git tree.
pub fn find_git_root(relative_to: &Path) -> io::Result<Option<PathBuf>> {
    // Canonicalize relative paths first so the ancestor walk starts from an absolute path.
    let canonical;
    let root = if relative_to.is_absolute() {
        relative_to
    } else {
        canonical = dunce::canonicalize(relative_to)?;
        &canonical
    };
    Ok(root.ancestors().find(|p| p.join(".git").exists()).map(Path::to_path_buf))
}
/// Returns the root path to set for the project root.
///
/// Walks the directory tree upwards looking for a `foundry.toml`, starting at the given path or
/// cwd, but never crossing the root dir of the enclosing git repo, so that:
///
/// ```text
/// -- foundry.toml
///
/// -- repo
/// |__ .git
/// |__sub
///    |__ [given_path | cwd]
/// ```
///
/// still detects `repo` as root.
///
/// Falls back to the git root, then to `cwd`, if no `foundry.toml` is found in the tree.
///
/// Returns an error if:
/// - `cwd` is `Some` and is not a valid directory;
/// - `cwd` is `None` and the [`std::env::current_dir`] call fails.
pub fn find_project_root(cwd: Option<&Path>) -> io::Result<PathBuf> {
    let cwd = match cwd {
        Some(path) => path,
        None => &std::env::current_dir()?,
    };
    let boundary = find_git_root(cwd)?;
    let mut found = None;
    for dir in cwd.ancestors() {
        // Don't look outside of the git repo if it exists.
        if let Some(boundary) = &boundary {
            if !dir.starts_with(boundary) {
                break;
            }
        }
        if dir.join(Config::FILE_NAME).is_file() {
            found = Some(dir.to_path_buf());
            break;
        }
    }
    Ok(found.or(boundary).unwrap_or_else(|| cwd.to_path_buf()))
}
/// Returns all [`Remapping`]s contained in the `remappings` str separated by newlines
///
/// # Example
///
/// ```
/// use foundry_config::remappings_from_newline;
/// let remappings: Result<Vec<_>, _> = remappings_from_newline(
///     r#"
///              file-ds-test/=lib/ds-test/
///              file-other/=lib/other/
///          "#,
/// )
/// .collect();
/// ```
pub fn remappings_from_newline(
    remappings: &str,
) -> impl Iterator<Item = Result<Remapping, RemappingError>> + '_ {
    remappings
        .lines()
        .filter_map(|line| {
            // Skip blank lines; parse the rest with surrounding whitespace removed.
            let line = line.trim();
            (!line.is_empty()).then_some(line)
        })
        .map(Remapping::from_str)
}
/// Returns the remappings from the given env var
///
/// Returns `None` if the env var is not set, otherwise all Remappings, see
/// [`remappings_from_newline`]
pub fn remappings_from_env_var(env_var: &str) -> Option<Result<Vec<Remapping>, RemappingError>> {
    let val = std::env::var(env_var).ok()?;
    let parsed = remappings_from_newline(&val).collect();
    Some(parsed)
}
/// Converts the `val` into a `figment::Value::Array`
///
/// The values should be separated by commas; surrounding brackets are also supported: `[a,b,c]`
pub fn to_array_value(val: &str) -> Result<Value, figment::Error> {
    let value: Value = match Value::from(val) {
        Value::String(_, s) => {
            // Strip optional surrounding brackets, then split on commas.
            let inner = s.trim_start_matches('[').trim_end_matches(']');
            inner.split(',').map(|part| part.to_string()).collect::<Vec<_>>().into()
        }
        Value::Empty(_, _) => Vec::<Value>::new().into(),
        arr @ Value::Array(_, _) => arr,
        _ => return Err(format!("Invalid value `{val}`, expected an array").into()),
    };
    Ok(value)
}
/// Returns a list of _unique_ paths to all folders under `root` that contain a `foundry.toml` file
///
/// This will also resolve symlinks
///
/// # Example
///
/// ```no_run
/// use foundry_config::utils;
/// let dirs = utils::foundry_toml_dirs("./lib");
/// ```
///
/// for following layout this will return
/// `["lib/dep1", "lib/dep2"]`
///
/// ```text
/// lib
/// └── dep1
/// │   ├── foundry.toml
/// └── dep2
///     ├── foundry.toml
/// ```
pub fn foundry_toml_dirs(root: impl AsRef<Path>) -> Vec<PathBuf> {
    walkdir::WalkDir::new(root)
        .max_depth(1)
        .into_iter()
        .filter_map(|entry| {
            let entry = entry.ok()?;
            if !entry.file_type().is_dir() {
                return None;
            }
            // Canonicalize to resolve symlinks before probing for a foundry.toml.
            let path = dunce::canonicalize(entry.path()).ok()?;
            path.join(Config::FILE_NAME).exists().then_some(path)
        })
        .collect()
}
/// Returns a remapping for the given dir, mapping `<dir_name>/` to the directory's path.
///
/// Returns `None` if the directory has no (or a non-UTF-8, or empty) file name.
pub(crate) fn get_dir_remapping(dir: impl AsRef<Path>) -> Option<Remapping> {
    let dir = dir.as_ref();
    // A remapping requires a non-empty, UTF-8 directory name.
    let dir_name = dir.file_name().and_then(|s| s.to_str()).filter(|s| !s.is_empty())?;
    let mut path = dir.display().to_string();
    // Remapping targets are directory paths and must end with a `/`.
    if !path.ends_with('/') {
        path.push('/');
    }
    Some(Remapping { context: None, name: format!("{dir_name}/"), path })
}
/// Deserialize stringified percent. The value must be between 0 and 100 inclusive.
pub(crate) fn deserialize_stringified_percent<'de, D>(deserializer: D) -> Result<u32, D::Error>
where
D: Deserializer<'de>,
{
let num: U256 = Numeric::deserialize(deserializer)?.into();
let num: u64 = num.try_into().map_err(serde::de::Error::custom)?;
if num <= 100 {
num.try_into().map_err(serde::de::Error::custom)
} else {
Err(serde::de::Error::custom("percent must be lte 100"))
}
}
/// Deserialize a `u64` or "max" for `u64::MAX`.
pub(crate) fn deserialize_u64_or_max<'de, D>(deserializer: D) -> Result<u64, D::Error>
where
    D: Deserializer<'de>,
{
    /// Untagged helper: accepts either a raw number or a string.
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum Val {
        Number(u64),
        String(String),
    }
    match Val::deserialize(deserializer)? {
        Val::Number(num) => Ok(num),
        Val::String(s) => {
            // "max" (any case) is shorthand for u64::MAX; anything else must parse as u64.
            if s.eq_ignore_ascii_case("max") {
                Ok(u64::MAX)
            } else {
                s.parse::<u64>().map_err(D::Error::custom)
            }
        }
    }
}
/// Deserialize a `usize` or "max" for `usize::MAX`.
pub(crate) fn deserialize_usize_or_max<'de, D>(deserializer: D) -> Result<usize, D::Error>
where
    D: Deserializer<'de>,
{
    let value = deserialize_u64_or_max(deserializer)?;
    // Fails on 32-bit targets if the value exceeds usize::MAX.
    value.try_into().map_err(D::Error::custom)
}
/// Deserialize into `U256` from either a `u64` or a `U256` hex string.
pub fn deserialize_u64_to_u256<'de, D>(deserializer: D) -> Result<U256, D::Error>
where
    D: Deserializer<'de>,
{
    /// Untagged helper: tries the `U256` representation first, then a plain `u64`.
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum NumericValue {
        U256(U256),
        U64(u64),
    }
    match NumericValue::deserialize(deserializer)? {
        NumericValue::U64(n) => Ok(U256::from(n)),
        NumericValue::U256(n) => Ok(n),
    }
}
/// Serialize a `U256` in the smallest lossless TOML-friendly representation.
///
/// If the number fits into an i64, serialize it as a number without quotation marks.
/// If the number fits into a u64, serialize it as a stringified number with quotation marks.
/// Otherwise, serialize it as a hex string with quotation marks.
pub fn serialize_u64_or_u256<S>(n: &U256, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    // The TOML specification handles integers as i64 so the number representation is limited to
    // i64. If the number is larger than `i64::MAX` and up to `u64::MAX`, we serialize it as a
    // string to avoid losing precision.
    if let Ok(n_i64) = i64::try_from(*n) {
        serializer.serialize_i64(n_i64)
    } else if let Ok(n_u64) = u64::try_from(*n) {
        serializer.serialize_str(&n_u64.to_string())
    } else {
        serializer.serialize_str(&format!("{n:#x}"))
    }
}
/// Helper type to parse both `u64` and `U256`
///
/// Deserialized untagged: the `U256` representation is tried first, then `u64`.
#[derive(Clone, Copy, Deserialize)]
#[serde(untagged)]
pub enum Numeric {
    /// A [U256] value.
    U256(U256),
    /// A `u64` value.
    Num(u64),
}
impl From<Numeric> for U256 {
    fn from(n: Numeric) -> Self {
        // Both variants widen losslessly into a U256.
        match n {
            Numeric::U256(value) => value,
            Numeric::Num(value) => Self::from(value),
        }
    }
}
impl FromStr for Numeric {
    type Err = String;

    /// Parses a decimal number, or a hexadecimal one when prefixed with `0x`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.starts_with("0x") {
            // NOTE(review): relies on `from_str_radix` tolerating the `0x` prefix — confirm
            // against the `alloy_primitives`/`ruint` docs.
            U256::from_str_radix(s, 16).map(Numeric::U256).map_err(|err| err.to_string())
        } else {
            U256::from_str(s).map(Numeric::U256).map_err(|err| err.to_string())
        }
    }
}
/// Returns the [SpecId] derived from [EvmVersion]
///
/// The match is exhaustive, so adding a new `EvmVersion` forces an update here.
pub fn evm_spec_id(evm_version: EvmVersion) -> SpecId {
    match evm_version {
        EvmVersion::Homestead => SpecId::HOMESTEAD,
        EvmVersion::TangerineWhistle => SpecId::TANGERINE,
        EvmVersion::SpuriousDragon => SpecId::SPURIOUS_DRAGON,
        EvmVersion::Byzantium => SpecId::BYZANTIUM,
        EvmVersion::Constantinople => SpecId::CONSTANTINOPLE,
        EvmVersion::Petersburg => SpecId::PETERSBURG,
        EvmVersion::Istanbul => SpecId::ISTANBUL,
        EvmVersion::Berlin => SpecId::BERLIN,
        EvmVersion::London => SpecId::LONDON,
        EvmVersion::Paris => SpecId::MERGE,
        EvmVersion::Shanghai => SpecId::SHANGHAI,
        EvmVersion::Cancun => SpecId::CANCUN,
        EvmVersion::Prague => SpecId::PRAGUE,
        EvmVersion::Osaka => SpecId::OSAKA,
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/macros.rs | crates/config/src/macros.rs | /// A macro to implement converters from a type to [`Config`](crate::Config) and
/// [`figment::Figment`].
///
/// This can be used to remove some boilerplate code that's necessary to add additional layer(s) to
/// the `Config`'s default `Figment`.
///
/// `impl_figment` takes the default `Config` and merges additional `Provider`, therefore the
/// targeted type, requires an implementation of `figment::Profile`.
///
/// # Example
///
/// Use `impl_figment` on a type with a `root: Option<PathBuf>` field, which will be used for
/// [`Config::figment_with_root()`](crate::Config::figment_with_root).
///
/// ```rust
/// use std::path::PathBuf;
/// use serde::Serialize;
/// use foundry_config::{Config, impl_figment_convert};
/// use foundry_config::figment::*;
/// use foundry_config::figment::error::Kind::InvalidType;
/// use foundry_config::figment::value::*;
/// #[derive(Default, Serialize)]
/// struct MyArgs {
/// #[serde(skip_serializing_if = "Option::is_none")]
/// root: Option<PathBuf>,
/// }
/// impl_figment_convert!(MyArgs);
///
/// impl Provider for MyArgs {
/// fn metadata(&self) -> Metadata {
/// Metadata::default()
/// }
///
/// fn data(&self) -> std::result::Result<Map<Profile, Dict>, Error> {
/// let value = Value::serialize(self)?;
/// let error = InvalidType(value.to_actual(), "map".into());
/// let mut dict = value.into_dict().ok_or(error)?;
/// Ok(Map::from([(Config::selected_profile(), dict)]))
/// }
/// }
///
/// let figment: Figment = From::from(&MyArgs::default());
///
/// // Use `impl_figment` on a type that has several nested `Provider` as fields but is _not_ a `Provider` itself
///
/// #[derive(Default)]
/// struct Outer {
/// start: MyArgs,
/// second: MyArgs,
/// third: MyArgs,
/// }
/// impl_figment_convert!(Outer, start, second, third);
///
/// let figment: Figment = From::from(&Outer::default());
/// ```
#[macro_export]
macro_rules! impl_figment_convert {
($name:ty) => {
impl<'a> From<&'a $name> for $crate::figment::Figment {
fn from(args: &'a $name) -> Self {
$crate::Config::figment_with_root_opt(args.root.as_deref()).merge(args)
}
}
};
($name:ty, $start:ident $(, $more:ident)*) => {
impl<'a> From<&'a $name> for $crate::figment::Figment {
fn from(args: &'a $name) -> Self {
let mut figment: $crate::figment::Figment = From::from(&args.$start);
$(
figment = figment.merge(&args.$more);
)*
figment
}
}
};
($name:ty, self, $start:ident $(, $more:ident)*) => {
impl<'a> From<&'a $name> for $crate::figment::Figment {
fn from(args: &'a $name) -> Self {
let mut figment: $crate::figment::Figment = From::from(&args.$start);
$(
figment = figment.merge(&args.$more);
)*
figment = figment.merge(args);
figment
}
}
};
}
/// Same as `impl_figment_convert` but also merges the type itself into the figment
///
/// # Example
///
/// Merge several nested `Provider` together with the type itself
///
/// ```rust
/// use foundry_config::{
///     Config,
///     figment::{value::*, *},
///     impl_figment_convert, merge_impl_figment_convert,
/// };
/// use std::path::PathBuf;
///
/// #[derive(Default)]
/// struct MyArgs {
///     root: Option<PathBuf>,
/// }
///
/// impl Provider for MyArgs {
///     fn metadata(&self) -> Metadata {
///         Metadata::default()
///     }
///
///     fn data(&self) -> std::result::Result<Map<Profile, Dict>, Error> {
///         todo!()
///     }
/// }
///
/// impl_figment_convert!(MyArgs);
///
/// #[derive(Default)]
/// struct OuterArgs {
///     value: u64,
///     inner: MyArgs,
/// }
///
/// impl Provider for OuterArgs {
///     fn metadata(&self) -> Metadata {
///         Metadata::default()
///     }
///
///     fn data(&self) -> std::result::Result<Map<Profile, Dict>, Error> {
///         todo!()
///     }
/// }
///
/// merge_impl_figment_convert!(OuterArgs, inner);
/// ```
#[macro_export]
macro_rules! merge_impl_figment_convert {
    ($name:ty, $start:ident $(, $more:ident)*) => {
        impl<'a> From<&'a $name> for $crate::figment::Figment {
            fn from(args: &'a $name) -> Self {
                // Start from the first nested provider, merge the rest in order...
                let mut figment: $crate::figment::Figment = From::from(&args.$start);
                $(
                    figment = figment.merge(&args.$more);
                )*
                // ...then merge the type itself last so it takes precedence.
                figment = figment.merge(args);
                figment
            }
        }
    };
}
/// A macro to implement converters from a type to [`Config`](crate::Config) and
/// [`figment::Figment`].
///
/// Via [Config::to_figment](crate::Config::to_figment) and the
/// [Cast](crate::FigmentProviders::Cast) profile.
#[macro_export]
macro_rules! impl_figment_convert_cast {
    ($name:ty) => {
        impl<'a> From<&'a $name> for $crate::figment::Figment {
            fn from(args: &'a $name) -> Self {
                // Root discovery panics here: there is no fallible conversion path via `From`.
                let root =
                    $crate::find_project_root(None).expect("could not determine project root");
                $crate::Config::with_root(&root)
                    .to_figment($crate::FigmentProviders::Cast)
                    .merge(args)
            }
        }
    };
}
/// Same as `impl_figment_convert` but also implies `Provider` for the given `Serialize` type for
/// convenience. The `Provider` only provides the "root" value for the current profile
#[macro_export]
macro_rules! impl_figment_convert_basic {
    ($name:ty) => {
        $crate::impl_figment_convert!($name);

        impl $crate::figment::Provider for $name {
            fn metadata(&self) -> $crate::figment::Metadata {
                $crate::figment::Metadata::named(stringify!($name))
            }

            fn data(
                &self,
            ) -> Result<
                $crate::figment::value::Map<$crate::figment::Profile, $crate::figment::value::Dict>,
                $crate::figment::Error,
            > {
                // Only the `root` field (when set) is exposed, under the selected profile.
                let mut dict = $crate::figment::value::Dict::new();
                if let Some(root) = self.root.as_ref() {
                    dict.insert(
                        "root".to_string(),
                        $crate::figment::value::Value::serialize(root)?,
                    );
                }
                Ok($crate::figment::value::Map::from([($crate::Config::selected_profile(), dict)]))
            }
        }
    };
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/compilation.rs | crates/config/src/compilation.rs | use crate::{filter::GlobMatcher, serde_helpers};
use foundry_compilers::{
RestrictionsWithVersion,
artifacts::{BytecodeHash, EvmVersion},
multi::{MultiCompilerRestrictions, MultiCompilerSettings},
settings::VyperRestrictions,
solc::{Restriction, SolcRestrictions},
};
use semver::VersionReq;
use serde::{Deserialize, Deserializer, Serialize};
/// Keeps possible overrides for default settings which users may configure to construct
/// additional settings profiles.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct SettingsOverrides {
    /// Name of the settings profile these overrides belong to.
    pub name: String,
    pub via_ir: Option<bool>,
    #[serde(default, with = "serde_helpers::display_from_str_opt")]
    pub evm_version: Option<EvmVersion>,
    pub optimizer: Option<bool>,
    pub optimizer_runs: Option<usize>,
    pub bytecode_hash: Option<BytecodeHash>,
}
impl SettingsOverrides {
    /// Applies the overrides to the given settings.
    ///
    /// Only fields that are `Some` are applied; all other settings are left untouched.
    pub fn apply(&self, settings: &mut MultiCompilerSettings) {
        if let Some(via_ir) = self.via_ir {
            settings.solc.via_ir = Some(via_ir);
        }
        if let Some(evm_version) = self.evm_version {
            // The EVM version override applies to both solc and vyper.
            settings.solc.evm_version = Some(evm_version);
            settings.vyper.evm_version = Some(evm_version);
        }
        if let Some(enabled) = self.optimizer {
            settings.solc.optimizer.enabled = Some(enabled);
        }
        if let Some(optimizer_runs) = self.optimizer_runs {
            settings.solc.optimizer.runs = Some(optimizer_runs);
            // Implicitly enable the optimizer if `optimizer_runs` is set to a value greater
            // than 0 and the user did not explicitly configure `optimizer`.
            if optimizer_runs > 0 && self.optimizer.is_none() {
                settings.solc.optimizer.enabled = Some(true);
            }
        }
        if let Some(bytecode_hash) = self.bytecode_hash {
            // `bytecode_hash` lives in the (optional) metadata settings; create them if absent.
            if let Some(metadata) = settings.solc.metadata.as_mut() {
                metadata.bytecode_hash = Some(bytecode_hash);
            } else {
                settings.solc.metadata = Some(bytecode_hash.into());
            }
        }
    }
}
/// Errors that can occur when converting user-supplied [`CompilationRestrictions`] into
/// compiler restrictions.
#[derive(Debug, thiserror::Error)]
pub enum RestrictionsError {
    /// Both an exact value (e.g. `evm_version`) and a relative bound (e.g. `min_evm_version`)
    /// were configured for the same field.
    #[error("specified both exact and relative restrictions for {0}")]
    BothExactAndRelative(&'static str),
}
/// Restrictions for compilation of given paths.
///
/// Only purpose of this type is to accept user input to later construct
/// `RestrictionsWithVersion<MultiCompilerRestrictions>`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CompilationRestrictions {
    /// Globs selecting the source files these restrictions apply to.
    pub paths: GlobMatcher,
    /// Compiler version requirement; bare versions without an operator are rejected, see
    /// `deserialize_version_req`.
    #[serde(default, deserialize_with = "deserialize_version_req")]
    pub version: Option<VersionReq>,
    pub via_ir: Option<bool>,
    pub bytecode_hash: Option<BytecodeHash>,
    // Either `optimizer_runs` (exact) or the min/max pair may be set, not both.
    pub min_optimizer_runs: Option<usize>,
    pub optimizer_runs: Option<usize>,
    pub max_optimizer_runs: Option<usize>,
    // Either `evm_version` (exact) or the min/max pair may be set, not both.
    #[serde(default, with = "serde_helpers::display_from_str_opt")]
    pub min_evm_version: Option<EvmVersion>,
    #[serde(default, with = "serde_helpers::display_from_str_opt")]
    pub evm_version: Option<EvmVersion>,
    #[serde(default, with = "serde_helpers::display_from_str_opt")]
    pub max_evm_version: Option<EvmVersion>,
}
/// Custom deserializer for the version field that rejects ambiguous bare version numbers.
///
/// Semver interprets a bare `"0.8.11"` as the caret requirement `"^0.8.11"`, which is rarely
/// what users intend for compiler restrictions, so such inputs are rejected with a hint to
/// use an explicit operator (`=`, `>=`, ...).
fn deserialize_version_req<'de, D>(deserializer: D) -> Result<Option<VersionReq>, D::Error>
where
    D: Deserializer<'de>,
{
    let opt_string: Option<String> = Option::deserialize(deserializer)?;
    let Some(opt_string) = opt_string else {
        return Ok(None);
    };
    let version = opt_string.trim();
    // Reject bare versions like "0.8.11" that lack an operator prefix
    if version.chars().next().is_some_and(|c| c.is_ascii_digit()) {
        return Err(serde::de::Error::custom(format!(
            "Invalid version format '{opt_string}' in compilation_restrictions. \
Bare version numbers are ambiguous and default to caret requirements (e.g. '^{version}'). \
Use an explicit constraint such as '={version}' for an exact version or '>={version}' for a minimum version."
        )));
    }
    // Parse the trimmed input so surrounding whitespace (e.g. " =0.8.11 ") does not cause a
    // spurious parse failure; error messages still echo the original input verbatim.
    let req = VersionReq::parse(version).map_err(|e| {
        serde::de::Error::custom(format!(
            "Invalid version requirement '{opt_string}': {e}. \
Examples: '=0.8.11' (exact), '>=0.8.11' (minimum), '>=0.8.11 <0.9.0' (range)."
        ))
    })?;
    Ok(Some(req))
}
impl TryFrom<CompilationRestrictions> for RestrictionsWithVersion<MultiCompilerRestrictions> {
    type Error = RestrictionsError;

    /// Converts validated user input into concrete compiler restrictions.
    fn try_from(value: CompilationRestrictions) -> Result<Self, Self::Error> {
        // Collapses `(min, max, exact)` user input into a single `(min, max)` pair.
        // Specifying an exact value together with either bound is an error.
        fn bounds<T: Copy>(
            min: Option<T>,
            max: Option<T>,
            exact: Option<T>,
            field: &'static str,
        ) -> Result<(Option<T>, Option<T>), RestrictionsError> {
            match (min, max, exact) {
                (None, None, Some(exact)) => Ok((Some(exact), Some(exact))),
                (min, max, None) => Ok((min, max)),
                _ => Err(RestrictionsError::BothExactAndRelative(field)),
            }
        }

        let (min_evm, max_evm) =
            bounds(value.min_evm_version, value.max_evm_version, value.evm_version, "evm_version")?;
        let (min_opt, max_opt) = bounds(
            value.min_optimizer_runs,
            value.max_optimizer_runs,
            value.optimizer_runs,
            "optimizer_runs",
        )?;
        Ok(Self {
            restrictions: MultiCompilerRestrictions {
                solc: SolcRestrictions {
                    evm_version: Restriction { min: min_evm, max: max_evm },
                    via_ir: value.via_ir,
                    optimizer_runs: Restriction { min: min_opt, max: max_opt },
                    bytecode_hash: value.bytecode_hash,
                },
                vyper: VyperRestrictions {
                    evm_version: Restriction { min: min_evm, max: max_evm },
                },
            },
            version: value.version,
        })
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/resolve.rs | crates/config/src/resolve.rs | //! Helper for resolving env vars
use regex::Regex;
use std::{env, env::VarError, fmt, sync::LazyLock};
/// A regex that matches `${val}` placeholders.
///
/// Capture group `outer` is the full `${...}` placeholder, `inner` the variable name inside.
pub static RE_PLACEHOLDER: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"(?m)(?P<outer>\$\{\s*(?P<inner>.*?)\s*})").unwrap());
/// Error when we failed to resolve an env var
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct UnresolvedEnvVarError {
    /// The unresolved input string
    pub unresolved: String,
    /// Var that couldn't be resolved
    pub var: String,
    /// the `env::var` error
    pub source: VarError,
}
impl UnresolvedEnvVarError {
    /// Tries to resolve the value again; this can succeed if the env var has been set in the
    /// meantime.
    pub fn try_resolve(&self) -> Result<String, Self> {
        interpolate(&self.unresolved)
    }
    /// Whether the unresolved input contains at most one placeholder, in which case the
    /// `Display` impl omits echoing the full input string.
    fn is_simple(&self) -> bool {
        RE_PLACEHOLDER.captures_iter(&self.unresolved).count() <= 1
    }
}
impl fmt::Display for UnresolvedEnvVarError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Human readable reason derived from the underlying `env::var` failure.
        let reason = if matches!(self.source, VarError::NotPresent) {
            "not found"
        } else {
            "is not valid unicode"
        };
        write!(f, "environment variable `{}` {reason}", self.var)?;
        // Echo the full input only when it contains multiple placeholders.
        if !self.is_simple() {
            write!(f, " in `{}`", self.unresolved)?;
        }
        Ok(())
    }
}
impl std::error::Error for UnresolvedEnvVarError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // The underlying `VarError` is the root cause.
        let src: &(dyn std::error::Error + 'static) = &self.source;
        Some(src)
    }
}
/// Replaces all Env var placeholders in the input string with the values they hold
pub fn interpolate(input: &str) -> Result<String, UnresolvedEnvVarError> {
let mut res = input.to_string();
// loop over all placeholders in the input and replace them one by one
for caps in RE_PLACEHOLDER.captures_iter(input) {
let var = &caps["inner"];
let value = env::var(var).map_err(|source| UnresolvedEnvVarError {
unresolved: input.to_string(),
var: var.to_string(),
source,
})?;
res = res.replacen(&caps["outer"], &value, 1);
}
Ok(res)
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Sanity checks for the placeholder regex and its named capture groups.
    #[test]
    fn can_find_placeholder() {
        // No placeholder present.
        let val = "https://eth-mainnet.alchemyapi.io/v2/346273846238426342";
        assert!(!RE_PLACEHOLDER.is_match(val));
        // The entire value is a placeholder.
        let val = "${RPC_ENV}";
        assert!(RE_PLACEHOLDER.is_match(val));
        // Placeholder embedded at the end of a longer string.
        let val = "https://eth-mainnet.alchemyapi.io/v2/${API_KEY}";
        assert!(RE_PLACEHOLDER.is_match(val));
        let cap = RE_PLACEHOLDER.captures(val).unwrap();
        assert_eq!(cap.name("outer").unwrap().as_str(), "${API_KEY}");
        assert_eq!(cap.name("inner").unwrap().as_str(), "API_KEY");
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/fs_permissions.rs | crates/config/src/fs_permissions.rs | //! Support for controlling fs access
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{
fmt,
path::{Path, PathBuf},
str::FromStr,
};
/// Configures file system access
///
/// E.g. for cheat codes (`vm.writeFile`)
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct FsPermissions {
    /// What kind of access is allowed, per path. Entries do not shadow each other; see
    /// [`FsPermissions::find_permission`] for the resolution rules.
    pub permissions: Vec<PathPermission>,
}
impl FsPermissions {
    /// Creates a new instance with the given `permissions`
    pub fn new(permissions: impl IntoIterator<Item = PathPermission>) -> Self {
        Self { permissions: permissions.into_iter().collect() }
    }
    /// Adds a new permission
    pub fn add(&mut self, permission: PathPermission) {
        self.permissions.push(permission)
    }
    /// Returns true if access to the specified path is allowed with the specified access kind.
    ///
    /// This first checks permission, and only if it is granted, whether the path is allowed.
    ///
    /// We only allow paths that are inside allowed paths.
    ///
    /// Caution: This should be called with normalized paths if the `allowed_paths` are also
    /// normalized.
    pub fn is_path_allowed(&self, path: &Path, kind: FsAccessKind) -> bool {
        self.find_permission(path).is_some_and(|perm| perm.is_granted(kind))
    }
    /// Returns the permission for the matching path.
    ///
    /// This finds the longest matching path with resolved sym links and returns the highest
    /// privilege permission. The algorithm works as follows:
    ///
    /// 1. Find all permissions where the path matches (using longest path match)
    /// 2. Return the highest privilege permission from those matches
    ///
    /// Example scenarios:
    ///
    /// ```text
    /// ./out = read
    /// ./out/contracts = read-write
    /// ```
    /// Checking `./out/contracts/MyContract.sol` returns `read-write` (longest path match)
    ///
    /// ```text
    /// ./out/contracts = read
    /// ./out/contracts = write
    /// ```
    /// Checking `./out/contracts/MyContract.sol` returns `write` (highest privilege, which also
    /// grants read access)
    pub fn find_permission(&self, path: &Path) -> Option<FsAccessPermission> {
        let mut max_path_len = 0;
        let mut highest_permission = FsAccessPermission::None;
        // Find all matching permissions at the longest matching path
        for perm in &self.permissions {
            // Resolve symlinks before matching; fall back to the configured path as-is if
            // canonicalization fails (e.g. the path does not exist).
            let permission_path = dunce::canonicalize(&perm.path).unwrap_or(perm.path.clone());
            if path.starts_with(&permission_path) {
                let path_len = permission_path.components().count();
                if path_len > max_path_len {
                    // Found a longer matching path, reset to this permission
                    max_path_len = path_len;
                    highest_permission = perm.access;
                } else if path_len == max_path_len {
                    // Same path length, keep the highest privilege.
                    // Read + Write from two entries combine into ReadWrite.
                    highest_permission = match (highest_permission, perm.access) {
                        (FsAccessPermission::ReadWrite, _)
                        | (FsAccessPermission::Read, FsAccessPermission::Write)
                        | (FsAccessPermission::Write, FsAccessPermission::Read) => {
                            FsAccessPermission::ReadWrite
                        }
                        (FsAccessPermission::None, perm) => perm,
                        (existing_perm, _) => existing_perm,
                    }
                }
            }
        }
        // `max_path_len == 0` means nothing matched at all.
        if max_path_len > 0 { Some(highest_permission) } else { None }
    }
    /// Updates all `allowed_paths` and joins ([`Path::join`]) the `root` with all entries
    pub fn join_all(&mut self, root: &Path) {
        self.permissions.iter_mut().for_each(|perm| {
            perm.path = root.join(&perm.path);
        })
    }
    /// Same as [`Self::join_all`] but consumes the type
    pub fn joined(mut self, root: &Path) -> Self {
        self.join_all(root);
        self
    }
    /// Removes all existing permissions for the given path
    pub fn remove(&mut self, path: &Path) {
        self.permissions.retain(|permission| permission.path != path)
    }
    /// Returns true if no permissions are configured
    pub fn is_empty(&self) -> bool {
        self.permissions.is_empty()
    }
    /// Returns the number of configured permissions
    pub fn len(&self) -> usize {
        self.permissions.len()
    }
}
/// Represents an access permission to a single path
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct PathPermission {
    /// Permission level to access the `path`
    pub access: FsAccessPermission,
    /// The targeted path guarded by the permission (may be relative until joined with a root
    /// via [`FsPermissions::join_all`])
    pub path: PathBuf,
}
impl PathPermission {
    /// Creates a permission entry granting `access` for `path`.
    pub fn new(path: impl Into<PathBuf>, access: FsAccessPermission) -> Self {
        let path = path.into();
        Self { path, access }
    }
    /// Shorthand for a read-only permission for the path.
    pub fn read(path: impl Into<PathBuf>) -> Self {
        Self::new(path, FsAccessPermission::Read)
    }
    /// Shorthand for a combined read and write permission for the path.
    pub fn read_write(path: impl Into<PathBuf>) -> Self {
        Self::new(path, FsAccessPermission::ReadWrite)
    }
    /// Shorthand for a write-only permission for the path.
    pub fn write(path: impl Into<PathBuf>) -> Self {
        Self::new(path, FsAccessPermission::Write)
    }
    /// Shorthand for an entry that denies all access to the path.
    pub fn none(path: impl Into<PathBuf>) -> Self {
        Self::new(path, FsAccessPermission::None)
    }
    /// Whether this entry grants the requested kind of access.
    pub fn is_granted(&self, kind: FsAccessKind) -> bool {
        self.access.is_granted(kind)
    }
}
/// Represents the operation on the fs
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FsAccessKind {
    /// read from fs (`vm.readFile`)
    Read,
    /// write to fs (`vm.writeFile`)
    Write,
}
impl fmt::Display for FsAccessKind {
    // Renders as the lowercase kind name, e.g. for error messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Read => f.write_str("read"),
            Self::Write => f.write_str("write"),
        }
    }
}
/// Determines the status of file system access
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum FsAccessPermission {
    /// FS access is _not_ allowed
    #[default]
    None,
    /// Only reading is allowed
    Read,
    /// Only writing is allowed
    Write,
    /// FS access is allowed, this includes `read` + `write`
    ReadWrite,
}
impl FsAccessPermission {
    /// Returns true if this permission level grants the requested access `kind`.
    ///
    /// `ReadWrite` grants both kinds, `Read` and `Write` only their matching kind, and
    /// `None` grants nothing.
    pub fn is_granted(&self, kind: FsAccessKind) -> bool {
        // The catch-all of `matches!` covers `None` (and any other non-granting
        // combination), so no separate `(Self::None, _) => false` arm is needed.
        matches!(
            (self, kind),
            (Self::ReadWrite, _)
                | (Self::Read, FsAccessKind::Read)
                | (Self::Write, FsAccessKind::Write)
        )
    }
}
impl FromStr for FsAccessPermission {
    type Err = String;

    /// Parses `read`, `write`, `read-write`/`readwrite`/`true` and `none`/`false`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "read" => Ok(Self::Read),
            "write" => Ok(Self::Write),
            "true" | "read-write" | "readwrite" => Ok(Self::ReadWrite),
            "false" | "none" => Ok(Self::None),
            other => Err(format!("Unknown variant {other}")),
        }
    }
}
impl fmt::Display for FsAccessPermission {
    /// Renders the canonical string form (`read-write`, `none`, `read`, `write`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            Self::None => "none",
            Self::Read => "read",
            Self::Write => "write",
            Self::ReadWrite => "read-write",
        };
        f.write_str(s)
    }
}
// `ReadWrite`/`None` serialize as booleans for backwards compatibility with
// `fs_permissions = [{ access = true, ... }]`; `Read`/`Write` serialize as strings.
impl Serialize for FsAccessPermission {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Self::ReadWrite => serializer.serialize_bool(true),
            Self::None => serializer.serialize_bool(false),
            Self::Read => serializer.serialize_str("read"),
            Self::Write => serializer.serialize_str("write"),
        }
    }
}
// Mirror of the `Serialize` impl: accepts either a boolean or any string form understood by
// the `FromStr` impl.
impl<'de> Deserialize<'de> for FsAccessPermission {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        #[serde(untagged)]
        enum Status {
            Bool(bool),
            String(String),
        }
        match Status::deserialize(deserializer)? {
            Status::Bool(enabled) => {
                let status = if enabled { Self::ReadWrite } else { Self::None };
                Ok(status)
            }
            Status::String(val) => val.parse().map_err(serde::de::Error::custom),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Covers all accepted string/bool spellings of `FsAccessPermission`.
    #[test]
    fn can_parse_permission() {
        assert_eq!(FsAccessPermission::ReadWrite, "true".parse().unwrap());
        assert_eq!(FsAccessPermission::ReadWrite, "readwrite".parse().unwrap());
        assert_eq!(FsAccessPermission::ReadWrite, "read-write".parse().unwrap());
        assert_eq!(FsAccessPermission::None, "false".parse().unwrap());
        assert_eq!(FsAccessPermission::None, "none".parse().unwrap());
        assert_eq!(FsAccessPermission::Read, "read".parse().unwrap());
        assert_eq!(FsAccessPermission::Write, "write".parse().unwrap());
    }
    /// The longest matching configured path wins.
    #[test]
    fn nested_permissions() {
        let permissions = FsPermissions::new(vec![
            PathPermission::read("./"),
            PathPermission::write("./out"),
            PathPermission::read_write("./out/contracts"),
        ]);
        let permission =
            permissions.find_permission(Path::new("./out/contracts/MyContract.sol")).unwrap();
        assert_eq!(FsAccessPermission::ReadWrite, permission);
        let permission = permissions.find_permission(Path::new("./out/MyContract.sol")).unwrap();
        assert_eq!(FsAccessPermission::Write, permission);
    }
    #[test]
    fn read_write_permission_combination() {
        // When multiple permissions are defined for the same path, highest privilege wins
        let permissions = FsPermissions::new(vec![
            PathPermission::read("./out/contracts"),
            PathPermission::write("./out/contracts"),
        ]);
        let permission =
            permissions.find_permission(Path::new("./out/contracts/MyContract.sol")).unwrap();
        assert_eq!(FsAccessPermission::ReadWrite, permission);
    }
    #[test]
    fn longest_path_takes_precedence() {
        let permissions = FsPermissions::new(vec![
            PathPermission::read_write("./out"),
            PathPermission::read("./out/contracts"),
        ]);
        // More specific path (./out/contracts) takes precedence even with lower privilege
        let permission =
            permissions.find_permission(Path::new("./out/contracts/MyContract.sol")).unwrap();
        assert_eq!(FsAccessPermission::Read, permission);
        // Broader path still applies to its own files
        let permission = permissions.find_permission(Path::new("./out/other.sol")).unwrap();
        assert_eq!(FsAccessPermission::ReadWrite, permission);
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/bind_json.rs | crates/config/src/bind_json.rs | use crate::filter::GlobMatcher;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Contains the config for `forge bind-json`
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BindJsonConfig {
    /// Path for the generated bindings file.
    pub out: PathBuf,
    /// Globs to include.
    ///
    /// If provided, only the files matching the globs will be included. Otherwise, defaults to
    /// including all project files.
    pub include: Vec<GlobMatcher>,
    /// Globs to ignore
    pub exclude: Vec<GlobMatcher>,
}
impl Default for BindJsonConfig {
    fn default() -> Self {
        Self {
            // Default output path; presumably resolved relative to the project's source
            // root — confirm against the `forge bind-json` command implementation.
            out: PathBuf::from("utils/JsonBindings.sol"),
            exclude: Vec::new(),
            include: Vec::new(),
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/cache.rs | crates/config/src/cache.rs | //! Support types for configuring storage caching
use crate::Chain;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{fmt, fmt::Formatter, str::FromStr};
use unit_prefix::NumberPrefix;
/// Settings to configure caching of remote storage data.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct StorageCachingConfig {
    /// Chains to cache.
    pub chains: CachedChains,
    /// Endpoints to cache.
    pub endpoints: CachedEndpoints,
}
impl StorageCachingConfig {
    /// Whether caching should be enabled for the endpoint
    pub fn enable_for_endpoint(&self, endpoint: impl AsRef<str>) -> bool {
        self.endpoints.is_match(endpoint)
    }
    /// Whether caching should be enabled for the chain id
    pub fn enable_for_chain_id(&self, chain_id: u64) -> bool {
        // ignore dev chains: these well-known local/dev chain ids are never cached
        if [99, 1337, 31337].contains(&chain_id) {
            return false;
        }
        self.chains.is_match(chain_id)
    }
}
/// What chains to cache
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub enum CachedChains {
    /// Cache all chains
    #[default]
    All,
    /// Don't cache anything
    None,
    /// Only cache these chains
    Chains(Vec<Chain>),
}
impl CachedChains {
    /// Whether the given `chain` id should be cached
    pub fn is_match(&self, chain: u64) -> bool {
        match self {
            Self::All => true,
            Self::None => false,
            Self::Chains(chains) => chains.iter().any(|c| c.id() == chain),
        }
    }
}
// `All`/`None` serialize as the strings "all"/"none"; an explicit chain list serializes as a
// plain sequence.
impl Serialize for CachedChains {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Self::All => serializer.serialize_str("all"),
            Self::None => serializer.serialize_str("none"),
            Self::Chains(chains) => chains.serialize(serializer),
        }
    }
}
// Mirror of the `Serialize` impl: accepts "all", "none" or a list of chains; any other
// string is rejected as an unknown variant.
impl<'de> Deserialize<'de> for CachedChains {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        #[serde(untagged)]
        enum Chains {
            All(String),
            Chains(Vec<Chain>),
        }
        match Chains::deserialize(deserializer)? {
            Chains::All(s) => match s.as_str() {
                "all" => Ok(Self::All),
                "none" => Ok(Self::None),
                s => Err(serde::de::Error::unknown_variant(s, &["all", "none"])),
            },
            Chains::Chains(chains) => Ok(Self::Chains(chains)),
        }
    }
}
/// What endpoints to enable caching for
#[derive(Clone, Debug, Default)]
pub enum CachedEndpoints {
    /// Cache all endpoints
    #[default]
    All,
    /// Only cache non-local host endpoints
    Remote,
    /// Only cache endpoints that match the pattern
    Pattern(regex::Regex),
}
impl CachedEndpoints {
    /// Whether the `endpoint` matches
    pub fn is_match(&self, endpoint: impl AsRef<str>) -> bool {
        let endpoint = endpoint.as_ref();
        match self {
            Self::All => true,
            // "Remote" means anything that does not target a localhost-style URL.
            Self::Remote => !endpoint.contains("localhost:") && !endpoint.contains("127.0.0.1:"),
            Self::Pattern(re) => re.is_match(endpoint),
        }
    }
}
// `regex::Regex` does not implement `PartialEq`, so compare patterns by their source string.
impl PartialEq for CachedEndpoints {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::All, Self::All) | (Self::Remote, Self::Remote) => true,
            (Self::Pattern(a), Self::Pattern(b)) => a.as_str() == b.as_str(),
            _ => false,
        }
    }
}
impl Eq for CachedEndpoints {}
impl fmt::Display for CachedEndpoints {
    /// Renders `all`, `remote`, or the raw regex pattern.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Pattern(re) => re.fmt(f),
            Self::All => f.write_str("all"),
            Self::Remote => f.write_str("remote"),
        }
    }
}
impl FromStr for CachedEndpoints {
    type Err = regex::Error;

    /// Parses `all`, `remote`, or treats any other input as a regex pattern.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "all" {
            return Ok(Self::All);
        }
        if s == "remote" {
            return Ok(Self::Remote);
        }
        Ok(Self::Pattern(s.parse()?))
    }
}
// Deserializes from a string via the `FromStr` impl ("all", "remote", or a regex pattern).
impl<'de> Deserialize<'de> for CachedEndpoints {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        String::deserialize(deserializer)?.parse().map_err(serde::de::Error::custom)
    }
}
// Serializes back to the same string forms accepted by `FromStr`.
impl Serialize for CachedEndpoints {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Self::All => serializer.serialize_str("all"),
            Self::Remote => serializer.serialize_str("remote"),
            Self::Pattern(pattern) => serializer.serialize_str(pattern.as_str()),
        }
    }
}
/// Content of the foundry cache folder
#[derive(Debug, Default)]
pub struct Cache {
    /// The list of chains in the cache, with their on-disk sizes
    pub chains: Vec<ChainCache>,
}
impl fmt::Display for Cache {
    /// Renders a human readable per-chain summary of cache sizes, e.g.:
    ///
    /// ```text
    /// - mainnet (503.0 B)
    ///     - Block Explorer (500.0 B)
    ///
    ///     - Block 1 (1.0 B)
    /// ```
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Renders a byte count as a decimal human readable size, e.g. `503.0 B` or `4.6 kB`.
        fn human_size(bytes: f32) -> String {
            match NumberPrefix::decimal(bytes) {
                NumberPrefix::Standalone(size) => format!("{size:.1} B"),
                NumberPrefix::Prefixed(prefix, size) => format!("{size:.1} {prefix}B"),
            }
        }

        for chain in &self.chains {
            // Chain total = block explorer dir size + sum of all block dir sizes.
            let total =
                chain.block_explorer as f32 + chain.blocks.iter().map(|x| x.1).sum::<u64>() as f32;
            writeln!(f, "- {} ({})", chain.name, human_size(total))?;
            writeln!(f, "\t- Block Explorer ({})\n", human_size(chain.block_explorer as f32))?;
            for (number, size) in &chain.blocks {
                writeln!(f, "\t- Block {number} ({})", human_size(*size as f32))?;
            }
        }
        Ok(())
    }
}
/// A representation of data for a given chain in the foundry cache
#[derive(Debug)]
pub struct ChainCache {
    /// The name of the chain
    pub name: String,
    /// A tuple containing block number and the block directory size in bytes
    pub blocks: Vec<(String, u64)>,
    /// The size of the block explorer directory in bytes
    pub block_explorer: u64,
}
#[cfg(test)]
mod tests {
    use super::*;
    use similar_asserts::assert_eq;
    /// Round-trips the TOML forms accepted for `rpc_storage_caching`.
    #[test]
    fn can_parse_storage_config() {
        #[derive(Serialize, Deserialize)]
        pub struct Wrapper {
            pub rpc_storage_caching: StorageCachingConfig,
        }
        let s = r#"rpc_storage_caching = { chains = "all", endpoints = "remote"}"#;
        let w: Wrapper = toml::from_str(s).unwrap();
        assert_eq!(
            w.rpc_storage_caching,
            StorageCachingConfig { chains: CachedChains::All, endpoints: CachedEndpoints::Remote }
        );
        // Chains may be given by id or by name.
        let s = r#"rpc_storage_caching = { chains = [1, "optimism", 999999], endpoints = "all"}"#;
        let w: Wrapper = toml::from_str(s).unwrap();
        assert_eq!(
            w.rpc_storage_caching,
            StorageCachingConfig {
                chains: CachedChains::Chains(vec![
                    Chain::mainnet(),
                    Chain::optimism_mainnet(),
                    Chain::from_id(999999)
                ]),
                endpoints: CachedEndpoints::All,
            }
        )
    }
    /// Pins the exact `Display` output of `Cache`, including unit prefixes.
    #[test]
    fn cache_to_string() {
        let cache = Cache {
            chains: vec![
                ChainCache {
                    name: "mainnet".to_string(),
                    blocks: vec![("1".to_string(), 1), ("2".to_string(), 2)],
                    block_explorer: 500,
                },
                ChainCache {
                    name: "ropsten".to_string(),
                    blocks: vec![("1".to_string(), 1), ("2".to_string(), 2)],
                    block_explorer: 4567,
                },
                ChainCache {
                    name: "rinkeby".to_string(),
                    blocks: vec![("1".to_string(), 1032), ("2".to_string(), 2000000)],
                    block_explorer: 4230000,
                },
                ChainCache {
                    name: "amoy".to_string(),
                    blocks: vec![("1".to_string(), 1), ("2".to_string(), 2)],
                    block_explorer: 0,
                },
            ],
        };
        let expected = "\
- mainnet (503.0 B)\n\t\
- Block Explorer (500.0 B)\n\n\t\
- Block 1 (1.0 B)\n\t\
- Block 2 (2.0 B)\n\
- ropsten (4.6 kB)\n\t\
- Block Explorer (4.6 kB)\n\n\t\
- Block 1 (1.0 B)\n\t\
- Block 2 (2.0 B)\n\
- rinkeby (6.2 MB)\n\t\
- Block Explorer (4.2 MB)\n\n\t\
- Block 1 (1.0 kB)\n\t\
- Block 2 (2.0 MB)\n\
- amoy (3.0 B)\n\t\
- Block Explorer (0.0 B)\n\n\t\
- Block 1 (1.0 B)\n\t\
- Block 2 (2.0 B)\n";
        assert_eq!(format!("{cache}"), expected);
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/invariant.rs | crates/config/src/invariant.rs | //! Configuration for invariant testing
use crate::fuzz::{FuzzCorpusConfig, FuzzDictionaryConfig};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
/// Contains the config for invariant testing
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct InvariantConfig {
    /// The number of runs that must execute for each invariant test group.
    pub runs: u32,
    /// The number of calls executed to attempt to break invariants in one run.
    pub depth: u32,
    /// Fails the invariant fuzzing if a revert occurs
    pub fail_on_revert: bool,
    /// Allows overriding an unsafe external call when running invariant tests. eg. reentrancy
    /// checks
    pub call_override: bool,
    /// The fuzz dictionary configuration
    #[serde(flatten)]
    pub dictionary: FuzzDictionaryConfig,
    /// The maximum number of attempts to shrink the sequence
    pub shrink_run_limit: u32,
    /// The maximum number of rejects via `vm.assume` which can be encountered during a single
    /// invariant run.
    pub max_assume_rejects: u32,
    /// Number of runs to execute and include in the gas report.
    pub gas_report_samples: u32,
    /// The fuzz corpus configuration.
    #[serde(flatten)]
    pub corpus: FuzzCorpusConfig,
    /// Path where invariant failures are recorded and replayed.
    pub failure_persist_dir: Option<PathBuf>,
    /// Whether to collect and display fuzzed selectors metrics.
    pub show_metrics: bool,
    /// Optional timeout (in seconds) for each invariant test.
    pub timeout: Option<u32>,
    /// Display counterexample as solidity calls.
    pub show_solidity: bool,
    /// Maximum time (in seconds) between generated txs.
    pub max_time_delay: Option<u32>,
    /// Maximum number of blocks elapsed between generated txs.
    pub max_block_delay: Option<u32>,
}
impl Default for InvariantConfig {
    /// Default invariant settings: 256 runs with a depth of 500 calls each, and a fuzz
    /// dictionary weight of 80.
    fn default() -> Self {
        Self {
            runs: 256,
            depth: 500,
            fail_on_revert: false,
            call_override: false,
            dictionary: FuzzDictionaryConfig { dictionary_weight: 80, ..Default::default() },
            shrink_run_limit: 5000,
            max_assume_rejects: 65536,
            gas_report_samples: 256,
            corpus: FuzzCorpusConfig::default(),
            // No failure persistence or timeouts unless explicitly configured.
            failure_persist_dir: None,
            show_metrics: true,
            timeout: None,
            show_solidity: false,
            max_time_delay: None,
            max_block_delay: None,
        }
    }
}
impl InvariantConfig {
    /// Creates an invariant configuration that records failures under the given cache
    /// directory (`{PROJECT_ROOT}/cache/fuzz`), keeping all other defaults.
    pub fn new(cache_dir: PathBuf) -> Self {
        let mut config = Self::default();
        config.failure_persist_dir = Some(cache_dir);
        config
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/fmt.rs | crates/config/src/fmt.rs | //! Configuration specific to the `forge fmt` command and the `forge_fmt` package
use serde::{Deserialize, Serialize};
/// Contains the config and rule set for the `forge fmt` formatter
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct FormatterConfig {
    /// Maximum line length where formatter will try to wrap the line
    pub line_length: usize,
    /// Number of spaces per indentation level. Ignored if style is Tab
    pub tab_width: usize,
    /// Style of indent
    pub style: IndentStyle,
    /// Print spaces between brackets
    pub bracket_spacing: bool,
    /// Style of uint/int256 types
    pub int_types: IntTypes,
    /// Style of multiline function header in case it doesn't fit
    pub multiline_func_header: MultilineFuncHeaderStyle,
    /// Style of quotation marks
    pub quote_style: QuoteStyle,
    /// Style of underscores in number literals
    pub number_underscore: NumberUnderscore,
    /// Style of underscores in hex literals
    pub hex_underscore: HexUnderscore,
    /// Style of single line blocks in statements
    pub single_line_statement_blocks: SingleLineBlockStyle,
    /// Print space in state variable, function and modifier `override` attribute
    pub override_spacing: bool,
    /// Wrap comments on `line_length` reached
    pub wrap_comments: bool,
    /// Style of doc comments
    pub docs_style: DocCommentStyle,
    /// Globs to ignore
    pub ignore: Vec<String>,
    /// Add new line at start and end of contract declarations
    pub contract_new_lines: bool,
    /// Sort import statements alphabetically in groups (a group is separated by a newline).
    pub sort_imports: bool,
    /// Whether to suppress spaces around the power operator (`**`).
    pub pow_no_space: bool,
    /// Style that determines if a broken list, should keep its elements together on their own
    /// line, before breaking individually.
    pub prefer_compact: PreferCompact,
    /// Keep single imports on a single line even if they exceed line length.
    pub single_line_imports: bool,
}
/// Style of integer types.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum IntTypes {
    /// Use the type defined in the source code.
    Preserve,
    /// Print the full length `uint256` or `int256`.
    #[default]
    Long,
    /// Print the alias `uint` or `int`.
    Short,
}
/// Style of underscores in number literals
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum NumberUnderscore {
    /// Use the underscores defined in the source code
    #[default]
    Preserve,
    /// Remove all underscores
    Remove,
    /// Add an underscore every thousand, if greater than 9999
    /// e.g. 1000 -> 1000 and 10000 -> 10_000
    Thousands,
}
impl NumberUnderscore {
    /// Returns true if the option is `Preserve`
    #[inline]
    pub fn is_preserve(self) -> bool {
        matches!(self, Self::Preserve)
    }
    /// Returns true if the option is `Remove`
    #[inline]
    pub fn is_remove(self) -> bool {
        matches!(self, Self::Remove)
    }
    /// Returns true if the option is `Thousands`
    #[inline]
    pub fn is_thousands(self) -> bool {
        matches!(self, Self::Thousands)
    }
}
/// Style of underscores in hex literals
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum HexUnderscore {
    /// Use the underscores defined in the source code
    Preserve,
    /// Remove all underscores
    #[default]
    Remove,
    /// Add underscore as separator between byte boundaries
    Bytes,
}
/// Style of doc comments
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum DocCommentStyle {
    /// Preserve the source code style
    #[default]
    Preserve,
    /// Use single-line style (`///`)
    Line,
    /// Use block style (`/** .. */`)
    Block,
}
/// Style of string quotes
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum QuoteStyle {
    /// Use quotation mark defined in the source code.
    Preserve,
    /// Use double quotes where possible.
    #[default]
    Double,
    /// Use single quotes where possible.
    Single,
}
impl QuoteStyle {
    /// Returns the associated quotation mark character, or `None` for `Preserve`.
    pub const fn quote(self) -> Option<char> {
        match self {
            Self::Preserve => None,
            Self::Double => Some('"'),
            Self::Single => Some('\''),
        }
    }
}
/// Style of single line blocks in statements
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SingleLineBlockStyle {
/// Preserve the original style
#[default]
Preserve,
/// Prefer single line block when possible
Single,
/// Always use multiline block
Multi,
}
/// Style of function header in case it doesn't fit
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum MultilineFuncHeaderStyle {
/// Always write function parameters multiline.
#[serde(alias = "params_first")] // alias for backwards compatibility
ParamsAlways,
/// Write function parameters multiline first when there is more than one param.
ParamsFirstMulti,
/// Write function attributes multiline first.
#[default]
AttributesFirst,
/// If function params or attrs are multiline.
/// split the rest
All,
/// Same as `All` but writes function params multiline even when there is a single param.
AllParams,
}
impl MultilineFuncHeaderStyle {
pub fn all(&self) -> bool {
matches!(self, Self::All | Self::AllParams)
}
pub fn params_first(&self) -> bool {
matches!(self, Self::ParamsAlways | Self::ParamsFirstMulti)
}
pub fn attrib_first(&self) -> bool {
matches!(self, Self::AttributesFirst)
}
}
/// Style that determines if a broken list, should keep its elements together on their own line,
/// before breaking individually.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum PreferCompact {
/// All elements are preferred consistent.
None,
/// Calls are preferred compact. Events and errors break consistently.
Calls,
/// Events are preferred compact. Calls and errors break consistently.
Events,
/// Errors are preferred compact. Calls and events break consistently.
Errors,
/// Events and errors are preferred compact. Calls break consistently.
EventsErrors,
/// All elements are preferred compact.
#[default]
All,
}
impl PreferCompact {
pub fn calls(&self) -> bool {
matches!(self, Self::All | Self::Calls)
}
pub fn events(&self) -> bool {
matches!(self, Self::All | Self::Events | Self::EventsErrors)
}
pub fn errors(&self) -> bool {
matches!(self, Self::All | Self::Errors | Self::EventsErrors)
}
}
/// Style of indent
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum IndentStyle {
#[default]
Space,
Tab,
}
impl Default for FormatterConfig {
fn default() -> Self {
Self {
line_length: 120,
tab_width: 4,
style: IndentStyle::Space,
bracket_spacing: false,
int_types: IntTypes::default(),
multiline_func_header: MultilineFuncHeaderStyle::default(),
quote_style: QuoteStyle::default(),
number_underscore: NumberUnderscore::default(),
hex_underscore: HexUnderscore::default(),
single_line_statement_blocks: SingleLineBlockStyle::default(),
override_spacing: false,
wrap_comments: false,
ignore: vec![],
contract_new_lines: false,
sort_imports: false,
pow_no_space: false,
prefer_compact: PreferCompact::default(),
docs_style: DocCommentStyle::default(),
single_line_imports: false,
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/inline/natspec.rs | crates/config/src/inline/natspec.rs | use super::{INLINE_CONFIG_PREFIX, InlineConfigError, InlineConfigErrorKind};
use figment::Profile;
use foundry_compilers::{
ProjectCompileOutput,
artifacts::{Node, ast::NodeType},
};
use itertools::Itertools;
use serde_json::Value;
use solar::{
ast::{self, Span},
interface::Session,
};
use std::{collections::BTreeMap, path::Path};
/// Convenient struct to hold in-line per-test configurations
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct NatSpec {
/// The parent contract of the natspec.
pub contract: String,
/// The function annotated with the natspec. None if the natspec is contract-level.
pub function: Option<String>,
/// The line the natspec begins, in the form `line:column`, i.e. `10:21`.
pub line: String,
/// The actual natspec comment, without slashes or block punctuation.
pub docs: String,
}
impl NatSpec {
/// Factory function that extracts a vector of [`NatSpec`] instances from
/// a solc compiler output. The root path is to express contract base dirs.
/// That is essential to match per-test configs at runtime.
#[instrument(name = "NatSpec::parse", skip_all)]
pub fn parse(output: &ProjectCompileOutput, root: &Path) -> Vec<Self> {
let mut natspecs: Vec<Self> = vec![];
let compiler = output.parser().solc().compiler();
let solar = SolarParser::new(compiler.sess());
let solc = SolcParser::new();
for (id, artifact) in output.artifact_ids() {
let path = id.source.as_path();
let path = path.strip_prefix(root).unwrap_or(path);
let abs_path = &*root.join(path);
let contract_name = id.name.split('.').next().unwrap();
// `id.identifier` but with the stripped path.
let contract = format!("{}:{}", path.display(), id.name);
let mut used_solar = false;
compiler.enter_sequential(|compiler| {
if let Some((_, source)) = compiler.gcx().get_ast_source(abs_path)
&& let Some(ast) = &source.ast
{
solar.parse_ast(&mut natspecs, ast, &contract, contract_name);
used_solar = true;
}
});
if !used_solar {
warn!(?abs_path, %contract, "could not parse natspec with solar");
}
let mut used_solc = false;
if !used_solar
&& let Some(ast) = &artifact.ast
&& let Some(node) = solc.contract_root_node(&ast.nodes, &contract)
{
solc.parse(&mut natspecs, &contract, node, true);
used_solc = true;
}
if !used_solar && !used_solc {
warn!(?abs_path, %contract, "could not parse natspec");
}
}
natspecs
}
/// Checks if all configuration lines use a valid profile.
///
/// i.e. Given available profiles
/// ```rust
/// let _profiles = vec!["ci", "default"];
/// ```
/// A configuration like `forge-config: ciii.invariant.depth = 1` would result
/// in an error.
pub fn validate_profiles(&self, profiles: &[Profile]) -> eyre::Result<()> {
for config in self.config_values() {
if !profiles.iter().any(|p| {
config
.strip_prefix(p.as_str().as_str())
.is_some_and(|rest| rest.trim_start().starts_with('.'))
}) {
Err(InlineConfigError {
location: self.location_string(),
kind: InlineConfigErrorKind::InvalidProfile(
config.to_string(),
profiles.iter().format(", ").to_string(),
),
})?
}
}
Ok(())
}
/// Returns the path of the contract.
pub fn path(&self) -> &str {
match self.contract.split_once(':') {
Some((path, _)) => path,
None => self.contract.as_str(),
}
}
/// Returns the location of the natspec as a string.
pub fn location_string(&self) -> String {
format!("{}:{}", self.path(), self.line)
}
/// Returns a list of all the configuration values available in the natspec.
pub fn config_values(&self) -> impl Iterator<Item = &str> {
self.docs.lines().filter_map(|line| {
line.find(INLINE_CONFIG_PREFIX)
.map(|idx| line[idx + INLINE_CONFIG_PREFIX.len()..].trim())
})
}
}
struct SolcParser {
_private: (),
}
impl SolcParser {
fn new() -> Self {
Self { _private: () }
}
/// Given a list of nodes, find a "ContractDefinition" node that matches
/// the provided contract_id.
fn contract_root_node<'a>(&self, nodes: &'a [Node], contract_id: &str) -> Option<&'a Node> {
for n in nodes {
if n.node_type == NodeType::ContractDefinition {
let contract_data = &n.other;
if let Value::String(contract_name) = contract_data.get("name")?
&& contract_id.ends_with(contract_name)
{
return Some(n);
}
}
}
None
}
/// Implements a DFS over a compiler output node and its children.
/// If a natspec is found it is added to `natspecs`
fn parse(&self, natspecs: &mut Vec<NatSpec>, contract: &str, node: &Node, root: bool) {
// If we're at the root contract definition node, try parsing contract-level natspec
if root && let Some((docs, line)) = self.get_node_docs(&node.other) {
natspecs.push(NatSpec { contract: contract.into(), function: None, docs, line })
}
for n in &node.nodes {
if let Some((function, docs, line)) = self.get_fn_data(n) {
natspecs.push(NatSpec {
contract: contract.into(),
function: Some(function),
line,
docs,
})
}
self.parse(natspecs, contract, n, false);
}
}
/// Given a compilation output node, if it is a function definition
/// that also contains a natspec then return a tuple of:
/// - Function name
/// - Natspec text
/// - Natspec position with format "row:col:length"
///
/// Return None otherwise.
fn get_fn_data(&self, node: &Node) -> Option<(String, String, String)> {
if node.node_type == NodeType::FunctionDefinition {
let fn_data = &node.other;
let fn_name: String = self.get_fn_name(fn_data)?;
let (fn_docs, docs_src_line) = self.get_node_docs(fn_data)?;
return Some((fn_name, fn_docs, docs_src_line));
}
None
}
/// Given a dictionary of function data returns the name of the function.
fn get_fn_name(&self, fn_data: &BTreeMap<String, Value>) -> Option<String> {
match fn_data.get("name")? {
Value::String(fn_name) => Some(fn_name.into()),
_ => None,
}
}
/// Inspects Solc compiler output for documentation comments. Returns:
/// - `Some((String, String))` in case the function has natspec comments. First item is a
/// textual natspec representation, the second item is the natspec src line, in the form
/// "raw:col:length".
/// - `None` in case the function has not natspec comments.
fn get_node_docs(&self, data: &BTreeMap<String, Value>) -> Option<(String, String)> {
if let Value::Object(fn_docs) = data.get("documentation")?
&& let Value::String(comment) = fn_docs.get("text")?
&& comment.contains(INLINE_CONFIG_PREFIX)
{
let mut src_line = fn_docs
.get("src")
.map(|src| src.to_string())
.unwrap_or_else(|| String::from("<no-src-line-available>"));
src_line.retain(|c| c != '"');
return Some((comment.into(), src_line));
}
None
}
}
struct SolarParser<'a> {
sess: &'a Session,
}
impl<'a> SolarParser<'a> {
fn new(sess: &'a Session) -> Self {
Self { sess }
}
fn parse_ast(
&self,
natspecs: &mut Vec<NatSpec>,
source_unit: &ast::SourceUnit<'_>,
contract_id: &str,
contract_name: &str,
) {
let mut handle_docs = |item: &ast::Item<'_>| {
if item.docs.is_empty() {
return;
}
let mut span = Span::DUMMY;
let lines = item
.docs
.iter()
.filter_map(|d| {
let s = d.symbol.as_str();
if !s.contains(INLINE_CONFIG_PREFIX) {
return None;
}
span = if span.is_dummy() { d.span } else { span.to(d.span) };
match d.kind {
ast::CommentKind::Line => Some(s.trim().to_string()),
ast::CommentKind::Block => Some(
s.lines()
.filter(|line| line.contains(INLINE_CONFIG_PREFIX))
.map(|line| line.trim_start().trim_start_matches('*').trim())
.collect::<Vec<_>>()
.join("\n"),
),
}
})
.join("\n");
if lines.is_empty() {
return;
}
natspecs.push(NatSpec {
contract: contract_id.to_string(),
function: if let ast::ItemKind::Function(f) = &item.kind {
Some(
f.header
.name
.map(|sym| sym.to_string())
.unwrap_or_else(|| f.kind.to_string()),
)
} else {
None
},
line: {
let (_, loc) = self.sess.source_map().span_to_location_info(span);
format!("{}:{}", loc.lo.line, loc.lo.col.0 + 1)
},
docs: lines,
});
};
for item in source_unit.items.iter() {
let ast::ItemKind::Contract(c) = &item.kind else { continue };
if c.name.as_str() != contract_name {
continue;
}
// Handle contract level doc comments.
handle_docs(item);
// Handle function level doc comments.
for item in c.body.iter() {
let ast::ItemKind::Function(_) = &item.kind else { continue };
handle_docs(item);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
use snapbox::{assert_data_eq, str};
use solar::parse::{
Parser,
ast::{Arena, interface},
};
fn parse(natspecs: &mut Vec<NatSpec>, src: &str, contract_id: &str, contract_name: &str) {
// Fast path to avoid parsing the file.
if !src.contains(INLINE_CONFIG_PREFIX) {
return;
}
let sess = Session::builder()
.with_silent_emitter(Some("Inline config parsing failed".to_string()))
.build();
let solar = SolarParser::new(&sess);
let _ = sess.enter(|| -> interface::Result<()> {
let arena = Arena::new();
let mut parser = Parser::from_source_code(
&sess,
&arena,
interface::source_map::FileName::Custom(contract_id.to_string()),
src.to_string(),
)?;
let source_unit = parser.parse_file().map_err(|e| e.emit())?;
solar.parse_ast(natspecs, &source_unit, contract_id, contract_name);
Ok(())
});
}
#[test]
fn can_reject_invalid_profiles() {
let profiles = ["ci".into(), "default".into()];
let natspec = NatSpec {
contract: Default::default(),
function: Default::default(),
line: Default::default(),
docs: r"
forge-config: ciii.invariant.depth = 1
forge-config: default.invariant.depth = 1
"
.into(),
};
let result = natspec.validate_profiles(&profiles);
assert!(result.is_err());
}
#[test]
fn can_accept_valid_profiles() {
let profiles = ["ci".into(), "default".into()];
let natspec = NatSpec {
contract: Default::default(),
function: Default::default(),
line: Default::default(),
docs: r"
forge-config: ci.invariant.depth = 1
forge-config: default.invariant.depth = 1
"
.into(),
};
let result = natspec.validate_profiles(&profiles);
assert!(result.is_ok());
}
#[test]
fn parse_solar() {
let src = "
contract C { /// forge-config: default.fuzz.runs = 600
\t\t\t\t /// forge-config: default.fuzz.runs = 601
function f1() {}
/** forge-config: default.fuzz.runs = 700 */
function f2() {} /** forge-config: default.fuzz.runs = 800 */ function f3() {}
/**
* forge-config: default.fuzz.runs = 1024
* forge-config: default.fuzz.max-test-rejects = 500
*/
function f4() {}
}
";
let mut natspecs = vec![];
parse(&mut natspecs, src, "path.sol:C", "C");
assert_data_eq!(
format!("{natspecs:#?}"),
str![[r#"
[
NatSpec {
contract: "path.sol:C",
function: Some(
"f1",
),
line: "2:14",
docs: "forge-config: default.fuzz.runs = 600/nforge-config: default.fuzz.runs = 601",
},
NatSpec {
contract: "path.sol:C",
function: Some(
"f2",
),
line: "7:8",
docs: "forge-config: default.fuzz.runs = 700",
},
NatSpec {
contract: "path.sol:C",
function: Some(
"f3",
),
line: "8:18",
docs: "forge-config: default.fuzz.runs = 800",
},
NatSpec {
contract: "path.sol:C",
function: Some(
"f4",
),
line: "10:1",
docs: "forge-config: default.fuzz.runs = 1024/nforge-config: default.fuzz.max-test-rejects = 500",
},
]
"#]]
);
}
#[test]
fn parse_solar_2() {
let src = r#"
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "ds-test/test.sol";
contract FuzzInlineConf is DSTest {
/**
* forge-config: default.fuzz.runs = 1024
* forge-config: default.fuzz.max-test-rejects = 500
*/
function testInlineConfFuzz(uint8 x) public {
require(true, "this is not going to revert");
}
}
"#;
let mut natspecs = vec![];
parse(&mut natspecs, src, "inline/FuzzInlineConf.t.sol:FuzzInlineConf", "FuzzInlineConf");
assert_data_eq!(
format!("{natspecs:#?}"),
str![[r#"
[
NatSpec {
contract: "inline/FuzzInlineConf.t.sol:FuzzInlineConf",
function: Some(
"testInlineConfFuzz",
),
line: "8:5",
docs: "forge-config: default.fuzz.runs = 1024/nforge-config: default.fuzz.max-test-rejects = 500",
},
]
"#]]
);
}
#[test]
fn config_lines() {
let natspec = natspec();
let config_lines = natspec.config_values();
assert_eq!(
config_lines.collect::<Vec<_>>(),
[
"default.fuzz.runs = 600".to_string(),
"ci.fuzz.runs = 500".to_string(),
"default.invariant.runs = 1".to_string()
]
)
}
#[test]
fn can_handle_unavailable_src_line_with_fallback() {
let mut fn_data: BTreeMap<String, Value> = BTreeMap::new();
let doc_without_src_field = json!({ "text": "forge-config:default.fuzz.runs=600" });
fn_data.insert("documentation".into(), doc_without_src_field);
let (_, src_line) = SolcParser::new().get_node_docs(&fn_data).expect("Some docs");
assert_eq!(src_line, "<no-src-line-available>".to_string());
}
#[test]
fn can_handle_available_src_line() {
let mut fn_data: BTreeMap<String, Value> = BTreeMap::new();
let doc_without_src_field =
json!({ "text": "forge-config:default.fuzz.runs=600", "src": "73:21:12" });
fn_data.insert("documentation".into(), doc_without_src_field);
let (_, src_line) = SolcParser::new().get_node_docs(&fn_data).expect("Some docs");
assert_eq!(src_line, "73:21:12".to_string());
}
fn natspec() -> NatSpec {
let conf = r"
forge-config: default.fuzz.runs = 600
forge-config: ci.fuzz.runs = 500
========= SOME NOISY TEXT =============
䩹𧀫Jx닧Ʀ̳盅K擷Ɂw첊}ꏻk86ᖪk-檻ܴ렝[Dz𐤬oᘓƤ
꣖ۻ%Ƅ㪕ς:(饁av/烲ڻ̛߉橞㗡𥺃̹M봓䀖ؿ̄)d
ϊ&»ϿЏ2鞷砕eߥHJ粊머?槿ᴴጅϖ뀓Ӽ츙4
醤㭊r ܖ̹灱녗V*竅⒪苏贗=숽ؓбݧʹ園Ьi
=======================================
forge-config: default.invariant.runs = 1
";
NatSpec {
contract: "dir/TestContract.t.sol:FuzzContract".to_string(),
function: Some("test_myFunction".to_string()),
line: "10:12:111".to_string(),
docs: conf.to_string(),
}
}
#[test]
fn parse_solar_multiple_contracts_from_same_file() {
let src = r#"
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "ds-test/test.sol";
contract FuzzInlineConf is DSTest {
/// forge-config: default.fuzz.runs = 1
function testInlineConfFuzz1() {}
}
contract FuzzInlineConf2 is DSTest {
/// forge-config: default.fuzz.runs = 2
function testInlineConfFuzz2() {}
}
"#;
let mut natspecs = vec![];
parse(&mut natspecs, src, "inline/FuzzInlineConf.t.sol:FuzzInlineConf", "FuzzInlineConf");
assert_data_eq!(
format!("{natspecs:#?}"),
str![[r#"
[
NatSpec {
contract: "inline/FuzzInlineConf.t.sol:FuzzInlineConf",
function: Some(
"testInlineConfFuzz1",
),
line: "8:6",
docs: "forge-config: default.fuzz.runs = 1",
},
]
"#]]
);
let mut natspecs = vec![];
parse(
&mut natspecs,
src,
"inline/FuzzInlineConf2.t.sol:FuzzInlineConf2",
"FuzzInlineConf2",
);
assert_data_eq!(
format!("{natspecs:#?}"),
str![[r#"
[
NatSpec {
contract: "inline/FuzzInlineConf2.t.sol:FuzzInlineConf2",
function: Some(
"testInlineConfFuzz2",
),
line: "13:5",
docs: "forge-config: default.fuzz.runs = 2",
},
]
"#]]
);
}
#[test]
fn parse_contract_level_config() {
let src = r#"
// SPDX-License-Identifier: MIT OR Apache-2.0
pragma solidity >=0.8.0;
import "ds-test/test.sol";
/// forge-config: default.fuzz.runs = 1
contract FuzzInlineConf is DSTest {
/// forge-config: default.fuzz.runs = 3
function testInlineConfFuzz1() {}
function testInlineConfFuzz2() {}
}"#;
let mut natspecs = vec![];
parse(&mut natspecs, src, "inline/FuzzInlineConf.t.sol:FuzzInlineConf", "FuzzInlineConf");
assert_data_eq!(
format!("{natspecs:#?}"),
str![[r#"
[
NatSpec {
contract: "inline/FuzzInlineConf.t.sol:FuzzInlineConf",
function: None,
line: "7:1",
docs: "forge-config: default.fuzz.runs = 1",
},
NatSpec {
contract: "inline/FuzzInlineConf.t.sol:FuzzInlineConf",
function: Some(
"testInlineConfFuzz1",
),
line: "9:5",
docs: "forge-config: default.fuzz.runs = 3",
},
]
"#]]
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/inline/mod.rs | crates/config/src/inline/mod.rs | use crate::Config;
use alloy_primitives::map::HashMap;
use figment::{
Figment, Profile, Provider,
value::{Dict, Map, Value},
};
use foundry_compilers::ProjectCompileOutput;
use itertools::Itertools;
mod natspec;
pub use natspec::*;
const INLINE_CONFIG_PREFIX: &str = "forge-config:";
type DataMap = Map<Profile, Dict>;
/// Errors returned when parsing inline config.
#[derive(Clone, Debug, PartialEq, Eq, thiserror::Error)]
pub enum InlineConfigErrorKind {
/// Failed to parse inline config as TOML.
#[error(transparent)]
Parse(#[from] toml::de::Error),
/// An invalid profile has been provided.
#[error("invalid profile `{0}`; valid profiles: {1}")]
InvalidProfile(String, String),
}
/// Wrapper error struct that catches config parsing errors, enriching them with context information
/// reporting the misconfigured line.
#[derive(Debug, thiserror::Error)]
#[error("Inline config error at {location}: {kind}")]
pub struct InlineConfigError {
/// The span of the error in the format:
/// `dir/TestContract.t.sol:FuzzContract:10:12:111`
pub location: String,
/// The inner error
pub kind: InlineConfigErrorKind,
}
/// Represents per-test configurations, declared inline
/// as structured comments in Solidity test files. This allows
/// to create configs directly bound to a solidity test.
#[derive(Clone, Debug, Default)]
pub struct InlineConfig {
/// Contract-level configuration.
contract_level: HashMap<String, DataMap>,
/// Function-level configuration.
fn_level: HashMap<(String, String), DataMap>,
}
impl InlineConfig {
/// Creates a new, empty [`InlineConfig`].
pub fn new() -> Self {
Self::default()
}
/// Tries to create a new instance by detecting inline configurations from the project compile
/// output.
pub fn new_parsed(output: &ProjectCompileOutput, config: &Config) -> eyre::Result<Self> {
let natspecs: Vec<NatSpec> = NatSpec::parse(output, &config.root);
let profiles = &config.profiles;
let mut inline = Self::new();
for natspec in &natspecs {
inline.insert(natspec)?;
// Validate after parsing as TOML.
natspec.validate_profiles(profiles)?;
}
Ok(inline)
}
/// Inserts a new [`NatSpec`] into the [`InlineConfig`].
pub fn insert(&mut self, natspec: &NatSpec) -> Result<(), InlineConfigError> {
let map = if let Some(function) = &natspec.function {
self.fn_level.entry((natspec.contract.clone(), function.clone())).or_default()
} else {
self.contract_level.entry(natspec.contract.clone()).or_default()
};
let joined = natspec
.config_values()
.map(|s| {
// Replace `-` with `_` for backwards compatibility with the old parser.
if let Some(idx) = s.find('=') {
s[..idx].replace('-', "_") + &s[idx..]
} else {
s.to_string()
}
})
.format("\n")
.to_string();
let data = toml::from_str::<DataMap>(&joined).map_err(|e| InlineConfigError {
location: natspec.location_string(),
kind: InlineConfigErrorKind::Parse(e),
})?;
extend_data_map(map, &data);
Ok(())
}
/// Returns a [`figment::Provider`] for this [`InlineConfig`] at the given contract and function
/// level.
pub fn provide<'a>(&'a self, contract: &'a str, function: &'a str) -> InlineConfigProvider<'a> {
InlineConfigProvider { inline: self, contract, function }
}
/// Merges the inline configuration at the given contract and function level with the provided
/// base configuration.
pub fn merge(&self, contract: &str, function: &str, base: &Config) -> Figment {
Figment::from(base).merge(self.provide(contract, function))
}
/// Returns `true` if a configuration is present at the given contract level.
pub fn contains_contract(&self, contract: &str) -> bool {
self.get_contract(contract).is_some_and(|map| !map.is_empty())
}
/// Returns `true` if a configuration is present at the function level.
///
/// Does not include contract-level configurations.
pub fn contains_function(&self, contract: &str, function: &str) -> bool {
self.get_function(contract, function).is_some_and(|map| !map.is_empty())
}
fn get_contract(&self, contract: &str) -> Option<&DataMap> {
self.contract_level.get(contract)
}
fn get_function(&self, contract: &str, function: &str) -> Option<&DataMap> {
let key = (contract.to_string(), function.to_string());
self.fn_level.get(&key)
}
}
/// [`figment::Provider`] for [`InlineConfig`] at a given contract and function level.
///
/// Created by [`InlineConfig::provide`].
#[derive(Clone, Debug)]
pub struct InlineConfigProvider<'a> {
inline: &'a InlineConfig,
contract: &'a str,
function: &'a str,
}
impl Provider for InlineConfigProvider<'_> {
fn metadata(&self) -> figment::Metadata {
figment::Metadata::named("inline config")
}
fn data(&self) -> figment::Result<DataMap> {
let mut map = DataMap::new();
if let Some(new) = self.inline.get_contract(self.contract) {
extend_data_map(&mut map, new);
}
if let Some(new) = self.inline.get_function(self.contract, self.function) {
extend_data_map(&mut map, new);
}
Ok(map)
}
}
fn extend_data_map(map: &mut DataMap, new: &DataMap) {
for (profile, data) in new {
extend_dict(map.entry(profile.clone()).or_default(), data);
}
}
fn extend_dict(dict: &mut Dict, new: &Dict) {
for (k, v) in new {
match dict.entry(k.clone()) {
std::collections::btree_map::Entry::Vacant(entry) => {
entry.insert(v.clone());
}
std::collections::btree_map::Entry::Occupied(entry) => {
extend_value(entry.into_mut(), v);
}
}
}
}
fn extend_value(value: &mut Value, new: &Value) {
match (value, new) {
(Value::Dict(tag, dict), Value::Dict(new_tag, new_dict)) => {
*tag = *new_tag;
extend_dict(dict, new_dict);
}
(value, new) => *value = new.clone(),
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/providers/ext.rs | crates/config/src/providers/ext.rs | use crate::{Config, extend, utils};
use figment::{
Error, Figment, Metadata, Profile, Provider,
providers::{Env, Format, Toml},
value::{Dict, Map, Value},
};
use foundry_compilers::ProjectPathsConfig;
use heck::ToSnakeCase;
use std::{
cell::OnceCell,
path::{Path, PathBuf},
};
pub(crate) trait ProviderExt: Provider + Sized {
fn rename(
self,
from: impl Into<Profile>,
to: impl Into<Profile>,
) -> RenameProfileProvider<Self> {
RenameProfileProvider::new(self, from, to)
}
fn wrap(
self,
wrapping_key: impl Into<Profile>,
profile: impl Into<Profile>,
) -> WrapProfileProvider<Self> {
WrapProfileProvider::new(self, wrapping_key, profile)
}
fn strict_select(
self,
profiles: impl IntoIterator<Item = impl Into<Profile>>,
) -> OptionalStrictProfileProvider<Self> {
OptionalStrictProfileProvider::new(self, profiles)
}
fn fallback(
self,
profile: impl Into<Profile>,
fallback: impl Into<Profile>,
) -> FallbackProfileProvider<Self> {
FallbackProfileProvider::new(self, profile, fallback)
}
}
impl<P: Provider> ProviderExt for P {}
/// A convenience provider to retrieve a toml file.
/// This will return an error if the env var is set but the file does not exist
pub(crate) struct TomlFileProvider {
env_var: Option<&'static str>,
env_val: OnceCell<Option<String>>,
default: PathBuf,
cache: OnceCell<Result<Map<Profile, Dict>, Error>>,
}
impl TomlFileProvider {
pub(crate) fn new(env_var: Option<&'static str>, default: PathBuf) -> Self {
Self { env_var, env_val: OnceCell::new(), default, cache: OnceCell::new() }
}
fn env_val(&self) -> Option<&str> {
self.env_val.get_or_init(|| self.env_var.and_then(Env::var)).as_deref()
}
fn file(&self) -> PathBuf {
self.env_val().map(PathBuf::from).unwrap_or_else(|| self.default.clone())
}
fn is_missing(&self) -> bool {
if let Some(file) = self.env_val() {
let path = Path::new(&file);
if !path.exists() {
return true;
}
}
false
}
/// Reads and processes the TOML configuration file, handling inheritance if configured.
fn read(&self) -> Result<Map<Profile, Dict>, Error> {
use serde::de::Error as _;
// Get the config file path and validate it exists
let local_path = self.file();
if !local_path.exists() {
if let Some(file) = self.env_val() {
return Err(Error::custom(format!(
"Config file `{}` set in env var `{}` does not exist",
file,
self.env_var.unwrap()
)));
}
return Ok(Map::new());
}
// Create a provider for the local config file
let local_provider = Toml::file(local_path.clone()).nested();
// Parse the local config to check for extends field
let local_path_str = local_path.to_string_lossy();
let local_content = std::fs::read_to_string(&local_path)
.map_err(|e| Error::custom(e.to_string()).with_path(&local_path_str))?;
let partial_config: extend::ExtendsPartialConfig = toml::from_str(&local_content)
.map_err(|e| Error::custom(e.to_string()).with_path(&local_path_str))?;
// Check if the currently active profile has an 'extends' field
let selected_profile = Config::selected_profile();
let extends_config = partial_config.profile.as_ref().and_then(|profiles| {
let profile_str = selected_profile.to_string();
profiles.get(&profile_str).and_then(|cfg| cfg.extends.as_ref())
});
// If inheritance is configured, load and merge the base config
if let Some(extends_config) = extends_config {
let extends_path = extends_config.path();
let extends_strategy = extends_config.strategy();
let relative_base_path = PathBuf::from(extends_path);
let local_dir = local_path.parent().ok_or_else(|| {
Error::custom(format!(
"Could not determine parent directory of config file: {}",
local_path.display()
))
})?;
let base_path =
foundry_compilers::utils::canonicalize(local_dir.join(&relative_base_path))
.map_err(|e| {
Error::custom(format!(
"Failed to resolve inherited config path: {}: {e}",
relative_base_path.display()
))
})?;
// Validate the base config file exists
if !base_path.is_file() {
return Err(Error::custom(format!(
"Inherited config file does not exist or is not a file: {}",
base_path.display()
)));
}
// Prevent self-inheritance which would cause infinite recursion
if foundry_compilers::utils::canonicalize(&local_path).ok().as_ref() == Some(&base_path)
{
return Err(Error::custom(format!(
"Config file {} cannot inherit from itself.",
local_path.display()
)));
}
// Parse the base config to check for nested inheritance
let base_path_str = base_path.to_string_lossy();
let base_content = std::fs::read_to_string(&base_path)
.map_err(|e| Error::custom(e.to_string()).with_path(&base_path_str))?;
let base_partial: extend::ExtendsPartialConfig = toml::from_str(&base_content)
.map_err(|e| Error::custom(e.to_string()).with_path(&base_path_str))?;
// Check if the base file's same profile also has extends (nested inheritance)
let base_extends = base_partial
.profile
.as_ref()
.and_then(|profiles| {
let profile_str = selected_profile.to_string();
profiles.get(&profile_str)
})
.and_then(|profile| profile.extends.as_ref());
// Prevent nested inheritance to avoid complexity and potential cycles
if base_extends.is_some() {
return Err(Error::custom(format!(
"Nested inheritance is not allowed. Base file '{}' cannot have an 'extends' field in profile '{selected_profile}'.",
base_path.display()
)));
}
// Load base configuration as a Figment provider
let base_provider = Toml::file(base_path).nested();
// Apply the selected merge strategy
match extends_strategy {
extend::ExtendStrategy::ExtendArrays => {
// Using 'admerge' strategy:
// - Arrays are concatenated (base elements + local elements)
// - Other values are replaced (local values override base values)
// - The extends field is preserved in the final configuration
Figment::new().merge(base_provider).admerge(local_provider).data()
}
extend::ExtendStrategy::ReplaceArrays => {
// Using 'merge' strategy:
// - Arrays are replaced entirely (local arrays replace base arrays)
// - Other values are replaced (local values override base values)
Figment::new().merge(base_provider).merge(local_provider).data()
}
extend::ExtendStrategy::NoCollision => {
// Check for key collisions between base and local configs
let base_data = base_provider.data()?;
let local_data = local_provider.data()?;
let profile_key = Profile::new("profile");
if let (Some(local_profiles), Some(base_profiles)) =
(local_data.get(&profile_key), base_data.get(&profile_key))
{
// Extract dicts for the selected profile
let profile_str = selected_profile.to_string();
let base_dict = base_profiles.get(&profile_str).and_then(|v| v.as_dict());
let local_dict = local_profiles.get(&profile_str).and_then(|v| v.as_dict());
// Find colliding keys
if let (Some(local_dict), Some(base_dict)) = (local_dict, base_dict) {
let collisions: Vec<&String> = local_dict
.keys()
.filter(|key| {
// Ignore the "extends" key as it's expected
*key != "extends" && base_dict.contains_key(*key)
})
.collect();
if !collisions.is_empty() {
return Err(Error::custom(format!(
"Key collision detected in profile '{profile_str}' when extending '{extends_path}'. \
Conflicting keys: {collisions:?}. Use 'extends.strategy' or 'extends_strategy' to specify how to handle conflicts."
)));
}
}
}
// Safe to merge the configs without collisions
Figment::new().merge(base_provider).merge(local_provider).data()
}
}
} else {
// No inheritance - return the local config as-is
local_provider.data()
}
}
}
impl Provider for TomlFileProvider {
fn metadata(&self) -> Metadata {
if self.is_missing() {
Metadata::named("TOML file provider")
} else {
Toml::file(self.file()).nested().metadata()
}
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
self.cache.get_or_init(|| self.read()).clone()
}
}
/// A Provider that ensures all keys are snake case if they're not standalone sections, See
/// `Config::STANDALONE_SECTIONS`
pub(crate) struct ForcedSnakeCaseData<P>(pub(crate) P);
impl<P: Provider> Provider for ForcedSnakeCaseData<P> {
fn metadata(&self) -> Metadata {
self.0.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut map = self.0.data()?;
for (profile, dict) in &mut map {
if Config::STANDALONE_SECTIONS.contains(&profile.as_ref()) {
// don't force snake case for keys in standalone sections
continue;
}
let dict2 = std::mem::take(dict);
*dict = dict2.into_iter().map(|(k, v)| (k.to_snake_case(), v)).collect();
}
Ok(map)
}
fn profile(&self) -> Option<Profile> {
self.0.profile()
}
}
/// A Provider that handles breaking changes in toml files
pub(crate) struct BackwardsCompatTomlProvider<P>(pub(crate) P);
impl<P: Provider> Provider for BackwardsCompatTomlProvider<P> {
fn metadata(&self) -> Metadata {
self.0.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut map = Map::new();
let solc_env = std::env::var("FOUNDRY_SOLC_VERSION")
.or_else(|_| std::env::var("DAPP_SOLC_VERSION"))
.map(Value::from)
.ok();
for (profile, mut dict) in self.0.data()? {
if let Some(v) = solc_env.clone() {
// ENV var takes precedence over config file
dict.insert("solc".to_string(), v);
} else if let Some(v) = dict.remove("solc_version") {
// only insert older variant if not already included
if !dict.contains_key("solc") {
dict.insert("solc".to_string(), v);
}
}
if let Some(v) = dict.remove("deny_warnings")
&& !dict.contains_key("deny")
{
dict.insert("deny".to_string(), v);
}
map.insert(profile, dict);
}
Ok(map)
}
fn profile(&self) -> Option<Profile> {
self.0.profile()
}
}
/// A provider that sets the `src` and `output` path depending on their existence.
pub(crate) struct DappHardhatDirProvider<'a>(pub(crate) &'a Path);
impl Provider for DappHardhatDirProvider<'_> {
fn metadata(&self) -> Metadata {
Metadata::named("Dapp Hardhat dir compat")
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut dict = Dict::new();
dict.insert(
"src".to_string(),
ProjectPathsConfig::find_source_dir(self.0)
.file_name()
.unwrap()
.to_string_lossy()
.to_string()
.into(),
);
dict.insert(
"out".to_string(),
ProjectPathsConfig::find_artifacts_dir(self.0)
.file_name()
.unwrap()
.to_string_lossy()
.to_string()
.into(),
);
// detect libs folders:
// if `lib` _and_ `node_modules` exists: include both
// if only `node_modules` exists: include `node_modules`
// include `lib` otherwise
let mut libs = vec![];
let node_modules = self.0.join("node_modules");
let lib = self.0.join("lib");
if node_modules.exists() {
if lib.exists() {
libs.push(lib.file_name().unwrap().to_string_lossy().to_string());
}
libs.push(node_modules.file_name().unwrap().to_string_lossy().to_string());
} else {
libs.push(lib.file_name().unwrap().to_string_lossy().to_string());
}
dict.insert("libs".to_string(), libs.into());
Ok(Map::from([(Config::selected_profile(), dict)]))
}
}
/// A provider that checks for DAPP_ env vars that are named differently than FOUNDRY_
pub(crate) struct DappEnvCompatProvider;
impl Provider for DappEnvCompatProvider {
fn metadata(&self) -> Metadata {
Metadata::named("Dapp env compat")
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
use serde::de::Error as _;
use std::env;
let mut dict = Dict::new();
if let Ok(val) = env::var("DAPP_TEST_NUMBER") {
dict.insert(
"block_number".to_string(),
val.parse::<u64>().map_err(figment::Error::custom)?.into(),
);
}
if let Ok(val) = env::var("DAPP_TEST_ADDRESS") {
dict.insert("sender".to_string(), val.into());
}
if let Ok(val) = env::var("DAPP_FORK_BLOCK") {
dict.insert(
"fork_block_number".to_string(),
val.parse::<u64>().map_err(figment::Error::custom)?.into(),
);
} else if let Ok(val) = env::var("DAPP_TEST_NUMBER") {
dict.insert(
"fork_block_number".to_string(),
val.parse::<u64>().map_err(figment::Error::custom)?.into(),
);
}
if let Ok(val) = env::var("DAPP_TEST_TIMESTAMP") {
dict.insert(
"block_timestamp".to_string(),
val.parse::<u64>().map_err(figment::Error::custom)?.into(),
);
}
if let Ok(val) = env::var("DAPP_BUILD_OPTIMIZE_RUNS") {
dict.insert(
"optimizer_runs".to_string(),
val.parse::<u64>().map_err(figment::Error::custom)?.into(),
);
}
if let Ok(val) = env::var("DAPP_BUILD_OPTIMIZE") {
// Activate Solidity optimizer (0 or 1)
let val = val.parse::<u8>().map_err(figment::Error::custom)?;
if val > 1 {
return Err(
format!("Invalid $DAPP_BUILD_OPTIMIZE value `{val}`, expected 0 or 1").into()
);
}
dict.insert("optimizer".to_string(), (val == 1).into());
}
// libraries in env vars either as `[..]` or single string separated by comma
if let Ok(val) = env::var("DAPP_LIBRARIES").or_else(|_| env::var("FOUNDRY_LIBRARIES")) {
dict.insert("libraries".to_string(), utils::to_array_value(&val)?);
}
let mut fuzz_dict = Dict::new();
if let Ok(val) = env::var("DAPP_TEST_FUZZ_RUNS") {
fuzz_dict.insert(
"runs".to_string(),
val.parse::<u32>().map_err(figment::Error::custom)?.into(),
);
}
dict.insert("fuzz".to_string(), fuzz_dict.into());
let mut invariant_dict = Dict::new();
if let Ok(val) = env::var("DAPP_TEST_DEPTH") {
invariant_dict.insert(
"depth".to_string(),
val.parse::<u32>().map_err(figment::Error::custom)?.into(),
);
}
dict.insert("invariant".to_string(), invariant_dict.into());
Ok(Map::from([(Config::selected_profile(), dict)]))
}
}
/// Renames a profile from `from` to `to`.
///
/// For example given:
///
/// ```toml
/// [from]
/// key = "value"
/// ```
///
/// RenameProfileProvider will output
///
/// ```toml
/// [to]
/// key = "value"
/// ```
pub(crate) struct RenameProfileProvider<P> {
provider: P,
from: Profile,
to: Profile,
}
impl<P> RenameProfileProvider<P> {
pub(crate) fn new(provider: P, from: impl Into<Profile>, to: impl Into<Profile>) -> Self {
Self { provider, from: from.into(), to: to.into() }
}
}
impl<P: Provider> Provider for RenameProfileProvider<P> {
fn metadata(&self) -> Metadata {
self.provider.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut data = self.provider.data()?;
if let Some(data) = data.remove(&self.from) {
return Ok(Map::from([(self.to.clone(), data)]));
}
Ok(Default::default())
}
fn profile(&self) -> Option<Profile> {
Some(self.to.clone())
}
}
/// Unwraps a profile reducing the key depth
///
/// For example given:
///
/// ```toml
/// [wrapping_key.profile]
/// key = "value"
/// ```
///
/// UnwrapProfileProvider will output:
///
/// ```toml
/// [profile]
/// key = "value"
/// ```
struct UnwrapProfileProvider<P> {
provider: P,
wrapping_key: Profile,
profile: Profile,
}
impl<P> UnwrapProfileProvider<P> {
pub fn new(provider: P, wrapping_key: impl Into<Profile>, profile: impl Into<Profile>) -> Self {
Self { provider, wrapping_key: wrapping_key.into(), profile: profile.into() }
}
}
impl<P: Provider> Provider for UnwrapProfileProvider<P> {
fn metadata(&self) -> Metadata {
self.provider.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut data = self.provider.data()?;
if let Some(profiles) = data.remove(&self.wrapping_key) {
for (profile_str, profile_val) in profiles {
let profile = Profile::new(&profile_str);
if profile != self.profile {
continue;
}
match profile_val {
Value::Dict(_, dict) => return Ok(profile.collect(dict)),
bad_val => {
let mut err = Error::from(figment::error::Kind::InvalidType(
bad_val.to_actual(),
"dict".into(),
));
err.metadata = Some(self.provider.metadata());
err.profile = Some(self.profile.clone());
return Err(err);
}
}
}
}
Ok(Default::default())
}
fn profile(&self) -> Option<Profile> {
Some(self.profile.clone())
}
}
/// Wraps a profile in another profile
///
/// For example given:
///
/// ```toml
/// [profile]
/// key = "value"
/// ```
///
/// WrapProfileProvider will output:
///
/// ```toml
/// [wrapping_key.profile]
/// key = "value"
/// ```
pub(crate) struct WrapProfileProvider<P> {
provider: P,
wrapping_key: Profile,
profile: Profile,
}
impl<P> WrapProfileProvider<P> {
pub fn new(provider: P, wrapping_key: impl Into<Profile>, profile: impl Into<Profile>) -> Self {
Self { provider, wrapping_key: wrapping_key.into(), profile: profile.into() }
}
}
impl<P: Provider> Provider for WrapProfileProvider<P> {
fn metadata(&self) -> Metadata {
self.provider.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
if let Some(inner) = self.provider.data()?.remove(&self.profile) {
let value = Value::from(inner);
let mut dict = Dict::new();
dict.insert(self.profile.as_str().as_str().to_snake_case(), value);
Ok(self.wrapping_key.collect(dict))
} else {
Ok(Default::default())
}
}
fn profile(&self) -> Option<Profile> {
Some(self.profile.clone())
}
}
/// Extracts the profile from the `profile` key and using the original key as backup, merging
/// values where necessary
///
/// For example given:
///
/// ```toml
/// [profile.cool]
/// key = "value"
///
/// [cool]
/// key2 = "value2"
/// ```
///
/// OptionalStrictProfileProvider will output:
///
/// ```toml
/// [cool]
/// key = "value"
/// key2 = "value2"
/// ```
///
/// And emit a deprecation warning
pub(crate) struct OptionalStrictProfileProvider<P> {
provider: P,
profiles: Vec<Profile>,
}
impl<P> OptionalStrictProfileProvider<P> {
pub const PROFILE_PROFILE: Profile = Profile::const_new("profile");
pub fn new(provider: P, profiles: impl IntoIterator<Item = impl Into<Profile>>) -> Self {
Self { provider, profiles: profiles.into_iter().map(|profile| profile.into()).collect() }
}
}
impl<P: Provider> Provider for OptionalStrictProfileProvider<P> {
fn metadata(&self) -> Metadata {
self.provider.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut figment = Figment::from(&self.provider);
for profile in &self.profiles {
figment = figment.merge(UnwrapProfileProvider::new(
&self.provider,
Self::PROFILE_PROFILE,
profile.clone(),
));
}
figment.data().map_err(|err| {
// figment does tag metadata and tries to map metadata to an error, since we use a new
// figment in this provider this new figment does not know about the metadata of the
// provider and can't map the metadata to the error. Therefore we return the root error
// if this error originated in the provider's data.
if let Err(root_err) = self.provider.data() {
return root_err;
}
err
})
}
fn profile(&self) -> Option<Profile> {
self.profiles.last().cloned()
}
}
/// Extracts the profile from the `profile` key and sets unset values according to the fallback
/// provider
pub struct FallbackProfileProvider<P> {
provider: P,
profile: Profile,
fallback: Profile,
}
impl<P> FallbackProfileProvider<P> {
/// Creates a new fallback profile provider.
pub fn new(provider: P, profile: impl Into<Profile>, fallback: impl Into<Profile>) -> Self {
Self { provider, profile: profile.into(), fallback: fallback.into() }
}
}
impl<P: Provider> Provider for FallbackProfileProvider<P> {
fn metadata(&self) -> Metadata {
self.provider.metadata()
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let mut data = self.provider.data()?;
if let Some(fallback) = data.remove(&self.fallback) {
let mut inner = data.remove(&self.profile).unwrap_or_default();
for (k, v) in fallback {
inner.entry(k).or_insert(v);
}
Ok(self.profile.collect(inner))
} else {
Ok(data)
}
}
fn profile(&self) -> Option<Profile> {
Some(self.profile.clone())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/providers/mod.rs | crates/config/src/providers/mod.rs | //! Config providers.
mod ext;
pub use ext::*;
mod remappings;
pub use remappings::*;
mod warnings;
pub use warnings::*;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/providers/remappings.rs | crates/config/src/providers/remappings.rs | use crate::{Config, foundry_toml_dirs, remappings_from_env_var, remappings_from_newline};
use figment::{
Error, Figment, Metadata, Profile, Provider,
value::{Dict, Map},
};
use foundry_compilers::artifacts::remappings::{RelativeRemapping, Remapping};
use rayon::prelude::*;
use std::{
borrow::Cow,
collections::{BTreeMap, HashSet, btree_map::Entry},
fs,
path::{Path, PathBuf},
};
/// Wrapper types over a `Vec<Remapping>` that only appends unique remappings.
#[derive(Clone, Debug, Default)]
pub struct Remappings {
/// Remappings.
remappings: Vec<Remapping>,
/// Source, test and script configured project dirs.
/// Remappings of these dirs from libs are ignored.
project_paths: Vec<Remapping>,
}
impl Remappings {
/// Create a new `Remappings` wrapper with an empty vector.
pub fn new() -> Self {
Self { remappings: Vec::new(), project_paths: Vec::new() }
}
/// Create a new `Remappings` wrapper with a vector of remappings.
pub fn new_with_remappings(remappings: Vec<Remapping>) -> Self {
Self { remappings, project_paths: Vec::new() }
}
/// Extract project paths that cannot be remapped by dependencies.
pub fn with_figment(mut self, figment: &Figment) -> Self {
let mut add_project_remapping = |path: &str| {
if let Ok(path) = figment.find_value(path)
&& let Some(path) = path.into_string()
{
let remapping =
Remapping { context: None, name: format!("{path}/"), path: format!("{path}/") };
self.project_paths.push(remapping);
}
};
add_project_remapping("src");
add_project_remapping("test");
add_project_remapping("script");
self
}
/// Filters the remappings vector by name and context.
fn filter_key(r: &Remapping) -> String {
match &r.context {
Some(str) => str.clone() + &r.name.clone(),
None => r.name.clone(),
}
}
/// Consumes the wrapper and returns the inner remappings vector.
pub fn into_inner(self) -> Vec<Remapping> {
let mut seen = HashSet::new();
self.remappings.iter().filter(|r| seen.insert(Self::filter_key(r))).cloned().collect()
}
/// Push an element to the remappings vector, but only if it's not already present.
fn push(&mut self, remapping: Remapping) {
// Special handling for .sol file remappings, only allow one remapping per source file.
if remapping.name.ends_with(".sol") && !remapping.path.ends_with(".sol") {
return;
}
if self.remappings.iter().any(|existing| {
if remapping.name.ends_with(".sol") {
// For .sol files, only prevent duplicate source names in the same context
return existing.name == remapping.name
&& existing.context == remapping.context
&& existing.path == remapping.path;
}
// What we're doing here is filtering for ambiguous paths. For example, if we have
// @prb/math/=node_modules/@prb/math/src/ as existing, and
// @prb/=node_modules/@prb/ as the one being checked,
// we want to keep the already existing one, which is the first one. This way we avoid
// having to deal with ambiguous paths which is unwanted when autodetecting remappings.
// Remappings are added from root of the project down to libraries, so
// we also want to exclude any conflicting remappings added from libraries. For example,
// if we have `@utils/=src/` added in project remappings and `@utils/libraries/=src/`
// added in a dependency, we don't want to add the new one as it conflicts with project
// existing remapping.
let mut existing_name_path = existing.name.clone();
if !existing_name_path.ends_with('/') {
existing_name_path.push('/')
}
let is_conflicting = remapping.name.starts_with(&existing_name_path)
|| existing.name.starts_with(&remapping.name);
is_conflicting && existing.context == remapping.context
}) {
return;
};
// Ignore remappings of root project src, test or script dir.
// See <https://github.com/foundry-rs/foundry/issues/3440>.
if self
.project_paths
.iter()
.any(|project_path| remapping.name.eq_ignore_ascii_case(&project_path.name))
{
return;
};
self.remappings.push(remapping);
}
/// Extend the remappings vector, leaving out the remappings that are already present.
pub fn extend(&mut self, remappings: Vec<Remapping>) {
for remapping in remappings {
self.push(remapping);
}
}
}
/// A figment provider that checks if the remappings were previously set and if they're unset looks
/// up the fs via
/// - `DAPP_REMAPPINGS` || `FOUNDRY_REMAPPINGS` env var
/// - `<root>/remappings.txt` file
/// - `Remapping::find_many`.
pub struct RemappingsProvider<'a> {
/// Whether to auto detect remappings from the `lib_paths`
pub auto_detect_remappings: bool,
/// The lib/dependency directories to scan for remappings
pub lib_paths: Cow<'a, Vec<PathBuf>>,
/// the root path used to turn an absolute `Remapping`, as we're getting it from
/// `Remapping::find_many` into a relative one.
pub root: &'a Path,
/// This contains either:
/// - previously set remappings
/// - a `MissingField` error, which means previous provider didn't set the "remappings" field
/// - other error, like formatting
pub remappings: Result<Vec<Remapping>, Error>,
}
impl RemappingsProvider<'_> {
/// Find and parse remappings for the projects
///
/// **Order**
///
/// Remappings are built in this order (last item takes precedence)
/// - Autogenerated remappings
/// - toml remappings
/// - `remappings.txt`
/// - Environment variables
/// - CLI parameters
fn get_remappings(&self, remappings: Vec<Remapping>) -> Result<Vec<Remapping>, Error> {
trace!("get all remappings from {:?}", self.root);
/// prioritizes remappings that are closer: shorter `path`
/// - ("a", "1/2") over ("a", "1/2/3")
///
/// grouped by remapping context
fn insert_closest(
mappings: &mut BTreeMap<Option<String>, BTreeMap<String, PathBuf>>,
context: Option<String>,
key: String,
path: PathBuf,
) {
let context_mappings = mappings.entry(context).or_default();
match context_mappings.entry(key) {
Entry::Occupied(mut e) => {
if e.get().components().count() > path.components().count() {
e.insert(path);
}
}
Entry::Vacant(e) => {
e.insert(path);
}
}
}
// Let's first just extend the remappings with the ones that were passed in,
// without any filtering.
let mut user_remappings = Vec::new();
// check env vars
if let Some(env_remappings) = remappings_from_env_var("DAPP_REMAPPINGS")
.or_else(|| remappings_from_env_var("FOUNDRY_REMAPPINGS"))
{
user_remappings
.extend(env_remappings.map_err::<Error, _>(|err| err.to_string().into())?);
}
// check remappings.txt file
let remappings_file = self.root.join("remappings.txt");
if remappings_file.is_file() {
let content = fs::read_to_string(remappings_file).map_err(|err| err.to_string())?;
let remappings_from_file: Result<Vec<_>, _> =
remappings_from_newline(&content).collect();
user_remappings
.extend(remappings_from_file.map_err::<Error, _>(|err| err.to_string().into())?);
}
user_remappings.extend(remappings);
// Let's now use the wrapper to conditionally extend the remappings with the autodetected
// ones. We want to avoid duplicates, and the wrapper will handle this for us.
let mut all_remappings = Remappings::new_with_remappings(user_remappings);
// scan all library dirs and autodetect remappings
// TODO: if a lib specifies contexts for remappings manually, we need to figure out how to
// resolve that
if self.auto_detect_remappings {
let (nested_foundry_remappings, auto_detected_remappings) = rayon::join(
|| self.find_nested_foundry_remappings(),
|| self.auto_detect_remappings(),
);
let mut lib_remappings = BTreeMap::new();
for r in nested_foundry_remappings {
insert_closest(&mut lib_remappings, r.context, r.name, r.path.into());
}
for r in auto_detected_remappings {
// this is an additional safety check for weird auto-detected remappings
if ["lib/", "src/", "contracts/"].contains(&r.name.as_str()) {
trace!(target: "forge", "- skipping the remapping");
continue;
}
insert_closest(&mut lib_remappings, r.context, r.name, r.path.into());
}
all_remappings.extend(
lib_remappings
.into_iter()
.flat_map(|(context, remappings)| {
remappings.into_iter().map(move |(name, path)| Remapping {
context: context.clone(),
name,
path: path.to_string_lossy().into(),
})
})
.collect(),
);
}
Ok(all_remappings.into_inner())
}
/// Returns all remappings declared in foundry.toml files of libraries
fn find_nested_foundry_remappings(&self) -> impl Iterator<Item = Remapping> + '_ {
self.lib_paths
.par_iter()
.map(|p| if p.is_absolute() { self.root.join("lib") } else { self.root.join(p) })
.flat_map(foundry_toml_dirs)
.flat_map_iter(|lib| {
trace!(?lib, "find all remappings of nested foundry.toml");
self.nested_foundry_remappings(&lib)
})
.collect::<Vec<_>>()
.into_iter()
}
fn nested_foundry_remappings(&self, lib: &Path) -> Vec<Remapping> {
// load config, of the nested lib if it exists
let Ok(config) = Config::load_with_root(lib) else { return vec![] };
let config = config.sanitized();
// if the configured _src_ directory is set to something that
// `Remapping::find_many` doesn't classify as a src directory (src, contracts,
// lib), then we need to manually add a remapping here
let mut src_remapping = None;
if ![Path::new("src"), Path::new("contracts"), Path::new("lib")]
.contains(&config.src.as_path())
&& let Some(name) = lib.file_name().and_then(|s| s.to_str())
{
let mut r = Remapping {
context: None,
name: format!("{name}/"),
path: format!("{}", lib.join(&config.src).display()),
};
if !r.path.ends_with('/') {
r.path.push('/')
}
src_remapping = Some(r);
}
// Eventually, we could set context for remappings at this location,
// taking into account the OS platform. We'll need to be able to handle nested
// contexts depending on dependencies for this to work.
// For now, we just leave the default context (none).
let mut remappings =
config.remappings.into_iter().map(Remapping::from).collect::<Vec<Remapping>>();
if let Some(r) = src_remapping {
remappings.push(r);
}
remappings
}
/// Auto detect remappings from the lib paths
fn auto_detect_remappings(&self) -> impl Iterator<Item = Remapping> + '_ {
self.lib_paths
.par_iter()
.flat_map_iter(|lib| {
let lib = self.root.join(lib);
trace!(?lib, "find all remappings");
Remapping::find_many(&lib)
})
.collect::<Vec<_>>()
.into_iter()
}
}
impl Provider for RemappingsProvider<'_> {
fn metadata(&self) -> Metadata {
Metadata::named("Remapping Provider")
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let remappings = match &self.remappings {
Ok(remappings) => self.get_remappings(remappings.clone()),
Err(err) => {
if let figment::error::Kind::MissingField(_) = err.kind {
self.get_remappings(vec![])
} else {
return Err(err.clone());
}
}
}?;
// turn the absolute remapping into a relative one by stripping the `root`
let remappings = remappings
.into_iter()
.map(|r| RelativeRemapping::new(r, self.root).to_string())
.collect::<Vec<_>>();
Ok(Map::from([(
Config::selected_profile(),
Dict::from([("remappings".to_string(), figment::value::Value::from(remappings))]),
)]))
}
fn profile(&self) -> Option<Profile> {
Some(Config::selected_profile())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sol_file_remappings() {
let mut remappings = Remappings::new();
// First valid remapping
remappings.push(Remapping {
context: None,
name: "MyContract.sol".to_string(),
path: "implementations/Contract1.sol".to_string(),
});
// Same source to different target (should be rejected)
remappings.push(Remapping {
context: None,
name: "MyContract.sol".to_string(),
path: "implementations/Contract2.sol".to_string(),
});
// Different source to same target (should be allowed)
remappings.push(Remapping {
context: None,
name: "OtherContract.sol".to_string(),
path: "implementations/Contract1.sol".to_string(),
});
// Exact duplicate (should be silently ignored)
remappings.push(Remapping {
context: None,
name: "MyContract.sol".to_string(),
path: "implementations/Contract1.sol".to_string(),
});
// Invalid .sol remapping (target not .sol)
remappings.push(Remapping {
context: None,
name: "Invalid.sol".to_string(),
path: "implementations/Contract1.txt".to_string(),
});
let result = remappings.into_inner();
assert_eq!(result.len(), 2, "Should only have 2 valid remappings");
// Verify the correct remappings exist
assert!(
result
.iter()
.any(|r| r.name == "MyContract.sol" && r.path == "implementations/Contract1.sol"),
"Should keep first mapping of MyContract.sol"
);
assert!(
!result
.iter()
.any(|r| r.name == "MyContract.sol" && r.path == "implementations/Contract2.sol"),
"Should keep first mapping of MyContract.sol"
);
assert!(result.iter().any(|r| r.name == "OtherContract.sol" && r.path == "implementations/Contract1.sol"),
"Should allow different source to same target");
// Verify the rejected remapping doesn't exist
assert!(
!result
.iter()
.any(|r| r.name == "MyContract.sol" && r.path == "implementations/Contract2.sol"),
"Should reject same source to different target"
);
}
#[test]
fn test_mixed_remappings() {
let mut remappings = Remappings::new();
remappings.push(Remapping {
context: None,
name: "@openzeppelin-contracts/".to_string(),
path: "lib/openzeppelin-contracts/".to_string(),
});
remappings.push(Remapping {
context: None,
name: "@openzeppelin/contracts/".to_string(),
path: "lib/openzeppelin/contracts/".to_string(),
});
remappings.push(Remapping {
context: None,
name: "MyContract.sol".to_string(),
path: "os/Contract.sol".to_string(),
});
let result = remappings.into_inner();
assert_eq!(result.len(), 3, "Should have 3 remappings");
assert_eq!(result.first().unwrap().name, "@openzeppelin-contracts/");
assert_eq!(result.first().unwrap().path, "lib/openzeppelin-contracts/");
assert_eq!(result.get(1).unwrap().name, "@openzeppelin/contracts/");
assert_eq!(result.get(1).unwrap().path, "lib/openzeppelin/contracts/");
assert_eq!(result.get(2).unwrap().name, "MyContract.sol");
assert_eq!(result.get(2).unwrap().path, "os/Contract.sol");
}
#[test]
fn test_remappings_with_context() {
let mut remappings = Remappings::new();
// Same name but different contexts
remappings.push(Remapping {
context: Some("test/".to_string()),
name: "MyContract.sol".to_string(),
path: "test/Contract.sol".to_string(),
});
remappings.push(Remapping {
context: Some("prod/".to_string()),
name: "MyContract.sol".to_string(),
path: "prod/Contract.sol".to_string(),
});
let result = remappings.into_inner();
assert_eq!(result.len(), 2, "Should allow same name with different contexts");
assert!(
result
.iter()
.any(|r| r.context == Some("test/".to_string()) && r.path == "test/Contract.sol")
);
assert!(
result
.iter()
.any(|r| r.context == Some("prod/".to_string()) && r.path == "prod/Contract.sol")
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/config/src/providers/warnings.rs | crates/config/src/providers/warnings.rs | use crate::{Config, DEPRECATIONS, Warning};
use figment::{
Error, Figment, Metadata, Profile, Provider,
value::{Dict, Map, Value},
};
use heck::ToSnakeCase;
use std::collections::BTreeMap;
/// Generate warnings for unknown sections and deprecated keys
pub struct WarningsProvider<P> {
provider: P,
profile: Profile,
old_warnings: Result<Vec<Warning>, Error>,
}
impl<P: Provider> WarningsProvider<P> {
const WARNINGS_KEY: &'static str = "__warnings";
/// Creates a new warnings provider.
pub fn new(
provider: P,
profile: impl Into<Profile>,
old_warnings: Result<Vec<Warning>, Error>,
) -> Self {
Self { provider, profile: profile.into(), old_warnings }
}
/// Creates a new figment warnings provider.
pub fn for_figment(provider: P, figment: &Figment) -> Self {
let old_warnings = {
let warnings_res = figment.extract_inner(Self::WARNINGS_KEY);
if warnings_res.as_ref().err().map(|err| err.missing()).unwrap_or(false) {
Ok(vec![])
} else {
warnings_res
}
};
Self::new(provider, figment.profile().clone(), old_warnings)
}
/// Collects all warnings.
pub fn collect_warnings(&self) -> Result<Vec<Warning>, Error> {
let data = self.provider.data().unwrap_or_default();
let mut out = self.old_warnings.clone()?;
// Add warning for unknown sections.
out.extend(data.keys().filter(|k| !Config::is_standalone_section(k.as_str())).map(
|unknown_section| {
let source = self.provider.metadata().source.map(|s| s.to_string());
Warning::UnknownSection { unknown_section: unknown_section.clone(), source }
},
));
// Add warning for deprecated keys.
let deprecated_key_warning = |key| {
DEPRECATIONS.iter().find_map(|(deprecated_key, new_value)| {
if key == *deprecated_key {
Some(Warning::DeprecatedKey {
old: deprecated_key.to_string(),
new: new_value.to_string(),
})
} else {
None
}
})
};
let profiles = data
.iter()
.filter(|(profile, _)| **profile == Config::PROFILE_SECTION)
.map(|(_, dict)| dict);
out.extend(profiles.clone().flat_map(BTreeMap::keys).filter_map(deprecated_key_warning));
out.extend(
profiles
.clone()
.filter_map(|dict| dict.get(self.profile.as_str().as_str()))
.filter_map(Value::as_dict)
.flat_map(BTreeMap::keys)
.filter_map(deprecated_key_warning),
);
// Add warning for unknown keys within profiles (root keys only here).
if let Ok(default_map) = figment::providers::Serialized::defaults(&Config::default()).data()
&& let Some(default_dict) = default_map.get(&Config::DEFAULT_PROFILE)
{
let allowed_keys: std::collections::BTreeSet<String> =
default_dict.keys().cloned().collect();
for profile_map in profiles {
for (profile, value) in profile_map {
let Some(profile_dict) = value.as_dict() else {
continue;
};
let source = self
.provider
.metadata()
.source
.map(|s| s.to_string())
.unwrap_or(Config::FILE_NAME.to_string());
for key in profile_dict.keys() {
let is_not_deprecated =
!DEPRECATIONS.iter().any(|(deprecated_key, _)| *deprecated_key == key);
let is_not_allowed = !allowed_keys.contains(key)
&& !allowed_keys.contains(&key.to_snake_case());
let is_not_reserved = key != "extends" && key != Self::WARNINGS_KEY;
let is_not_backward_compatible = key != "solc_version";
if is_not_deprecated
&& is_not_allowed
&& is_not_reserved
&& is_not_backward_compatible
{
out.push(Warning::UnknownKey {
key: key.clone(),
profile: profile.clone(),
source: source.clone(),
});
}
}
}
}
}
Ok(out)
}
}
impl<P: Provider> Provider for WarningsProvider<P> {
fn metadata(&self) -> Metadata {
if let Some(source) = self.provider.metadata().source {
Metadata::from("Warnings", source)
} else {
Metadata::named("Warnings")
}
}
fn data(&self) -> Result<Map<Profile, Dict>, Error> {
let warnings = self.collect_warnings()?;
Ok(Map::from([(
self.profile.clone(),
Dict::from([(Self::WARNINGS_KEY.to_string(), Value::serialize(warnings)?)]),
)]))
}
fn profile(&self) -> Option<Profile> {
Some(self.profile.clone())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/clap.rs | crates/cli/src/clap.rs | use clap_complete::{Shell as ClapCompleteShell, aot::Generator};
use clap_complete_nushell::Nushell;
#[derive(Clone, Copy)]
pub enum Shell {
ClapCompleteShell(ClapCompleteShell),
Nushell,
}
impl clap::ValueEnum for Shell {
fn value_variants<'a>() -> &'a [Self] {
&[
Self::ClapCompleteShell(ClapCompleteShell::Bash),
Self::ClapCompleteShell(ClapCompleteShell::Zsh),
Self::ClapCompleteShell(ClapCompleteShell::Fish),
Self::ClapCompleteShell(ClapCompleteShell::PowerShell),
Self::ClapCompleteShell(ClapCompleteShell::Elvish),
Self::Nushell,
]
}
fn to_possible_value(&self) -> Option<clap::builder::PossibleValue> {
match self {
Self::ClapCompleteShell(shell) => shell.to_possible_value(),
Self::Nushell => Some(clap::builder::PossibleValue::new("nushell")),
}
}
}
impl Generator for Shell {
    /// Returns the conventional completion-script file name for the binary `name`.
    fn file_name(&self, name: &str) -> String {
        match *self {
            Self::ClapCompleteShell(shell) => shell.file_name(name),
            Self::Nushell => Nushell.file_name(name),
        }
    }

    /// Writes the completion script for `cmd` into `buf`.
    fn generate(&self, cmd: &clap::Command, buf: &mut dyn std::io::Write) {
        match *self {
            Self::ClapCompleteShell(shell) => shell.generate(cmd, buf),
            Self::Nushell => Nushell.generate(cmd, buf),
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/lib.rs | crates/cli/src/lib.rs | //! # foundry-cli
//!
//! Common CLI utilities.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate foundry_common;
#[macro_use]
extern crate tracing;
pub mod clap;
pub mod handler;
pub mod opts;
pub mod utils;
#[cfg(feature = "tracy")]
tracing_tracy::client::register_demangler!();
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/handler.rs | crates/cli/src/handler.rs | use eyre::EyreHandler;
use itertools::Itertools;
use std::{error::Error, fmt};
/// A custom context type for Foundry specific error reporting via `eyre`.
#[derive(Default)]
pub struct Handler {
    // Optional verbose handler (from color-eyre); installed by `install()`
    // when `FOUNDRY_DEBUG` is set, otherwise `None`.
    debug_handler: Option<Box<dyn EyreHandler>>,
}
impl Handler {
    /// Creates a new `Handler` with no debug handler installed.
    pub fn new() -> Self {
        Self { debug_handler: None }
    }

    /// Returns `self` with the debug handler replaced by `debug_handler`.
    pub fn debug_handler(self, debug_handler: Option<Box<dyn EyreHandler>>) -> Self {
        Self { debug_handler }
    }
}
impl EyreHandler for Handler {
    /// `Display` ("{err}") formatting: the deduplicated error chain joined by
    /// "; " on a single line.
    fn display(&self, error: &(dyn Error + 'static), f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use fmt::Display;
        foundry_common::errors::dedup_chain(error).into_iter().format("; ").fmt(f)
    }

    /// `Debug` ("{err:?}") formatting: the top-level error followed by a
    /// "Context:" section listing the deduplicated source chain.
    fn debug(&self, error: &(dyn Error + 'static), f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Verbose (FOUNDRY_DEBUG) mode: defer entirely to the inner handler.
        if let Some(debug_handler) = &self.debug_handler {
            return debug_handler.debug(error, f);
        }
        // Alternate ("{err:#?}"): fall back to the error's own Debug impl.
        if f.alternate() {
            return fmt::Debug::fmt(error, f);
        }
        let errors = foundry_common::errors::dedup_chain(error);
        // The chain is expected to contain at least the top-level error itself
        // (hence the unwrap).
        let (error, sources) = errors.split_first().unwrap();
        write!(f, "{error}")?;
        if !sources.is_empty() {
            write!(f, "\n\nContext:")?;
            // Number the entries only when there is more than one source.
            let multiple = sources.len() > 1;
            for (n, error) in sources.iter().enumerate() {
                writeln!(f)?;
                if multiple {
                    write!(f, "- Error #{n}: {error}")?;
                } else {
                    write!(f, "- {error}")?;
                }
            }
        }
        Ok(())
    }

    /// Forwards caller-location tracking to the debug handler, if any.
    fn track_caller(&mut self, location: &'static std::panic::Location<'static>) {
        if let Some(debug_handler) = &mut self.debug_handler {
            debug_handler.track_caller(location);
        }
    }
}
/// Installs the Foundry [`eyre`] and [`panic`](mod@std::panic) hooks as the global ones.
///
/// # Details
///
/// By default a simple user-centric handler is installed, unless
/// `FOUNDRY_DEBUG` is set in the environment, in which case a more
/// verbose debug-centric handler is installed.
///
/// Panics are always caught by the more debug-centric handler.
pub fn install() {
    // Ensure backtraces are captured unless the user configured them explicitly.
    if std::env::var_os("RUST_BACKTRACE").is_none() {
        // SAFETY: mutating the environment; NOTE(review): assumes install() is
        // called early in main before other threads are spawned — confirm.
        unsafe {
            std::env::set_var("RUST_BACKTRACE", "1");
        }
    }
    let panic_section =
        "This is a bug. Consider reporting it at https://github.com/foundry-rs/foundry";
    let (panic_hook, debug_hook) =
        color_eyre::config::HookBuilder::default().panic_section(panic_section).into_hooks();
    // Panics always go through the verbose color-eyre hook.
    panic_hook.install();
    let debug_hook = debug_hook.into_eyre_hook();
    // Errors use the verbose hook only when FOUNDRY_DEBUG is set; otherwise the
    // user-centric `Handler` formats them.
    let debug = std::env::var_os("FOUNDRY_DEBUG").is_some();
    if let Err(e) = eyre::set_hook(Box::new(move |e| {
        Box::new(Handler::new().debug_handler(debug.then(|| debug_hook(e))))
    })) {
        debug!("failed to install eyre error hook: {e}");
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/utils/abi.rs | crates/cli/src/utils/abi.rs | use alloy_chains::Chain;
use alloy_ens::NameOrAddress;
use alloy_json_abi::Function;
use alloy_primitives::{Address, hex};
use alloy_provider::{Provider, network::AnyNetwork};
use eyre::{OptionExt, Result};
use foundry_common::abi::{
encode_function_args, encode_function_args_raw, get_func, get_func_etherscan,
};
use futures::future::join_all;
/// Resolves name-like arguments (anything containing a '.') to addresses via `provider`.
///
/// Arguments without a '.' or whose resolution fails are passed through unchanged.
/// All lookups run concurrently.
async fn resolve_name_args<P: Provider<AnyNetwork>>(args: &[String], provider: &P) -> Vec<String> {
    let lookups = args.iter().map(|arg| async move {
        if !arg.contains('.') {
            return arg.clone();
        }
        match NameOrAddress::Name(arg.clone()).resolve(provider).await {
            Ok(addr) => addr.to_string(),
            Err(_) => arg.clone(),
        }
    });
    join_all(lookups).await
}
/// Parses function call data from a signature-or-calldata string and its arguments.
///
/// Resolution order:
/// 1. If `sig` decodes as hex, it is treated as raw calldata and returned unchanged.
/// 2. If `sig` contains parentheses, it is parsed as a human-readable function signature.
/// 3. Otherwise the function is looked up on Etherscan, which requires both
///    `etherscan_api_key` and a `to` address.
///
/// Name-like arguments (containing '.') are resolved via `provider` first.
/// Returns the ABI-encoded calldata and, when a signature was used, the [`Function`].
pub async fn parse_function_args<P: Provider<AnyNetwork>>(
    sig: &str,
    args: Vec<String>,
    to: Option<Address>,
    chain: Chain,
    provider: &P,
    etherscan_api_key: Option<&str>,
) -> Result<(Vec<u8>, Option<Function>)> {
    if sig.trim().is_empty() {
        eyre::bail!("Function signature or calldata must be provided.")
    }
    let args = resolve_name_args(&args, provider).await;
    // Raw calldata: return as-is, no ABI encoding and no known Function.
    if let Ok(data) = hex::decode(sig) {
        return Ok((data, None));
    }
    let func = if sig.contains('(') {
        // a regular function signature with parentheses
        get_func(sig)?
    } else {
        info!(
            "function signature does not contain parentheses, fetching function data from Etherscan"
        );
        let etherscan_api_key = etherscan_api_key.ok_or_eyre(
            "Function signature does not contain parentheses. If you wish to fetch function data from Etherscan, please provide an API key.",
        )?;
        let to = to.ok_or_eyre("A 'to' address must be provided to fetch function data.")?;
        get_func_etherscan(sig, to, &args, chain, etherscan_api_key).await?
    };
    if to.is_none() {
        // if this is a CREATE call we must exclude the (constructor) function selector: https://github.com/foundry-rs/foundry/issues/10947
        Ok((encode_function_args_raw(&func, &args)?, Some(func)))
    } else {
        Ok((encode_function_args(&func, &args)?, Some(func)))
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/utils/suggestions.rs | crates/cli/src/utils/suggestions.rs | //! Helper functions for suggesting alternative values for a possibly erroneous user input.
/// Filters multiple strings from a given list of possible values which are similar
/// to the passed in value `v` within a certain confidence by least confidence.
///
/// The jaro winkler similarity boosts candidates that have a common prefix, which is often the case
/// in the event of typos. Thus, in a list of possible values like ["foo", "bar"], the value "fop"
/// will yield `Some("foo")`, whereas "blark" would yield `None`.
pub fn did_you_mean<T, I>(v: &str, candidates: I) -> Vec<String>
where
    T: AsRef<str>,
    I: IntoIterator<Item = T>,
{
    // Score every candidate; keep only confident matches (similarity > 0.8).
    let mut scored: Vec<(f64, String)> = Vec::new();
    for candidate in candidates {
        let name = candidate.as_ref();
        let similarity = strsim::jaro_winkler(v, name);
        if similarity > 0.8 {
            scored.push((similarity, name.to_owned()));
        }
    }
    // Ascending by similarity, so the best suggestion ends up last (callers `pop()` it).
    scored.sort_by(|lhs, rhs| lhs.0.total_cmp(&rhs.0));
    scored.into_iter().map(|(_, name)| name).collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    // `did_you_mean` sorts ascending by similarity, so `pop()` yields the best match.
    #[test]
    fn possible_artifacts_match() {
        let candidates = ["MyContract", "Erc20"];
        assert_eq!(
            did_you_mean("MyCtrac", candidates.iter()).pop(),
            Some("MyContract".to_string())
        );
    }
    // A sufficiently dissimilar input yields no suggestion at all.
    #[test]
    fn possible_artifacts_nomatch() {
        let candidates = ["MyContract", "Erc20", "Greeter"];
        assert!(did_you_mean("Vault", candidates.iter()).pop().is_none());
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/utils/allocator.rs | crates/cli/src/utils/allocator.rs | //! Abstract global allocator implementation.
#[cfg(feature = "mimalloc")]
use mimalloc as _;
#[cfg(all(feature = "jemalloc", unix))]
use tikv_jemallocator as _;
// If neither jemalloc nor mimalloc are enabled, use explicitly the system allocator.
// By default jemalloc is enabled on Unix systems.
cfg_if::cfg_if! {
    if #[cfg(all(feature = "jemalloc", unix))] {
        type AllocatorInner = tikv_jemallocator::Jemalloc;
    } else if #[cfg(feature = "mimalloc")] {
        type AllocatorInner = mimalloc::MiMalloc;
    } else {
        type AllocatorInner = std::alloc::System;
    }
}
// Wrap the allocator if the `tracy-allocator` feature is enabled.
cfg_if::cfg_if! {
    if #[cfg(feature = "tracy-allocator")] {
        type AllocatorWrapper = tracing_tracy::client::ProfiledAllocator<AllocatorInner>;
        const fn new_allocator_wrapper() -> AllocatorWrapper {
            // 100 — NOTE(review): presumably the callstack depth Tracy samples
            // per allocation; confirm against tracing-tracy's ProfiledAllocator docs.
            AllocatorWrapper::new(AllocatorInner {}, 100)
        }
    } else {
        type AllocatorWrapper = AllocatorInner;
        const fn new_allocator_wrapper() -> AllocatorWrapper {
            AllocatorInner {}
        }
    }
}
/// The global allocator type selected by the enabled cargo features.
pub type Allocator = AllocatorWrapper;
/// Creates a new [allocator][Allocator].
pub const fn new_allocator() -> Allocator {
    new_allocator_wrapper()
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/utils/mod.rs | crates/cli/src/utils/mod.rs | use alloy_json_abi::JsonAbi;
use alloy_primitives::{Address, U256, map::HashMap};
use alloy_provider::{Provider, network::AnyNetwork};
use eyre::{ContextCompat, Result};
use foundry_common::{
provider::{ProviderBuilder, RetryProvider},
shell,
};
use foundry_config::{Chain, Config};
use itertools::Itertools;
use path_slash::PathExt;
use regex::Regex;
use serde::de::DeserializeOwned;
use std::{
ffi::OsStr,
path::{Path, PathBuf},
process::{Command, Output, Stdio},
str::FromStr,
sync::LazyLock,
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tracing_subscriber::prelude::*;
mod cmd;
pub use cmd::*;
mod suggestions;
pub use suggestions::*;
mod abi;
pub use abi::*;
mod allocator;
pub use allocator::*;
// reexport all `foundry_config::utils`
#[doc(hidden)]
pub use foundry_config::utils::*;
/// Deterministic fuzzer seed used for gas snapshots and coverage reports.
///
/// The keccak256 hash of "foundry rulez"
pub const STATIC_FUZZ_SEED: [u8; 32] = [
    0x01, 0x00, 0xfa, 0x69, 0xa5, 0xf1, 0x71, 0x0a, 0x95, 0xcd, 0xef, 0x94, 0x88, 0x9b, 0x02, 0x84,
    0x5d, 0x64, 0x0b, 0x19, 0xad, 0xf0, 0xe3, 0x57, 0xb8, 0xd4, 0xbe, 0x7d, 0x49, 0xee, 0x70, 0xe6,
];
/// Regex used to parse `.gitmodules` file and capture the submodule path and branch.
///
/// Capture 1 is the quoted submodule name, capture 2 the configured branch.
pub static SUBMODULE_BRANCH_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r#"\[submodule "([^"]+)"\](?:[^\[]*?branch = ([^\s]+))"#).unwrap());
/// Regex used to parse `git submodule status` output.
///
/// Capture 1 is the commit hash, capture 2 the submodule path; the optional
/// leading `+`/`-`/space is git's status prefix, the trailing parenthesized
/// describe-output is ignored.
pub static SUBMODULE_STATUS_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^[\s+-]?([a-f0-9]+)\s+([^\s]+)(?:\s+\([^)]+\))?$").unwrap());
/// Useful extensions to [`std::path::Path`].
pub trait FoundryPathExt {
    /// Returns true if the [`Path`] ends with `.t.sol`
    fn is_sol_test(&self) -> bool;
    /// Returns true if the [`Path`] has a `sol` extension
    fn is_sol(&self) -> bool;
    /// Returns true if the [`Path`] has a `yul` extension
    fn is_yul(&self) -> bool;
}

impl<T: AsRef<Path>> FoundryPathExt for T {
    fn is_sol_test(&self) -> bool {
        matches!(
            self.as_ref().file_name().and_then(std::ffi::OsStr::to_str),
            Some(name) if name.ends_with(".t.sol")
        )
    }

    fn is_sol(&self) -> bool {
        self.as_ref().extension() == Some(std::ffi::OsStr::new("sol"))
    }

    fn is_yul(&self) -> bool {
        self.as_ref().extension() == Some(std::ffi::OsStr::new("yul"))
    }
}
/// Initializes a tracing Subscriber for logging
///
/// Installs the env-filter (see [`env_filter`]), an optional Tracy layer, and a
/// fmt layer as the global default subscriber.
pub fn subscriber() {
    let registry = tracing_subscriber::Registry::default().with(env_filter());
    // Forward spans/events to the Tracy profiler when the feature is enabled.
    #[cfg(feature = "tracy")]
    let registry = registry.with(tracing_tracy::TracyLayer::default());
    registry.with(tracing_subscriber::fmt::layer()).init()
}
/// Builds the tracing `EnvFilter`: directives from the environment (`RUST_LOG`)
/// plus the compile-time defaults from `default_directives.txt`.
fn env_filter() -> tracing_subscriber::EnvFilter {
    // Baked in at compile time; the included file expands to an array literal.
    const DEFAULT_DIRECTIVES: &[&str] = &include!("./default_directives.txt");
    let mut filter = tracing_subscriber::EnvFilter::from_default_env();
    for &directive in DEFAULT_DIRECTIVES {
        // These directives ship with the binary, so a parse failure is a build bug.
        filter = filter.add_directive(directive.parse().unwrap());
    }
    filter
}
/// Returns a [RetryProvider] instantiated using [Config]'s RPC settings.
///
/// Convenience wrapper over [`get_provider_builder`] + `build`.
pub fn get_provider(config: &Config) -> Result<RetryProvider> {
    get_provider_builder(config)?.build()
}
/// Returns a [ProviderBuilder] instantiated using [Config] values.
///
/// Defaults to `http://localhost:8545` and `Mainnet`.
pub fn get_provider_builder(config: &Config) -> Result<ProviderBuilder> {
    let url = config.get_rpc_url_or_localhost_http()?;
    let mut builder = ProviderBuilder::new(url.as_ref())
        .accept_invalid_certs(config.eth_rpc_accept_invalid_certs);
    // Only set the chain when the configured value maps to a known chain.
    if let Ok(chain) = config.chain.unwrap_or_default().try_into() {
        builder = builder.chain(chain);
    }
    if let Some(jwt) = config.get_rpc_jwt_secret()? {
        builder = builder.jwt(jwt.as_ref());
    }
    if let Some(rpc_timeout) = config.eth_rpc_timeout {
        builder = builder.timeout(Duration::from_secs(rpc_timeout));
    }
    if let Some(rpc_headers) = config.eth_rpc_headers.clone() {
        builder = builder.headers(rpc_headers);
    }
    Ok(builder)
}
/// Returns `chain` if given, otherwise queries the chain id from `provider`.
pub async fn get_chain<P>(chain: Option<Chain>, provider: P) -> Result<Chain>
where
    P: Provider<AnyNetwork>,
{
    if let Some(chain) = chain {
        return Ok(chain);
    }
    Ok(Chain::from_id(provider.get_chain_id().await?))
}
/// Parses an ether value from a string.
///
/// The amount can be tagged with a unit, e.g. "1ether".
///
/// If the string represents an untagged amount (e.g. "100") then
/// it is interpreted as wei.
pub fn parse_ether_value(value: &str) -> Result<U256> {
    Ok(if value.starts_with("0x") {
        // Hex values are taken verbatim as wei.
        U256::from_str_radix(value, 16)?
    } else {
        // Delegate unit handling ("1ether", "10 gwei", ...) to the ABI coercion logic.
        alloy_dyn_abi::DynSolType::coerce_str(&alloy_dyn_abi::DynSolType::Uint(256), value)?
            .as_uint()
            .wrap_err("Could not parse ether value from string")?
            .0
    })
}
/// Parses a `T` from a string using [`serde_json::from_str`].
///
/// Thin alias kept for ergonomic use as a clap value parser.
pub fn parse_json<T: DeserializeOwned>(value: &str) -> serde_json::Result<T> {
    serde_json::from_str(value)
}
/// Parses a `Duration` from a &str
///
/// Accepts milliseconds with an "ms" suffix (e.g. "300ms") or fractional
/// seconds (e.g. "0.5"); negative and non-finite values are rejected.
pub fn parse_delay(delay: &str) -> Result<Duration> {
    if delay.ends_with("ms") {
        let millis: u64 = delay.trim_end_matches("ms").parse()?;
        return Ok(Duration::from_millis(millis));
    }
    let seconds: f64 = delay.parse()?;
    let millis = (seconds * 1000.0).round();
    // Reject NaN, infinities and anything below zero (incl. -0.0).
    if !millis.is_finite() || millis.is_sign_negative() {
        eyre::bail!("delay must be finite and non-negative");
    }
    Ok(Duration::from_millis(millis as u64))
}
/// Returns the current time as a [`Duration`] since the Unix epoch.
pub fn now() -> Duration {
    // Equivalent to SystemTime::now().duration_since(UNIX_EPOCH).
    UNIX_EPOCH.elapsed().expect("time went backwards")
}
/// Common setup for all CLI tools. Does not include [tracing subscriber](subscriber).
///
/// Installs the rustls crypto provider and the eyre/panic hooks, loads `.env`
/// files, and enables colored output where supported.
pub fn common_setup() {
    install_crypto_provider();
    crate::handler::install();
    load_dotenv();
    enable_paint();
}
/// Loads a dotenv file, from the cwd and the project root, ignoring potential failure.
///
/// We could use `warn!` here, but that would imply that the dotenv file can't configure
/// the logging behavior of Foundry.
///
/// Similarly, we could just use `eprintln!`, but colors are off limits otherwise dotenv is implied
/// to not be able to configure the colors. It would also mess up the JSON output.
pub fn load_dotenv() {
    // Best-effort: any error (missing file, parse failure) is swallowed on purpose.
    let load = |p: &Path| {
        dotenvy::from_path(p.join(".env")).ok();
    };
    // we only want the .env file of the cwd and project root
    // `find_project_root` calls `current_dir` internally so both paths are either both `Ok` or
    // both `Err`
    if let (Ok(cwd), Ok(prj_root)) = (std::env::current_dir(), find_project_root(None)) {
        // Project root first, so the cwd's file can override it.
        load(&prj_root);
        if cwd != prj_root {
            // prj root and cwd can be identical
            load(&cwd);
        }
    };
}
/// Sets the default [`yansi`] color output condition.
///
/// Colors are enabled only when the OS supports them and output is a live
/// color-capable TTY; the result is cached for the process lifetime.
pub fn enable_paint() {
    let enable = yansi::Condition::os_support() && yansi::Condition::tty_and_color_live();
    yansi::whenever(yansi::Condition::cached(enable));
}
/// This force installs the default crypto provider.
///
/// This is necessary in case there are more than one available backends enabled in rustls (ring,
/// aws-lc-rs).
///
/// This should be called high in the main fn.
///
/// # Panics
///
/// Panics if installation fails (NOTE(review): per `install_default` semantics
/// this happens when a provider was already set — call at most once per process).
///
/// See also:
/// <https://github.com/snapview/tokio-tungstenite/issues/353#issuecomment-2455100010>
/// <https://github.com/awslabs/aws-sdk-rust/discussions/1257>
pub fn install_crypto_provider() {
    // https://github.com/snapview/tokio-tungstenite/issues/353
    rustls::crypto::ring::default_provider()
        .install_default()
        .expect("Failed to install default rustls crypto provider");
}
/// Fetches the ABI of a contract from Etherscan.
///
/// Returns one `(abi, contract_name)` pair for every source item Etherscan
/// reports for `address`.
pub async fn fetch_abi_from_etherscan(
    address: Address,
    config: &foundry_config::Config,
) -> Result<Vec<(JsonAbi, String)>> {
    let chain = config.chain.unwrap_or_default();
    // A missing API key falls back to the default value.
    let api_key = config.get_etherscan_api_key(Some(chain)).unwrap_or_default();
    let client = foundry_block_explorers::Client::new(chain, api_key)?;
    let source = client.contract_source_code(address).await?;
    // Fails on the first item whose ABI cannot be parsed.
    source.items.into_iter().map(|item| Ok((item.abi()?, item.contract_name))).collect()
}
/// Useful extensions to [`std::process::Command`].
pub trait CommandUtils {
    /// Returns the command's output if execution is successful, otherwise, throws an error.
    fn exec(&mut self) -> Result<Output>;
    /// Returns the command's stdout if execution is successful, otherwise, throws an error.
    ///
    /// Output is lossily UTF-8 decoded and trimmed.
    fn get_stdout_lossy(&mut self) -> Result<String>;
}
impl CommandUtils for Command {
    /// Runs the command, returning its [`Output`] on success or a descriptive
    /// error ("<name> exited with code N: <output>") on failure.
    #[track_caller]
    fn exec(&mut self) -> Result<Output> {
        trace!(command=?self, "executing");
        let output = self.output()?;
        trace!(code=?output.status.code(), ?output);
        if output.status.success() {
            return Ok(output);
        }

        // Build a readable message from whatever the command printed.
        let stdout = String::from_utf8_lossy(&output.stdout);
        let stdout = stdout.trim();
        let stderr = String::from_utf8_lossy(&output.stderr);
        let stderr = stderr.trim();
        let msg = if stdout.is_empty() {
            stderr.to_string()
        } else if stderr.is_empty() {
            stdout.to_string()
        } else {
            format!("stdout:\n{stdout}\n\nstderr:\n{stderr}")
        };

        // Identify the command as "program subcommand" when the first argument
        // is not a flag, e.g. "git clone".
        let mut name = self.get_program().to_string_lossy();
        if let Some(arg) = self.get_args().next() {
            let arg = arg.to_string_lossy();
            if !arg.starts_with('-') {
                let name = name.to_mut();
                name.push(' ');
                name.push_str(&arg);
            }
        }

        let mut err = match output.status.code() {
            Some(code) => format!("{name} exited with code {code}"),
            None => format!("{name} terminated by a signal"),
        };
        if !msg.is_empty() {
            err.push(':');
            // Keep one-line messages on the same line; put multi-line output on
            // its own lines. (Fix: this previously compared `lines().count()`
            // against 0, which is impossible for a non-empty `msg`, making the
            // single-line branch dead code.)
            err.push(if msg.lines().count() == 1 { ' ' } else { '\n' });
            err.push_str(&msg);
        }
        Err(eyre::eyre!(err))
    }

    /// Runs the command via [`exec`](Self::exec) and returns its trimmed,
    /// lossily-decoded stdout.
    #[track_caller]
    fn get_stdout_lossy(&mut self) -> Result<String> {
        let output = self.exec()?;
        let stdout = String::from_utf8_lossy(&output.stdout);
        Ok(stdout.trim().into())
    }
}
/// Helper for invoking `git` subcommands rooted at a given directory.
#[derive(Clone, Copy, Debug)]
pub struct Git<'a> {
    /// Working directory in which the git commands run.
    pub root: &'a Path,
    /// When true, git's stderr is piped (suppressed) for commands that would
    /// otherwise inherit it.
    pub quiet: bool,
    /// When true, clone/fetch/submodule operations use `--depth=1`.
    pub shallow: bool,
}
impl<'a> Git<'a> {
    /// Creates a new `Git` rooted at `root`; `quiet` follows the global shell setting.
    pub fn new(root: &'a Path) -> Self {
        Self { root, quiet: shell::is_quiet(), shallow: false }
    }
    /// Creates a new `Git` rooted at the config's project root.
    pub fn from_config(config: &'a Config) -> Self {
        Self::new(config.root.as_path())
    }
    /// Returns the git repository root containing `relative_to`
    /// (`git rev-parse --show-toplevel`).
    pub fn root_of(relative_to: &Path) -> Result<PathBuf> {
        let output = Self::cmd_no_root()
            .current_dir(relative_to)
            .args(["rev-parse", "--show-toplevel"])
            .get_stdout_lossy()?;
        Ok(PathBuf::from(output))
    }
    /// Clones `from` at `branch`, recursing into submodules, optionally shallow,
    /// into `to` (or git's default directory when `None`).
    pub fn clone_with_branch(
        shallow: bool,
        from: impl AsRef<OsStr>,
        branch: impl AsRef<OsStr>,
        to: Option<impl AsRef<OsStr>>,
    ) -> Result<()> {
        Self::cmd_no_root()
            .stderr(Stdio::inherit())
            .args(["clone", "--recurse-submodules"])
            .args(shallow.then_some("--depth=1"))
            .args(shallow.then_some("--shallow-submodules"))
            .arg("-b")
            .arg(branch)
            .arg(from)
            .args(to)
            .exec()
            .map(drop)
    }
    /// Clones `from`, recursing into submodules, optionally shallow, into `to`.
    pub fn clone(
        shallow: bool,
        from: impl AsRef<OsStr>,
        to: Option<impl AsRef<OsStr>>,
    ) -> Result<()> {
        Self::cmd_no_root()
            .stderr(Stdio::inherit())
            .args(["clone", "--recurse-submodules"])
            .args(shallow.then_some("--depth=1"))
            .args(shallow.then_some("--shallow-submodules"))
            .arg(from)
            .args(to)
            .exec()
            .map(drop)
    }
    /// Fetches `branch` (or everything) from `remote`, optionally shallow.
    pub fn fetch(
        self,
        shallow: bool,
        remote: impl AsRef<OsStr>,
        branch: Option<impl AsRef<OsStr>>,
    ) -> Result<()> {
        self.cmd()
            .stderr(Stdio::inherit())
            .arg("fetch")
            .args(shallow.then_some("--no-tags"))
            .args(shallow.then_some("--depth=1"))
            .arg(remote)
            .args(branch)
            .exec()
            .map(drop)
    }
    /// Returns a copy with `root` replaced.
    pub fn root(self, root: &Path) -> Git<'_> {
        Git { root, ..self }
    }
    /// Returns a copy with `quiet` replaced.
    pub fn quiet(self, quiet: bool) -> Self {
        Self { quiet, ..self }
    }
    /// True to perform shallow clones
    pub fn shallow(self, shallow: bool) -> Self {
        Self { shallow, ..self }
    }
    /// Checks out `tag` (a tag, branch or revision), optionally recursing into submodules.
    pub fn checkout(self, recursive: bool, tag: impl AsRef<OsStr>) -> Result<()> {
        self.cmd()
            .arg("checkout")
            .args(recursive.then_some("--recurse-submodules"))
            .arg(tag)
            .exec()
            .map(drop)
    }
    /// Returns the current HEAD commit hash of the current branch.
    pub fn head(self) -> Result<String> {
        self.cmd().args(["rev-parse", "HEAD"]).get_stdout_lossy()
    }
    /// Checks out `tag` with the working directory set to `at` instead of `self.root`.
    pub fn checkout_at(self, tag: impl AsRef<OsStr>, at: &Path) -> Result<()> {
        self.cmd_at(at).arg("checkout").arg(tag).exec().map(drop)
    }
    /// Initializes a new git repository at `self.root`.
    pub fn init(self) -> Result<()> {
        self.cmd().arg("init").exec().map(drop)
    }
    /// Returns `(HEAD commit hash, current branch name)` for the repo at `at`.
    pub fn current_rev_branch(self, at: &Path) -> Result<(String, String)> {
        let rev = self.cmd_at(at).args(["rev-parse", "HEAD"]).get_stdout_lossy()?;
        let branch =
            self.cmd_at(at).args(["rev-parse", "--abbrev-ref", "HEAD"]).get_stdout_lossy()?;
        Ok((rev, branch))
    }
    /// Stages the given paths (`git add`).
    #[expect(clippy::should_implement_trait)] // this is not std::ops::Add clippy
    pub fn add<I, S>(self, paths: I) -> Result<()>
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        self.cmd().arg("add").args(paths).exec().map(drop)
    }
    /// Resets to `tree`, optionally with `--hard`.
    pub fn reset(self, hard: bool, tree: impl AsRef<OsStr>) -> Result<()> {
        self.cmd().arg("reset").args(hard.then_some("--hard")).arg(tree).exec().map(drop)
    }
    /// Creates a commit object from `tree` with an optional message and returns its hash.
    pub fn commit_tree(
        self,
        tree: impl AsRef<OsStr>,
        msg: Option<impl AsRef<OsStr>>,
    ) -> Result<String> {
        self.cmd()
            .arg("commit-tree")
            .arg(tree)
            .args(msg.as_ref().is_some().then_some("-m"))
            .args(msg)
            .get_stdout_lossy()
    }
    /// Removes the given paths from the index and working tree (`git rm`).
    pub fn rm<I, S>(self, force: bool, paths: I) -> Result<()>
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        self.cmd().arg("rm").args(force.then_some("--force")).args(paths).exec().map(drop)
    }
    /// Commits staged changes with `msg`; "nothing to commit" is treated as success.
    ///
    /// GPG signing is disabled in tests/debug builds to avoid interactive prompts.
    pub fn commit(self, msg: &str) -> Result<()> {
        let output = self
            .cmd()
            .args(["commit", "-m", msg])
            .args(cfg!(any(test, debug_assertions)).then_some("--no-gpg-sign"))
            .output()?;
        if !output.status.success() {
            let stdout = String::from_utf8_lossy(&output.stdout);
            let stderr = String::from_utf8_lossy(&output.stderr);
            // ignore "nothing to commit" error
            let msg = "nothing to commit, working tree clean";
            if !(stdout.contains(msg) || stderr.contains(msg)) {
                return Err(eyre::eyre!(
                    "failed to commit (code={:?}, stdout={:?}, stderr={:?})",
                    output.status.code(),
                    stdout.trim(),
                    stderr.trim()
                ));
            }
        }
        Ok(())
    }
    /// Returns whether `self.root` is inside a git work tree.
    pub fn is_in_repo(self) -> std::io::Result<bool> {
        self.cmd().args(["rev-parse", "--is-inside-work-tree"]).status().map(|s| s.success())
    }
    /// Returns whether `self.root` is the repository root (`--show-cdup` prints nothing).
    pub fn is_repo_root(self) -> Result<bool> {
        self.cmd().args(["rev-parse", "--show-cdup"]).exec().map(|out| out.stdout.is_empty())
    }
    /// Returns whether working tree and index are clean (empty `status --porcelain`).
    pub fn is_clean(self) -> Result<bool> {
        self.cmd().args(["status", "--porcelain"]).exec().map(|out| out.stdout.is_empty())
    }
    /// Returns whether the repo at `at` has a local branch named `branch`.
    pub fn has_branch(self, branch: impl AsRef<OsStr>, at: &Path) -> Result<bool> {
        self.cmd_at(at)
            .args(["branch", "--list", "--no-color"])
            .arg(branch)
            .get_stdout_lossy()
            .map(|stdout| !stdout.is_empty())
    }
    /// Returns whether the repo at `at` has a tag named `tag`.
    pub fn has_tag(self, tag: impl AsRef<OsStr>, at: &Path) -> Result<bool> {
        self.cmd_at(at)
            .args(["tag", "--list"])
            .arg(tag)
            .get_stdout_lossy()
            .map(|stdout| !stdout.is_empty())
    }
    /// Returns whether `rev` names a commit object in the repo at `at`.
    pub fn has_rev(self, rev: impl AsRef<OsStr>, at: &Path) -> Result<bool> {
        self.cmd_at(at)
            .args(["cat-file", "-t"])
            .arg(rev)
            .get_stdout_lossy()
            .map(|stdout| &stdout == "commit")
    }
    /// Resolves `tag_or_branch` to a single commit hash (`git rev-list -n 1`).
    pub fn get_rev(self, tag_or_branch: impl AsRef<OsStr>, at: &Path) -> Result<String> {
        self.cmd_at(at).args(["rev-list", "-n", "1"]).arg(tag_or_branch).get_stdout_lossy()
    }
    /// Errors with an explanatory message unless the working tree is clean.
    pub fn ensure_clean(self) -> Result<()> {
        if self.is_clean()? {
            Ok(())
        } else {
            Err(eyre::eyre!(
                "\
The target directory is a part of or on its own an already initialized git repository,
and it requires clean working and staging areas, including no untracked files.
Check the current git repository's status with `git status`.
Then, you can track files with `git add ...` and then commit them with `git commit`,
ignore them in the `.gitignore` file."
            ))
        }
    }
    /// Resolves `revision` to its (optionally short) commit hash.
    pub fn commit_hash(self, short: bool, revision: &str) -> Result<String> {
        self.cmd()
            .arg("rev-parse")
            .args(short.then_some("--short"))
            .arg(revision)
            .get_stdout_lossy()
    }
    /// Returns the output of `git tag` (all tags, newline-separated).
    pub fn tag(self) -> Result<String> {
        self.cmd().arg("tag").get_stdout_lossy()
    }
    /// Returns the tag the commit first appeared in.
    ///
    /// E.g Take rev = `abc1234`. This commit can be found in multiple releases (tags).
    /// Consider releases: `v0.1.0`, `v0.2.0`, `v0.3.0` in chronological order, `rev` first appeared
    /// in `v0.2.0`.
    ///
    /// Hence, `tag_for_commit("abc1234")` will return `v0.2.0`.
    pub fn tag_for_commit(self, rev: &str, at: &Path) -> Result<Option<String>> {
        self.cmd_at(at)
            .args(["tag", "--contains"])
            .arg(rev)
            .get_stdout_lossy()
            .map(|stdout| stdout.lines().next().map(str::to_string))
    }
    /// Returns a list of tuples of submodule paths and their respective branches.
    ///
    /// This function reads the `.gitmodules` file and returns the paths of all submodules that have
    /// a branch. The paths are relative to the Git::root_of(git.root) and not lib/ directory.
    ///
    /// `at` is the dir in which the `.gitmodules` file is located, this is the git root.
    /// `lib` is name of the directory where the submodules are located.
    pub fn read_submodules_with_branch(
        self,
        at: &Path,
        lib: &OsStr,
    ) -> Result<HashMap<PathBuf, String>> {
        // Read the .gitmodules file
        let gitmodules = foundry_common::fs::read_to_string(at.join(".gitmodules"))?;
        let paths = SUBMODULE_BRANCH_REGEX
            .captures_iter(&gitmodules)
            .map(|cap| {
                let path_str = cap.get(1).unwrap().as_str();
                let path = PathBuf::from_str(path_str).unwrap();
                trace!(path = %path.display(), "unstripped path");
                // Keep only the components that come after the lib directory.
                // This needs to be done because the lockfile uses paths relative foundry project
                // root whereas .gitmodules use paths relative to the git root which may not be the
                // project root. e.g monorepo.
                // Hence, if path is lib/solady, then `lib/solady` is kept. if path is
                // packages/contract-bedrock/lib/solady, then `lib/solady` is kept.
                let lib_pos = path.components().find_position(|c| c.as_os_str() == lib);
                let path = path
                    .components()
                    .skip(lib_pos.map(|(i, _)| i).unwrap_or(0))
                    .collect::<PathBuf>();
                let branch = cap.get(2).unwrap().as_str().to_string();
                (path, branch)
            })
            .collect::<HashMap<_, _>>();
        Ok(paths)
    }
    /// Returns true if any of the given submodules is uninitialized
    /// (its `git submodule status` line starts with `-`).
    pub fn has_missing_dependencies<I, S>(self, paths: I) -> Result<bool>
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        self.cmd()
            .args(["submodule", "status"])
            .args(paths)
            .get_stdout_lossy()
            .map(|stdout| stdout.lines().any(|line| line.starts_with('-')))
    }
    /// Returns true if the given path has submodules by checking `git submodule status`
    pub fn has_submodules<I, S>(self, paths: I) -> Result<bool>
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        self.cmd()
            .args(["submodule", "status"])
            .args(paths)
            .get_stdout_lossy()
            .map(|stdout| stdout.trim().lines().next().is_some())
    }
    /// Adds a submodule at `path` pointing to `url` (`git submodule add`).
    pub fn submodule_add(
        self,
        force: bool,
        url: impl AsRef<OsStr>,
        path: impl AsRef<OsStr>,
    ) -> Result<()> {
        self.cmd()
            .stderr(self.stderr())
            .args(["submodule", "add"])
            .args(self.shallow.then_some("--depth=1"))
            .args(force.then_some("--force"))
            .arg(url)
            .arg(path)
            .exec()
            .map(drop)
    }
    /// Updates (and initializes) the given submodules (`git submodule update --init`).
    pub fn submodule_update<I, S>(
        self,
        force: bool,
        remote: bool,
        no_fetch: bool,
        recursive: bool,
        paths: I,
    ) -> Result<()>
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        self.cmd()
            .stderr(self.stderr())
            .args(["submodule", "update", "--progress", "--init"])
            .args(self.shallow.then_some("--depth=1"))
            .args(force.then_some("--force"))
            .args(remote.then_some("--remote"))
            .args(no_fetch.then_some("--no-fetch"))
            .args(recursive.then_some("--recursive"))
            .args(paths)
            .exec()
            .map(drop)
    }
    /// Runs `cmd` in each submodule (`git submodule foreach`).
    pub fn submodule_foreach(self, recursive: bool, cmd: impl AsRef<OsStr>) -> Result<()> {
        self.cmd()
            .stderr(self.stderr())
            .args(["submodule", "foreach"])
            .args(recursive.then_some("--recursive"))
            .arg(cmd)
            .exec()
            .map(drop)
    }
    /// If the status is prefix with `-`, the submodule is not initialized.
    ///
    /// Ref: <https://git-scm.com/docs/git-submodule#Documentation/git-submodule.txt-status--cached--recursive--ltpathgt82308203>
    pub fn submodules_uninitialized(self) -> Result<bool> {
        self.cmd()
            .args(["submodule", "status"])
            .get_stdout_lossy()
            .map(|stdout| stdout.lines().any(|line| line.starts_with('-')))
    }
    /// Initializes the git submodules.
    pub fn submodule_init(self) -> Result<()> {
        self.cmd().stderr(self.stderr()).args(["submodule", "init"]).exec().map(drop)
    }
    /// Parses `git submodule status` output into [`Submodules`].
    pub fn submodules(&self) -> Result<Submodules> {
        self.cmd().args(["submodule", "status"]).get_stdout_lossy().map(|stdout| stdout.parse())?
    }
    /// Synchronizes submodule URLs with `.gitmodules` (`git submodule sync`).
    pub fn submodule_sync(self) -> Result<()> {
        self.cmd().stderr(self.stderr()).args(["submodule", "sync"]).exec().map(drop)
    }
    /// Get the URL of a submodule from git config
    pub fn submodule_url(self, path: &Path) -> Result<Option<String>> {
        self.cmd()
            .args(["config", "--get", &format!("submodule.{}.url", path.to_slash_lossy())])
            .get_stdout_lossy()
            .map(|url| Some(url.trim().to_string()))
    }
    /// Returns a `git` command with the working directory set to `self.root`.
    pub fn cmd(self) -> Command {
        let mut cmd = Self::cmd_no_root();
        cmd.current_dir(self.root);
        cmd
    }
    /// Returns a `git` command with the working directory set to `path`.
    pub fn cmd_at(self, path: &Path) -> Command {
        let mut cmd = Self::cmd_no_root();
        cmd.current_dir(path);
        cmd
    }
    /// Returns a `git` command with stdout/stderr piped and no working directory set.
    pub fn cmd_no_root() -> Command {
        let mut cmd = Command::new("git");
        cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
        cmd
    }
    // don't set this in cmd() because it's not wanted for all commands
    fn stderr(self) -> Stdio {
        if self.quiet { Stdio::piped() } else { Stdio::inherit() }
    }
}
/// Deserialized `git submodule status lib/dep` output.
// serde derives: this type is (de)serialized as-is wherever submodule state is persisted.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)]
pub struct Submodule {
    /// Current commit hash the submodule is checked out at.
    rev: String,
    /// Relative path to the submodule.
    path: PathBuf,
}
impl Submodule {
pub fn new(rev: String, path: PathBuf) -> Self {
Self { rev, path }
}
pub fn rev(&self) -> &str {
&self.rev
}
pub fn path(&self) -> &PathBuf {
&self.path
}
}
impl FromStr for Submodule {
    type Err = eyre::Report;

    /// Parses one line of `git submodule status` output.
    fn from_str(s: &str) -> Result<Self> {
        let Some(caps) = SUBMODULE_STATUS_REGEX.captures(s) else {
            return Err(eyre::eyre!("Invalid submodule status format"));
        };
        // Captures 1 and 2 always exist when the regex matches.
        let rev = caps.get(1).unwrap().as_str().to_string();
        let path = PathBuf::from(caps.get(2).unwrap().as_str());
        Ok(Self { rev, path })
    }
}
/// Deserialized `git submodule status` output.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Submodules(pub Vec<Submodule>);
impl Submodules {
    /// Number of parsed submodules.
    pub fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns `true` when no submodules were parsed.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
impl FromStr for Submodules {
    type Err = eyre::Report;

    /// Parses the full (multi-line) output of `git submodule status`,
    /// failing on the first malformed line.
    fn from_str(s: &str) -> Result<Self> {
        let mut subs: Vec<Submodule> = Vec::new();
        for line in s.lines() {
            subs.push(line.parse()?);
        }
        Ok(Self(subs))
    }
}
impl<'a> IntoIterator for &'a Submodules {
    type Item = &'a Submodule;
    type IntoIter = std::slice::Iter<'a, Submodule>;
    /// Iterates over the contained submodules by reference.
    fn into_iter(self) -> Self::IntoIter {
        self.0.iter()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use foundry_common::fs;
use std::{env, fs::File, io::Write};
use tempfile::tempdir;
#[test]
fn parse_submodule_status() {
let s = "+8829465a08cac423dcf59852f21e448449c1a1a8 lib/openzeppelin-contracts (v4.8.0-791-g8829465a)";
let sub = Submodule::from_str(s).unwrap();
assert_eq!(sub.rev(), "8829465a08cac423dcf59852f21e448449c1a1a8");
assert_eq!(sub.path(), Path::new("lib/openzeppelin-contracts"));
let s = "-8829465a08cac423dcf59852f21e448449c1a1a8 lib/openzeppelin-contracts";
let sub = Submodule::from_str(s).unwrap();
assert_eq!(sub.rev(), "8829465a08cac423dcf59852f21e448449c1a1a8");
assert_eq!(sub.path(), Path::new("lib/openzeppelin-contracts"));
let s = "8829465a08cac423dcf59852f21e448449c1a1a8 lib/openzeppelin-contracts";
let sub = Submodule::from_str(s).unwrap();
assert_eq!(sub.rev(), "8829465a08cac423dcf59852f21e448449c1a1a8");
assert_eq!(sub.path(), Path::new("lib/openzeppelin-contracts"));
}
#[test]
fn parse_multiline_submodule_status() {
let s = r#"+d3db4ef90a72b7d24aa5a2e5c649593eaef7801d lib/forge-std (v1.9.4-6-gd3db4ef)
+8829465a08cac423dcf59852f21e448449c1a1a8 lib/openzeppelin-contracts (v4.8.0-791-g8829465a)
"#;
let subs = Submodules::from_str(s).unwrap().0;
assert_eq!(subs.len(), 2);
assert_eq!(subs[0].rev(), "d3db4ef90a72b7d24aa5a2e5c649593eaef7801d");
assert_eq!(subs[0].path(), Path::new("lib/forge-std"));
assert_eq!(subs[1].rev(), "8829465a08cac423dcf59852f21e448449c1a1a8");
assert_eq!(subs[1].path(), Path::new("lib/openzeppelin-contracts"));
}
#[test]
fn foundry_path_ext_works() {
let p = Path::new("contracts/MyTest.t.sol");
assert!(p.is_sol_test());
assert!(p.is_sol());
let p = Path::new("contracts/Greeter.sol");
assert!(!p.is_sol_test());
}
// loads .env from cwd and project dir, See [`find_project_root()`]
#[test]
fn can_load_dotenv() {
let temp = tempdir().unwrap();
Git::new(temp.path()).init().unwrap();
let cwd_env = temp.path().join(".env");
fs::create_file(temp.path().join("foundry.toml")).unwrap();
let nested = temp.path().join("nested");
fs::create_dir(&nested).unwrap();
let mut cwd_file = File::create(cwd_env).unwrap();
let mut prj_file = File::create(nested.join(".env")).unwrap();
cwd_file.write_all("TESTCWDKEY=cwd_val".as_bytes()).unwrap();
cwd_file.sync_all().unwrap();
prj_file.write_all("TESTPRJKEY=prj_val".as_bytes()).unwrap();
prj_file.sync_all().unwrap();
let cwd = env::current_dir().unwrap();
env::set_current_dir(nested).unwrap();
load_dotenv();
env::set_current_dir(cwd).unwrap();
assert_eq!(env::var("TESTCWDKEY").unwrap(), "cwd_val");
assert_eq!(env::var("TESTPRJKEY").unwrap(), "prj_val");
}
#[test]
fn test_read_gitmodules_regex() {
let gitmodules = r#"
[submodule "lib/solady"]
path = lib/solady
url = ""
branch = v0.1.0
[submodule "lib/openzeppelin-contracts"]
path = lib/openzeppelin-contracts
url = ""
branch = v4.8.0-791-g8829465a
[submodule "lib/forge-std"]
path = lib/forge-std
url = ""
"#;
let paths = SUBMODULE_BRANCH_REGEX
.captures_iter(gitmodules)
.map(|cap| {
(
PathBuf::from_str(cap.get(1).unwrap().as_str()).unwrap(),
String::from(cap.get(2).unwrap().as_str()),
)
})
.collect::<HashMap<_, _>>();
assert_eq!(paths.get(Path::new("lib/solady")).unwrap(), "v0.1.0");
assert_eq!(
paths.get(Path::new("lib/openzeppelin-contracts")).unwrap(),
"v4.8.0-791-g8829465a"
);
let no_branch_gitmodules = r#"
[submodule "lib/solady"]
path = lib/solady
url = ""
[submodule "lib/openzeppelin-contracts"]
path = lib/openzeppelin-contracts
url = ""
[submodule "lib/forge-std"]
path = lib/forge-std
url = ""
"#;
let paths = SUBMODULE_BRANCH_REGEX
.captures_iter(no_branch_gitmodules)
.map(|cap| {
(
PathBuf::from_str(cap.get(1).unwrap().as_str()).unwrap(),
String::from(cap.get(2).unwrap().as_str()),
)
})
.collect::<HashMap<_, _>>();
assert!(paths.is_empty());
let branch_in_between = r#"
[submodule "lib/solady"]
path = lib/solady
url = ""
[submodule "lib/openzeppelin-contracts"]
path = lib/openzeppelin-contracts
url = ""
branch = v4.8.0-791-g8829465a
[submodule "lib/forge-std"]
path = lib/forge-std
url = ""
"#;
let paths = SUBMODULE_BRANCH_REGEX
.captures_iter(branch_in_between)
.map(|cap| {
(
PathBuf::from_str(cap.get(1).unwrap().as_str()).unwrap(),
String::from(cap.get(2).unwrap().as_str()),
)
})
.collect::<HashMap<_, _>>();
assert_eq!(paths.len(), 1);
assert_eq!(
paths.get(Path::new("lib/openzeppelin-contracts")).unwrap(),
"v4.8.0-791-g8829465a"
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/utils/cmd.rs | crates/cli/src/utils/cmd.rs | use alloy_json_abi::JsonAbi;
use eyre::{Result, WrapErr};
use foundry_common::{TestFunctionExt, fs, fs::json_files, selectors::SelectorKind, shell};
use foundry_compilers::{
Artifact, ArtifactId, ProjectCompileOutput,
artifacts::{CompactBytecode, Settings},
cache::{CacheEntry, CompilerCache},
utils::read_json_file,
};
use foundry_config::{Chain, Config, NamedChain, error::ExtractConfigError, figment::Figment};
use foundry_evm::{
executors::{DeployResult, EvmError, RawCallResult},
opts::EvmOpts,
traces::{
CallTraceDecoder, TraceKind, Traces, decode_trace_arena, identifier::SignaturesCache,
prune_trace_depth, render_trace_arena_inner,
},
};
use std::{
fmt::Write,
path::{Path, PathBuf},
};
use yansi::Paint;
/// Given a `Project`'s output, finds the contract by path and name and returns its
/// ABI, creation bytecode, and `ArtifactId`.
#[track_caller]
pub fn find_contract_artifacts(
output: ProjectCompileOutput,
path: &Path,
name: &str,
) -> Result<(JsonAbi, CompactBytecode, ArtifactId)> {
let mut other = Vec::new();
let Some((id, contract)) = output.into_artifacts().find_map(|(id, artifact)| {
if id.name == name && id.source == path {
Some((id, artifact))
} else {
other.push(id.name);
None
}
}) else {
let mut err = format!("could not find artifact: `{name}`");
if let Some(suggestion) = super::did_you_mean(name, other).pop()
&& suggestion != name
{
err = format!(
r#"{err}
Did you mean `{suggestion}`?"#
);
}
eyre::bail!(err)
};
let abi = contract
.get_abi()
.ok_or_else(|| eyre::eyre!("contract {} does not contain abi", name))?
.into_owned();
let bin = contract
.get_bytecode()
.ok_or_else(|| eyre::eyre!("contract {} does not contain bytecode", name))?
.into_owned();
Ok((abi, bin, id))
}
/// Helper function for finding a contract by ContractName
// TODO: Is there a better / more ergonomic way to get the artifacts given a project and a
// contract name?
pub fn get_cached_entry_by_name(
cache: &CompilerCache<Settings>,
name: &str,
) -> Result<(PathBuf, CacheEntry)> {
let mut cached_entry = None;
let mut alternatives = Vec::new();
for (abs_path, entry) in &cache.files {
for artifact_name in entry.artifacts.keys() {
if artifact_name == name {
if cached_entry.is_some() {
eyre::bail!(
"contract with duplicate name `{}`. please pass the path instead",
name
)
}
cached_entry = Some((abs_path.to_owned(), entry.to_owned()));
} else {
alternatives.push(artifact_name);
}
}
}
if let Some(entry) = cached_entry {
return Ok(entry);
}
let mut err = format!("could not find artifact: `{name}`");
if let Some(suggestion) = super::did_you_mean(name, &alternatives).pop() {
err = format!(
r#"{err}
Did you mean `{suggestion}`?"#
);
}
eyre::bail!(err)
}
/// Returns error if constructor has arguments.
pub fn ensure_clean_constructor(abi: &JsonAbi) -> Result<()> {
if let Some(constructor) = &abi.constructor
&& !constructor.inputs.is_empty()
{
eyre::bail!(
"Contract constructor should have no arguments. Add those arguments to `run(...)` instead, and call it with `--sig run(...)`."
);
}
Ok(())
}
pub fn needs_setup(abi: &JsonAbi) -> bool {
let setup_fns: Vec<_> = abi.functions().filter(|func| func.name.is_setup()).collect();
for setup_fn in &setup_fns {
if setup_fn.name != "setUp" {
let _ = sh_warn!(
"Found invalid setup function \"{}\" did you mean \"setUp()\"?",
setup_fn.signature()
);
}
}
setup_fns.len() == 1 && setup_fns[0].name == "setUp"
}
pub fn eta_key(state: &indicatif::ProgressState, f: &mut dyn Write) {
write!(f, "{:.1}s", state.eta().as_secs_f64()).unwrap()
}
pub fn init_progress(len: u64, label: &str) -> indicatif::ProgressBar {
let pb = indicatif::ProgressBar::new(len);
let mut template =
"{prefix}{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {pos}/{len} "
.to_string();
write!(template, "{label}").unwrap();
template += " ({eta})";
pb.set_style(
indicatif::ProgressStyle::with_template(&template)
.unwrap()
.with_key("eta", crate::utils::eta_key)
.progress_chars("#>-"),
);
pb
}
/// True if the network calculates gas costs differently.
pub fn has_different_gas_calc(chain_id: u64) -> bool {
if let Some(chain) = Chain::from(chain_id).named() {
return chain.is_arbitrum()
|| chain.is_elastic()
|| matches!(
chain,
NamedChain::Acala
| NamedChain::AcalaMandalaTestnet
| NamedChain::AcalaTestnet
| NamedChain::Etherlink
| NamedChain::EtherlinkTestnet
| NamedChain::Karura
| NamedChain::KaruraTestnet
| NamedChain::Mantle
| NamedChain::MantleSepolia
| NamedChain::Monad
| NamedChain::MonadTestnet
| NamedChain::Moonbase
| NamedChain::Moonbeam
| NamedChain::MoonbeamDev
| NamedChain::Moonriver
| NamedChain::Metis
);
}
false
}
/// True if it supports broadcasting in batches.
pub fn has_batch_support(chain_id: u64) -> bool {
if let Some(chain) = Chain::from(chain_id).named() {
return !chain.is_arbitrum();
}
true
}
/// Helpers for loading configuration.
///
/// This is usually implemented through the macros defined in [`foundry_config`]. See
/// [`foundry_config::impl_figment_convert`] for more details.
///
/// By default each function will emit warnings generated during loading, unless the `_no_warnings`
/// variant is used.
pub trait LoadConfig {
/// Load the [`Config`] based on the options provided in self.
fn figment(&self) -> Figment;
/// Load and sanitize the [`Config`] based on the options provided in self.
fn load_config(&self) -> Result<Config, ExtractConfigError> {
self.load_config_no_warnings().inspect(emit_warnings)
}
/// Same as [`LoadConfig::load_config`] but does not emit warnings.
fn load_config_no_warnings(&self) -> Result<Config, ExtractConfigError> {
self.load_config_unsanitized_no_warnings().map(Config::sanitized)
}
/// Load [`Config`] but do not sanitize. See [`Config::sanitized`] for more information.
fn load_config_unsanitized(&self) -> Result<Config, ExtractConfigError> {
self.load_config_unsanitized_no_warnings().inspect(emit_warnings)
}
/// Same as [`LoadConfig::load_config_unsanitized`] but also emits warnings generated
fn load_config_unsanitized_no_warnings(&self) -> Result<Config, ExtractConfigError> {
Config::from_provider(self.figment())
}
/// Load and sanitize the [`Config`], as well as extract [`EvmOpts`] from self
fn load_config_and_evm_opts(&self) -> Result<(Config, EvmOpts)> {
self.load_config_and_evm_opts_no_warnings().inspect(|(config, _)| emit_warnings(config))
}
/// Same as [`LoadConfig::load_config_and_evm_opts`] but also emits warnings generated
fn load_config_and_evm_opts_no_warnings(&self) -> Result<(Config, EvmOpts)> {
let figment = self.figment();
let mut evm_opts = figment.extract::<EvmOpts>().map_err(ExtractConfigError::new)?;
let config = Config::from_provider(figment)?.sanitized();
// update the fork url if it was an alias
if let Some(fork_url) = config.get_rpc_url() {
trace!(target: "forge::config", ?fork_url, "Update EvmOpts fork url");
evm_opts.fork_url = Some(fork_url?.into_owned());
}
Ok((config, evm_opts))
}
}
impl<T> LoadConfig for T
where
for<'a> Figment: From<&'a T>,
{
fn figment(&self) -> Figment {
self.into()
}
}
fn emit_warnings(config: &Config) {
for warning in &config.warnings {
let _ = sh_warn!("{warning}");
}
}
/// Read contract constructor arguments from the given file.
pub fn read_constructor_args_file(constructor_args_path: PathBuf) -> Result<Vec<String>> {
if !constructor_args_path.exists() {
eyre::bail!("Constructor args file \"{}\" not found", constructor_args_path.display());
}
let args = if constructor_args_path.extension() == Some(std::ffi::OsStr::new("json")) {
read_json_file(&constructor_args_path).wrap_err(format!(
"Constructor args file \"{}\" must encode a json array",
constructor_args_path.display(),
))?
} else {
fs::read_to_string(constructor_args_path)?.split_whitespace().map(str::to_string).collect()
};
Ok(args)
}
/// A slimmed down return from the executor used for returning minimal trace + gas metering info
#[derive(Debug)]
pub struct TraceResult {
pub success: bool,
pub traces: Option<Traces>,
pub gas_used: u64,
}
impl TraceResult {
/// Create a new [`TraceResult`] from a [`RawCallResult`].
pub fn from_raw(raw: RawCallResult, trace_kind: TraceKind) -> Self {
let RawCallResult { gas_used, traces, reverted, .. } = raw;
Self { success: !reverted, traces: traces.map(|arena| vec![(trace_kind, arena)]), gas_used }
}
}
impl From<DeployResult> for TraceResult {
fn from(result: DeployResult) -> Self {
Self::from_raw(result.raw, TraceKind::Deployment)
}
}
impl TryFrom<Result<DeployResult, EvmError>> for TraceResult {
type Error = EvmError;
fn try_from(value: Result<DeployResult, EvmError>) -> Result<Self, Self::Error> {
match value {
Ok(result) => Ok(Self::from(result)),
Err(EvmError::Execution(err)) => Ok(Self::from_raw(err.raw, TraceKind::Deployment)),
Err(err) => Err(err),
}
}
}
impl From<RawCallResult> for TraceResult {
fn from(result: RawCallResult) -> Self {
Self::from_raw(result, TraceKind::Execution)
}
}
impl TryFrom<Result<RawCallResult>> for TraceResult {
type Error = EvmError;
fn try_from(value: Result<RawCallResult>) -> Result<Self, Self::Error> {
match value {
Ok(result) => Ok(Self::from(result)),
Err(err) => Err(EvmError::from(err)),
}
}
}
pub async fn print_traces(
result: &mut TraceResult,
decoder: &CallTraceDecoder,
verbose: bool,
state_changes: bool,
trace_depth: Option<usize>,
) -> Result<()> {
let traces = result.traces.as_mut().expect("No traces found");
if !shell::is_json() {
sh_println!("Traces:")?;
}
for (_, arena) in traces {
decode_trace_arena(arena, decoder).await;
if let Some(trace_depth) = trace_depth {
prune_trace_depth(arena, trace_depth);
}
sh_println!("{}", render_trace_arena_inner(arena, verbose, state_changes))?;
}
if shell::is_json() {
return Ok(());
}
sh_println!()?;
if result.success {
sh_println!("{}", "Transaction successfully executed.".green())?;
} else {
sh_err!("Transaction failed.")?;
}
sh_println!("Gas used: {}", result.gas_used)?;
Ok(())
}
/// Traverse the artifacts in the project to generate local signatures and merge them into the cache
/// file.
pub fn cache_local_signatures(output: &ProjectCompileOutput) -> Result<()> {
let Some(cache_dir) = Config::foundry_cache_dir() else {
eyre::bail!("Failed to get `cache_dir` to generate local signatures.");
};
let path = cache_dir.join("signatures");
let mut signatures = SignaturesCache::load(&path);
for (_, artifact) in output.artifacts() {
if let Some(abi) = &artifact.abi {
signatures.extend_from_abi(abi);
}
// External libraries don't have functions included in the ABI, but `methodIdentifiers`.
if let Some(method_identifiers) = &artifact.method_identifiers {
signatures.extend(method_identifiers.iter().filter_map(|(signature, selector)| {
Some((SelectorKind::Function(selector.parse().ok()?), signature.clone()))
}));
}
}
signatures.save(&path);
Ok(())
}
/// Traverses all files at `folder_path`, parses any JSON ABI files found,
/// and caches their function/event/error signatures to the local signatures cache.
pub fn cache_signatures_from_abis(folder_path: impl AsRef<Path>) -> Result<()> {
let Some(cache_dir) = Config::foundry_cache_dir() else {
eyre::bail!("Failed to get `cache_dir` to generate local signatures.");
};
let path = cache_dir.join("signatures");
let mut signatures = SignaturesCache::load(&path);
json_files(folder_path.as_ref())
.filter_map(|path| std::fs::read_to_string(&path).ok())
.filter_map(|content| serde_json::from_str::<JsonAbi>(&content).ok())
.for_each(|json_abi| signatures.extend_from_abi(&json_abi));
signatures.save(&path);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use tempfile::tempdir;
#[test]
fn test_cache_signatures_from_abis() {
let temp_dir = tempdir().unwrap();
let abi_json = r#"[
{
"type": "function",
"name": "myCustomFunction",
"inputs": [{"name": "amount", "type": "uint256"}],
"outputs": [],
"stateMutability": "nonpayable"
},
{
"type": "event",
"name": "MyCustomEvent",
"inputs": [{"name": "value", "type": "uint256", "indexed": false}],
"anonymous": false
},
{
"type": "error",
"name": "MyCustomError",
"inputs": [{"name": "code", "type": "uint256"}]
}
]"#;
let abi_path = temp_dir.path().join("test.json");
fs::write(&abi_path, abi_json).unwrap();
cache_signatures_from_abis(temp_dir.path()).unwrap();
let cache_dir = Config::foundry_cache_dir().unwrap();
let cache_path = cache_dir.join("signatures");
let cache = SignaturesCache::load(&cache_path);
let func_selector: alloy_primitives::Selector = "0x2e2dbaf7".parse().unwrap();
assert!(cache.contains_key(&SelectorKind::Function(func_selector)));
let event_selector: alloy_primitives::B256 =
"0x8cc20c47f3a2463817352f75dec0dbf43a7a771b5f6817a92bd5724c1f4aa745".parse().unwrap();
assert!(cache.contains_key(&SelectorKind::Event(event_selector)));
let error_selector: alloy_primitives::Selector = "0xd35f45de".parse().unwrap();
assert!(cache.contains_key(&SelectorKind::Error(error_selector)));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/evm.rs | crates/cli/src/opts/evm.rs | //! CLI arguments for configuring the EVM settings.
use alloy_primitives::{Address, B256, U256};
use clap::Parser;
use eyre::ContextCompat;
use foundry_config::{
Chain, Config,
figment::{
self, Metadata, Profile, Provider,
error::Kind::InvalidType,
value::{Dict, Map, Value},
},
};
use serde::Serialize;
use foundry_common::shell;
/// `EvmArgs` and `EnvArgs` take the highest precedence in the Config/Figment hierarchy.
///
/// All vars are opt-in, their default values are expected to be set by the
/// [`foundry_config::Config`], and are always present ([`foundry_config::Config::default`])
///
/// Both have corresponding types in the `evm_adapters` crate which have mandatory fields.
/// The expected workflow is
/// 1. load the [`foundry_config::Config`]
/// 2. merge with `EvmArgs` into a `figment::Figment`
/// 3. extract `evm_adapters::Opts` from the merged `Figment`
///
/// # Example
///
/// ```ignore
/// use foundry_config::Config;
/// use forge::executor::opts::EvmOpts;
/// use foundry_cli::opts::EvmArgs;
/// # fn t(args: EvmArgs) {
/// let figment = Config::figment_with_root(".").merge(args);
/// let opts = figment.extract::<EvmOpts>().unwrap();
/// # }
/// ```
#[derive(Clone, Debug, Default, Serialize, Parser)]
#[command(next_help_heading = "EVM options", about = None, long_about = None)] // override doc
pub struct EvmArgs {
/// Fetch state over a remote endpoint instead of starting from an empty state.
///
/// If you want to fetch state from a specific block number, see --fork-block-number.
#[arg(long, short, visible_alias = "rpc-url", value_name = "URL")]
#[serde(rename = "eth_rpc_url", skip_serializing_if = "Option::is_none")]
pub fork_url: Option<String>,
/// Fetch state from a specific block number over a remote endpoint.
///
/// See --fork-url.
#[arg(long, requires = "fork_url", value_name = "BLOCK")]
#[serde(skip_serializing_if = "Option::is_none")]
pub fork_block_number: Option<u64>,
/// Number of retries.
///
/// See --fork-url.
#[arg(long, requires = "fork_url", value_name = "RETRIES")]
#[serde(skip_serializing_if = "Option::is_none")]
pub fork_retries: Option<u32>,
/// Initial retry backoff on encountering errors.
///
/// See --fork-url.
#[arg(long, requires = "fork_url", value_name = "BACKOFF")]
#[serde(skip_serializing_if = "Option::is_none")]
pub fork_retry_backoff: Option<u64>,
/// Explicitly disables the use of RPC caching.
///
/// All storage slots are read entirely from the endpoint.
///
/// This flag overrides the project's configuration file.
///
/// See --fork-url.
#[arg(long)]
#[serde(skip)]
pub no_storage_caching: bool,
/// The initial balance of deployed test contracts.
#[arg(long, value_name = "BALANCE")]
#[serde(skip_serializing_if = "Option::is_none")]
pub initial_balance: Option<U256>,
/// The address which will be executing tests/scripts.
#[arg(long, value_name = "ADDRESS")]
#[serde(skip_serializing_if = "Option::is_none")]
pub sender: Option<Address>,
/// Enable the FFI cheatcode.
#[arg(long)]
#[serde(skip)]
pub ffi: bool,
/// Use the create 2 factory in all cases including tests and non-broadcasting scripts.
#[arg(long)]
#[serde(skip)]
pub always_use_create_2_factory: bool,
/// The CREATE2 deployer address to use, this will override the one in the config.
#[arg(long, value_name = "ADDRESS")]
#[serde(skip_serializing_if = "Option::is_none")]
pub create2_deployer: Option<Address>,
/// Sets the number of assumed available compute units per second for this provider
///
/// default value: 330
///
/// See also --fork-url and <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
#[arg(long, alias = "cups", value_name = "CUPS", help_heading = "Fork config")]
#[serde(skip_serializing_if = "Option::is_none")]
pub compute_units_per_second: Option<u64>,
/// Disables rate limiting for this node's provider.
///
/// See also --fork-url and <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
#[arg(
long,
value_name = "NO_RATE_LIMITS",
help_heading = "Fork config",
visible_alias = "no-rate-limit"
)]
#[serde(skip)]
pub no_rpc_rate_limit: bool,
/// All ethereum environment related arguments
#[command(flatten)]
#[serde(flatten)]
pub env: EnvArgs,
/// Whether to enable isolation of calls.
/// In isolation mode all top-level calls are executed as a separate transaction in a separate
/// EVM context, enabling more precise gas accounting and transaction state changes.
#[arg(long)]
#[serde(skip)]
pub isolate: bool,
}
// Make this set of options a `figment::Provider` so that it can be merged into the `Config`
impl Provider for EvmArgs {
fn metadata(&self) -> Metadata {
Metadata::named("Evm Opts Provider")
}
fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
let value = Value::serialize(self)?;
let error = InvalidType(value.to_actual(), "map".into());
let mut dict = value.into_dict().ok_or(error)?;
if shell::verbosity() > 0 {
// need to merge that manually otherwise `from_occurrences` does not work
dict.insert("verbosity".to_string(), shell::verbosity().into());
}
if self.ffi {
dict.insert("ffi".to_string(), self.ffi.into());
}
if self.isolate {
dict.insert("isolate".to_string(), self.isolate.into());
}
if self.always_use_create_2_factory {
dict.insert(
"always_use_create_2_factory".to_string(),
self.always_use_create_2_factory.into(),
);
}
if self.no_storage_caching {
dict.insert("no_storage_caching".to_string(), self.no_storage_caching.into());
}
if self.no_rpc_rate_limit {
dict.insert("no_rpc_rate_limit".to_string(), self.no_rpc_rate_limit.into());
}
Ok(Map::from([(Config::selected_profile(), dict)]))
}
}
/// Configures the executor environment during tests.
#[derive(Clone, Debug, Default, Serialize, Parser)]
#[command(next_help_heading = "Executor environment config")]
pub struct EnvArgs {
/// EIP-170: Contract code size limit in bytes. Useful to increase this because of tests. By
/// default, it is 0x6000 (~25kb).
#[arg(long, value_name = "CODE_SIZE")]
#[serde(skip_serializing_if = "Option::is_none")]
pub code_size_limit: Option<usize>,
/// The chain name or EIP-155 chain ID.
#[arg(long, visible_alias = "chain-id", value_name = "CHAIN")]
#[serde(rename = "chain_id", skip_serializing_if = "Option::is_none", serialize_with = "id")]
pub chain: Option<Chain>,
/// The gas price.
#[arg(long, value_name = "GAS_PRICE")]
#[serde(skip_serializing_if = "Option::is_none")]
pub gas_price: Option<u64>,
/// The base fee in a block.
#[arg(long, visible_alias = "base-fee", value_name = "FEE")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_base_fee_per_gas: Option<u64>,
/// The transaction origin.
#[arg(long, value_name = "ADDRESS")]
#[serde(skip_serializing_if = "Option::is_none")]
pub tx_origin: Option<Address>,
/// The coinbase of the block.
#[arg(long, value_name = "ADDRESS")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_coinbase: Option<Address>,
/// The timestamp of the block.
#[arg(long, value_name = "TIMESTAMP")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_timestamp: Option<u64>,
/// The block number.
#[arg(long, value_name = "BLOCK")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_number: Option<u64>,
/// The block difficulty.
#[arg(long, value_name = "DIFFICULTY")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_difficulty: Option<u64>,
/// The block prevrandao value. NOTE: Before merge this field was mix_hash.
#[arg(long, value_name = "PREVRANDAO")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_prevrandao: Option<B256>,
/// The block gas limit.
#[arg(long, visible_alias = "gas-limit", value_name = "BLOCK_GAS_LIMIT")]
#[serde(skip_serializing_if = "Option::is_none")]
pub block_gas_limit: Option<u64>,
/// The memory limit per EVM execution in bytes.
/// If this limit is exceeded, a `MemoryLimitOOG` result is thrown.
///
/// The default is 128MiB.
#[arg(long, value_name = "MEMORY_LIMIT")]
#[serde(skip_serializing_if = "Option::is_none")]
pub memory_limit: Option<u64>,
/// Whether to disable the block gas limit checks.
#[arg(long, visible_aliases = &["no-block-gas-limit", "no-gas-limit"])]
#[serde(skip_serializing_if = "std::ops::Not::not")]
pub disable_block_gas_limit: bool,
/// Whether to enable tx gas limit checks as imposed by Osaka (EIP-7825).
#[arg(long, visible_alias = "tx-gas-limit")]
#[serde(skip_serializing_if = "std::ops::Not::not")]
pub enable_tx_gas_limit: bool,
}
impl EvmArgs {
/// Ensures that fork url exists and returns its reference.
pub fn ensure_fork_url(&self) -> eyre::Result<&String> {
self.fork_url.as_ref().wrap_err("Missing `--fork-url` field.")
}
}
/// We have to serialize chain IDs and not names because when extracting an EVM `Env`, it expects
/// `chain_id` to be `u64`.
fn id<S: serde::Serializer>(chain: &Option<Chain>, s: S) -> Result<S::Ok, S::Error> {
if let Some(chain) = chain {
s.serialize_u64(chain.id())
} else {
// skip_serializing_if = "Option::is_none" should prevent this branch from being taken
unreachable!()
}
}
#[cfg(test)]
mod tests {
use super::*;
use foundry_config::NamedChain;
#[test]
fn compute_units_per_second_skips_when_none() {
let args = EvmArgs::default();
let data = args.data().expect("provider data");
let dict = data.get(&Config::selected_profile()).expect("profile dict");
assert!(
!dict.contains_key("compute_units_per_second"),
"compute_units_per_second should be skipped when None"
);
}
#[test]
fn compute_units_per_second_present_when_some() {
let args = EvmArgs { compute_units_per_second: Some(1000), ..Default::default() };
let data = args.data().expect("provider data");
let dict = data.get(&Config::selected_profile()).expect("profile dict");
let val = dict.get("compute_units_per_second").expect("cups present");
assert_eq!(val, &Value::from(1000u64));
}
#[test]
fn can_parse_chain_id() {
let args = EvmArgs {
env: EnvArgs { chain: Some(NamedChain::Mainnet.into()), ..Default::default() },
..Default::default()
};
let config = Config::from_provider(Config::figment().merge(args)).unwrap();
assert_eq!(config.chain, Some(NamedChain::Mainnet.into()));
let env = EnvArgs::parse_from(["foundry-cli", "--chain-id", "goerli"]);
assert_eq!(env.chain, Some(NamedChain::Goerli.into()));
}
#[test]
fn test_memory_limit() {
let args = EvmArgs {
env: EnvArgs { chain: Some(NamedChain::Mainnet.into()), ..Default::default() },
..Default::default()
};
let config = Config::from_provider(Config::figment().merge(args)).unwrap();
assert_eq!(config.memory_limit, Config::default().memory_limit);
let env = EnvArgs::parse_from(["foundry-cli", "--memory-limit", "100"]);
assert_eq!(env.memory_limit, Some(100));
}
#[test]
fn test_chain_id() {
let env = EnvArgs::parse_from(["foundry-cli", "--chain-id", "1"]);
assert_eq!(env.chain, Some(Chain::mainnet()));
let env = EnvArgs::parse_from(["foundry-cli", "--chain-id", "mainnet"]);
assert_eq!(env.chain, Some(Chain::mainnet()));
let args = EvmArgs { env, ..Default::default() };
let config = Config::from_provider(Config::figment().merge(args)).unwrap();
assert_eq!(config.chain, Some(Chain::mainnet()));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/global.rs | crates/cli/src/opts/global.rs | use clap::{ArgAction, Parser};
use foundry_common::{
shell::{ColorChoice, OutputFormat, OutputMode, Shell, Verbosity},
version::{IS_NIGHTLY_VERSION, NIGHTLY_VERSION_WARNING_MESSAGE},
};
use serde::{Deserialize, Serialize};
/// Global arguments for the CLI.
#[derive(Clone, Debug, Default, Serialize, Deserialize, Parser)]
pub struct GlobalArgs {
/// Verbosity level of the log messages.
///
/// Pass multiple times to increase the verbosity (e.g. -v, -vv, -vvv).
///
/// Depending on the context the verbosity levels have different meanings.
///
/// For example, the verbosity levels of the EVM are:
/// - 2 (-vv): Print logs for all tests.
/// - 3 (-vvv): Print execution traces for failing tests.
/// - 4 (-vvvv): Print execution traces for all tests, and setup traces for failing tests.
/// - 5 (-vvvvv): Print execution and setup traces for all tests, including storage changes and
/// backtraces with line numbers.
#[arg(help_heading = "Display options", global = true, short, long, verbatim_doc_comment, conflicts_with = "quiet", action = ArgAction::Count)]
verbosity: Verbosity,
/// Do not print log messages.
#[arg(help_heading = "Display options", global = true, short, long, alias = "silent")]
quiet: bool,
/// Format log messages as JSON.
#[arg(help_heading = "Display options", global = true, long, alias = "format-json", conflicts_with_all = &["quiet", "color"])]
json: bool,
/// Format log messages as Markdown.
#[arg(
help_heading = "Display options",
global = true,
long,
alias = "markdown",
conflicts_with = "json"
)]
md: bool,
/// The color of the log messages.
#[arg(help_heading = "Display options", global = true, long, value_enum)]
color: Option<ColorChoice>,
/// Number of threads to use. Specifying 0 defaults to the number of logical cores.
#[arg(global = true, long, short = 'j', visible_alias = "jobs")]
threads: Option<usize>,
}
impl GlobalArgs {
/// Initialize the global options.
pub fn init(&self) -> eyre::Result<()> {
// Set the global shell.
let shell = self.shell();
// Argument takes precedence over the env var global color choice.
match shell.color_choice() {
ColorChoice::Auto => {}
ColorChoice::Always => yansi::enable(),
ColorChoice::Never => yansi::disable(),
}
shell.set();
// Initialize the thread pool only if `threads` was requested to avoid unnecessary overhead.
if self.threads.is_some() {
self.force_init_thread_pool()?;
}
// Display a warning message if the current version is not stable.
if IS_NIGHTLY_VERSION
&& !self.json
&& std::env::var_os("FOUNDRY_DISABLE_NIGHTLY_WARNING").is_none()
{
let _ = sh_warn!("{}", NIGHTLY_VERSION_WARNING_MESSAGE);
}
Ok(())
}
/// Create a new shell instance.
pub fn shell(&self) -> Shell {
let mode = match self.quiet {
true => OutputMode::Quiet,
false => OutputMode::Normal,
};
let color = self.json.then_some(ColorChoice::Never).or(self.color).unwrap_or_default();
let format = if self.json {
OutputFormat::Json
} else if self.md {
OutputFormat::Markdown
} else {
OutputFormat::Text
};
Shell::new_with(format, mode, color, self.verbosity)
}
/// Initialize the global thread pool.
pub fn force_init_thread_pool(&self) -> eyre::Result<()> {
init_thread_pool(self.threads.unwrap_or(0))
}
/// Creates a new tokio runtime.
#[track_caller]
pub fn tokio_runtime(&self) -> tokio::runtime::Runtime {
let mut builder = tokio::runtime::Builder::new_multi_thread();
if let Some(threads) = self.threads
&& threads > 0
{
builder.worker_threads(threads);
}
builder.enable_all().build().expect("failed to create tokio runtime")
}
/// Creates a new tokio runtime and blocks on the future.
#[track_caller]
pub fn block_on<F: std::future::Future>(&self, future: F) -> F::Output {
self.tokio_runtime().block_on(future)
}
}
/// Initialize the global thread pool.
pub fn init_thread_pool(threads: usize) -> eyre::Result<()> {
rayon::ThreadPoolBuilder::new()
.thread_name(|i| format!("foundry-{i}"))
.num_threads(threads)
.build_global()?;
Ok(())
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/dependency.rs | crates/cli/src/opts/dependency.rs | //! CLI dependency parsing
use eyre::Result;
use regex::Regex;
use std::{str::FromStr, sync::LazyLock};
/// Matches a bare GitHub-style `org/repo` slug (e.g. `gakonst/lootloose`).
static GH_REPO_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"[\w-]+/[\w.-]+").unwrap());

/// Git repo prefix regex
///
/// Captures an optional transport prefix (`git@`, `git+https://`, `https://`,
/// a tokenized `https://<token>@`, or an `org-<id>@` SSH form) followed by the
/// host as `brand`.`tld` and the trailing `/` or `:` separator.
pub static GH_REPO_PREFIX_REGEX: LazyLock<Regex> = LazyLock::new(|| {
    Regex::new(r"((git@)|(git\+https://)|(https://)|https://(?P<token>[^@]+)@|(org-([A-Za-z0-9-])+@))?(?P<brand>[A-Za-z0-9-]+)\.(?P<tld>[A-Za-z0-9-]+)(/|:)")
        .unwrap()
});

/// Matches an explicit ref-type marker: `@tag=`, `@branch=` or `@rev=`.
static VERSION_PREFIX_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r#"@(tag|branch|rev)="#).unwrap());

// Default host used when only an `org/repo` slug is given.
const GITHUB: &str = "github.com";
// Separates the repo part from a trailing version/tag, e.g. `org/repo@v1`.
const VERSION_SEPARATOR: char = '@';
// Separates a local alias from the dependency, e.g. `alias=org/repo`.
const ALIAS_SEPARATOR: char = '=';

/// Commonly used aliases for solidity repos,
///
/// These will be autocorrected when used in place of the `org`
/// (e.g. `@openzeppelin/contracts` -> `openzeppelin/contracts`).
const COMMON_ORG_ALIASES: &[(&str, &str); 2] =
    &[("@openzeppelin", "openzeppelin"), ("@aave", "aave")];
/// A git dependency which will be installed as a submodule
///
/// A dependency can be provided as a raw URL, or as a path to a Github repository
/// e.g. `org-name/repo-name`
///
/// Providing a ref can be done in the following 3 ways:
/// * branch: master
/// * tag: v0.1.1
/// * commit: 8e8128
///
/// Non Github URLs must be provided with an https:// prefix.
/// Adding dependencies as local paths is not supported yet.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Dependency {
    /// The name of the dependency.
    ///
    /// Derived from the last path segment of the URL, or from the alias when
    /// no URL could be determined (see `FromStr`).
    pub name: String,
    /// The url to the git repository corresponding to the dependency.
    ///
    /// `None` when the input was a bare alias rather than a URL or repo slug.
    pub url: Option<String>,
    /// Optional tag corresponding to a Git SHA, tag, or branch.
    pub tag: Option<String>,
    /// Optional alias of the dependency
    pub alias: Option<String>,
}
impl FromStr for Dependency {
    type Err = eyre::Error;

    /// Parses a dependency spec of the general shape
    /// `[alias=]<url | org/repo | alias>[@tag | @tag=... | @branch=... | @rev=...]`.
    fn from_str(dependency: &str) -> Result<Self, Self::Err> {
        // Handle dependency exact ref type (`@tag=`, `@branch=` or `@rev=`)`.
        // Only extract version for first tag/branch/commit specified.
        let url_and_version: Vec<&str> = VERSION_PREFIX_REGEX.split(dependency).collect();
        let dependency = url_and_version[0];
        let mut tag_or_branch = url_and_version.get(1).map(|version| version.to_string());

        // everything before "=" should be considered the alias
        let (mut alias, dependency) = if let Some(split) = dependency.split_once(ALIAS_SEPARATOR) {
            (Some(String::from(split.0)), split.1.to_string())
        } else {
            let mut dependency = dependency.to_string();
            // this will autocorrect wrong conventional aliases for tag, but only autocorrect if
            // it's not used as alias
            for (alias, real_org) in COMMON_ORG_ALIASES {
                if dependency.starts_with(alias) {
                    dependency = dependency.replacen(alias, real_org, 1);
                    break;
                }
            }

            (None, dependency)
        };

        let dependency = dependency.as_str();
        // Normalize a recognized URL prefix into either an https or an ssh form,
        // stripping any trailing `.git`.
        let url_with_version = if let Some(captures) = GH_REPO_PREFIX_REGEX.captures(dependency) {
            let brand = captures.name("brand").unwrap().as_str();
            let tld = captures.name("tld").unwrap().as_str();
            let project = GH_REPO_PREFIX_REGEX.replace(dependency, "");
            if let Some(token) = captures.name("token") {
                // Preserve an embedded access token, e.g. `https://<token>@github.com/...`.
                Some(format!(
                    "https://{}@{brand}.{tld}/{}",
                    token.as_str(),
                    project.trim_end_matches(".git")
                ))
            } else if dependency.starts_with("git@") {
                Some(format!("git@{brand}.{tld}:{}", project.trim_end_matches(".git")))
            } else {
                Some(format!("https://{brand}.{tld}/{}", project.trim_end_matches(".git")))
            }
        } else {
            // If we don't have a URL and we don't have a valid
            // GitHub repository name, then we assume this is the alias.
            //
            // This is to allow for conveniently removing aliased dependencies
            // using `forge remove <alias>`
            if GH_REPO_REGEX.is_match(dependency) {
                Some(format!("https://{GITHUB}/{dependency}"))
            } else {
                alias = Some(dependency.to_string());
                None
            }
        };

        // everything after the last "@" should be considered the version if there are no path
        // segments
        let (url, name, tag) = if let Some(url_with_version) = url_with_version {
            // `@`s are actually valid github project name chars but we assume this is unlikely and
            // treat everything after the last `@` as the version tag there's still the
            // case that the user tries to use `@<org>/<project>`, so we need to check that the
            // `tag` does not contain a slash
            let mut split = url_with_version.rsplit(VERSION_SEPARATOR);
            let mut url = url_with_version.as_str();
            if tag_or_branch.is_none() {
                let maybe_tag_or_branch = split.next().unwrap();
                if let Some(actual_url) = split.next()
                    && !maybe_tag_or_branch.contains('/')
                {
                    tag_or_branch = Some(maybe_tag_or_branch.to_string());
                    url = actual_url;
                }
            }

            let url = url.to_string();
            // The dependency name defaults to the last path segment of the URL.
            let name = url
                .split('/')
                .next_back()
                .ok_or_else(|| eyre::eyre!("no dependency name found"))?
                .to_string();

            (Some(url), Some(name), tag_or_branch)
        } else {
            (None, None, None)
        };

        Ok(Self { name: name.or_else(|| alias.clone()).unwrap(), url, tag, alias })
    }
}
impl Dependency {
    /// Returns the name of the dependency, prioritizing the alias if it exists.
    pub fn name(&self) -> &str {
        match self.alias.as_deref() {
            Some(alias) => alias,
            None => &self.name,
        }
    }

    /// Returns the URL of the dependency if it exists, or an error if not.
    pub fn require_url(&self) -> Result<&str> {
        match self.url.as_deref() {
            Some(url) => Ok(url),
            None => Err(eyre::eyre!("dependency {} has no url", self.name())),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use foundry_compilers::info::ContractInfo;

    /// Table-driven check over `(input, expected_url, expected_tag, expected_alias)` tuples.
    #[test]
    fn parses_dependencies() {
        [
            ("gakonst/lootloose", "https://github.com/gakonst/lootloose", None, None),
            ("github.com/gakonst/lootloose", "https://github.com/gakonst/lootloose", None, None),
            (
                "https://github.com/gakonst/lootloose",
                "https://github.com/gakonst/lootloose",
                None,
                None,
            ),
            (
                "git+https://github.com/gakonst/lootloose",
                "https://github.com/gakonst/lootloose",
                None,
                None,
            ),
            (
                "git@github.com:gakonst/lootloose@tag=v1",
                "git@github.com:gakonst/lootloose",
                Some("v1"),
                None,
            ),
            ("git@github.com:gakonst/lootloose", "git@github.com:gakonst/lootloose", None, None),
            (
                "https://gitlab.com/gakonst/lootloose",
                "https://gitlab.com/gakonst/lootloose",
                None,
                None,
            ),
            (
                "https://github.xyz/gakonst/lootloose",
                "https://github.xyz/gakonst/lootloose",
                None,
                None,
            ),
            (
                "gakonst/lootloose@0.1.0",
                "https://github.com/gakonst/lootloose",
                Some("0.1.0"),
                None,
            ),
            (
                "gakonst/lootloose@develop",
                "https://github.com/gakonst/lootloose",
                Some("develop"),
                None,
            ),
            (
                "gakonst/lootloose@98369d0edc900c71d0ec33a01dfba1d92111deed",
                "https://github.com/gakonst/lootloose",
                Some("98369d0edc900c71d0ec33a01dfba1d92111deed"),
                None,
            ),
            ("loot=gakonst/lootloose", "https://github.com/gakonst/lootloose", None, Some("loot")),
            (
                "loot=github.com/gakonst/lootloose",
                "https://github.com/gakonst/lootloose",
                None,
                Some("loot"),
            ),
            (
                "loot=https://github.com/gakonst/lootloose",
                "https://github.com/gakonst/lootloose",
                None,
                Some("loot"),
            ),
            (
                "loot=git+https://github.com/gakonst/lootloose",
                "https://github.com/gakonst/lootloose",
                None,
                Some("loot"),
            ),
            (
                "loot=git@github.com:gakonst/lootloose@tag=v1",
                "git@github.com:gakonst/lootloose",
                Some("v1"),
                Some("loot"),
            ),
        ]
        .iter()
        .for_each(|(input, expected_path, expected_tag, expected_alias)| {
            let dep = Dependency::from_str(input).unwrap();
            assert_eq!(dep.url, Some(expected_path.to_string()));
            assert_eq!(dep.tag, expected_tag.map(ToString::to_string));
            assert_eq!(dep.name, "lootloose");
            assert_eq!(dep.alias, expected_alias.map(ToString::to_string));
        });
    }

    /// A bare word that is not an `org/repo` slug becomes an alias with no URL.
    #[test]
    fn can_parse_alias_only() {
        let dep = Dependency::from_str("foo").unwrap();
        assert_eq!(dep.name, "foo");
        assert_eq!(dep.url, None);
        assert_eq!(dep.tag, None);
        assert_eq!(dep.alias, Some("foo".to_string()));
    }

    #[test]
    fn test_invalid_github_repo_dependency() {
        let dep = Dependency::from_str("solmate").unwrap();
        assert_eq!(dep.url, None);
    }

    #[test]
    fn parses_contract_info() {
        [
            (
                "src/contracts/Contracts.sol:Contract",
                Some("src/contracts/Contracts.sol"),
                "Contract",
            ),
            ("Contract", None, "Contract"),
        ]
        .iter()
        .for_each(|(input, expected_path, expected_name)| {
            let contract = ContractInfo::from_str(input).unwrap();
            assert_eq!(contract.path, expected_path.map(ToString::to_string));
            assert_eq!(contract.name, expected_name.to_string());
        });
    }

    #[test]
    fn contract_info_should_reject_without_name() {
        ["src/contracts/", "src/contracts/Contracts.sol"].iter().for_each(|input| {
            let contract = ContractInfo::from_str(input);
            assert!(contract.is_err())
        });
    }

    /// `@openzeppelin/...` is autocorrected to the `openzeppelin` org (see
    /// `COMMON_ORG_ALIASES`).
    #[test]
    fn can_parse_oz_dep() {
        let dep = Dependency::from_str("@openzeppelin/contracts-upgradeable").unwrap();
        assert_eq!(dep.name, "contracts-upgradeable");
        assert_eq!(
            dep.url,
            Some("https://github.com/openzeppelin/contracts-upgradeable".to_string())
        );
        assert_eq!(dep.tag, None);
        assert_eq!(dep.alias, None);
    }

    #[test]
    fn can_parse_oz_dep_tag() {
        let dep = Dependency::from_str("@openzeppelin/contracts-upgradeable@v1").unwrap();
        assert_eq!(dep.name, "contracts-upgradeable");
        assert_eq!(
            dep.url,
            Some("https://github.com/openzeppelin/contracts-upgradeable".to_string())
        );
        assert_eq!(dep.tag, Some("v1".to_string()));
        assert_eq!(dep.alias, None);
    }

    #[test]
    fn can_parse_oz_with_tag() {
        let dep = Dependency::from_str("OpenZeppelin/openzeppelin-contracts@v4.7.0").unwrap();
        assert_eq!(dep.name, "openzeppelin-contracts");
        assert_eq!(
            dep.url,
            Some("https://github.com/OpenZeppelin/openzeppelin-contracts".to_string())
        );
        assert_eq!(dep.tag, Some("v4.7.0".to_string()));
        assert_eq!(dep.alias, None);

        let dep = Dependency::from_str("OpenZeppelin/openzeppelin-contracts@4.7.0").unwrap();
        assert_eq!(dep.name, "openzeppelin-contracts");
        assert_eq!(
            dep.url,
            Some("https://github.com/OpenZeppelin/openzeppelin-contracts".to_string())
        );
        assert_eq!(dep.tag, Some("4.7.0".to_string()));
        assert_eq!(dep.alias, None);
    }

    // <https://github.com/foundry-rs/foundry/pull/3130>
    #[test]
    fn can_parse_oz_with_alias() {
        let dep =
            Dependency::from_str("@openzeppelin=OpenZeppelin/openzeppelin-contracts").unwrap();
        assert_eq!(dep.name, "openzeppelin-contracts");
        assert_eq!(dep.alias, Some("@openzeppelin".to_string()));
        assert_eq!(
            dep.url,
            Some("https://github.com/OpenZeppelin/openzeppelin-contracts".to_string())
        );
    }

    #[test]
    fn can_parse_aave() {
        let dep = Dependency::from_str("@aave/aave-v3-core").unwrap();
        assert_eq!(dep.name, "aave-v3-core");
        assert_eq!(dep.url, Some("https://github.com/aave/aave-v3-core".to_string()));
    }

    #[test]
    fn can_parse_aave_with_alias() {
        let dep = Dependency::from_str("@aave=aave/aave-v3-core").unwrap();
        assert_eq!(dep.name, "aave-v3-core");
        assert_eq!(dep.alias, Some("@aave".to_string()));
        assert_eq!(dep.url, Some("https://github.com/aave/aave-v3-core".to_string()));
    }

    /// The `org-<id>@host:...` SSH form must be recognized by the prefix regex.
    #[test]
    fn can_parse_org_ssh_url() {
        let org_url = "org-git12345678@github.com:my-org/my-repo.git";
        assert!(GH_REPO_PREFIX_REGEX.is_match(org_url));
    }

    #[test]
    fn can_parse_org_shh_url_dependency() {
        let dep: Dependency = "org-git12345678@github.com:my-org/my-repo.git".parse().unwrap();
        assert_eq!(dep.url.unwrap(), "https://github.com/my-org/my-repo");
    }

    /// Explicit `@tag=` / `@branch=` / `@rev=` markers allow refs containing `/`.
    #[test]
    fn can_parse_with_explicit_ref_type() {
        let dep = Dependency::from_str("smartcontractkit/ccip@tag=contracts-ccip/v1.2.1").unwrap();
        assert_eq!(dep.name, "ccip");
        assert_eq!(dep.url, Some("https://github.com/smartcontractkit/ccip".to_string()));
        assert_eq!(dep.tag, Some("contracts-ccip/v1.2.1".to_string()));
        assert_eq!(dep.alias, None);

        let dep =
            Dependency::from_str("smartcontractkit/ccip@branch=contracts-ccip/v1.2.1").unwrap();
        assert_eq!(dep.name, "ccip");
        assert_eq!(dep.url, Some("https://github.com/smartcontractkit/ccip".to_string()));
        assert_eq!(dep.tag, Some("contracts-ccip/v1.2.1".to_string()));
        assert_eq!(dep.alias, None);

        let dep = Dependency::from_str("smartcontractkit/ccip@rev=80eb41b").unwrap();
        assert_eq!(dep.name, "ccip");
        assert_eq!(dep.url, Some("https://github.com/smartcontractkit/ccip".to_string()));
        assert_eq!(dep.tag, Some("80eb41b".to_string()));
        assert_eq!(dep.alias, None);
    }

    /// An embedded access token is preserved in the normalized URL.
    #[test]
    fn can_parse_https_with_github_token() {
        // <https://github.com/foundry-rs/foundry/issues/9717>
        let dep = Dependency::from_str(
            "https://ghp_mytoken@github.com/private-org/precompiles-solidity.git",
        )
        .unwrap();
        assert_eq!(dep.name, "precompiles-solidity");
        assert_eq!(
            dep.url,
            Some("https://ghp_mytoken@github.com/private-org/precompiles-solidity".to_string())
        );
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/chain.rs | crates/cli/src/opts/chain.rs | use clap::builder::{PossibleValuesParser, TypedValueParser};
use eyre::Result;
use foundry_config::{Chain, NamedChain};
use std::ffi::OsStr;
use strum::VariantNames;
/// Custom Clap value parser for [`Chain`]s.
///
/// Displays all possible chains when an invalid chain is provided.
#[derive(Clone, Debug)]
pub struct ChainValueParser {
    /// Fallback parser over [`NamedChain::VARIANTS`], used to render the
    /// "possible values" error message when parsing fails.
    pub inner: PossibleValuesParser,
}
impl Default for ChainValueParser {
    /// Builds a parser whose suggestion list is every [`NamedChain`] variant name.
    fn default() -> Self {
        let inner = PossibleValuesParser::from(NamedChain::VARIANTS);
        Self { inner }
    }
}
impl TypedValueParser for ChainValueParser {
    type Value = Chain;

    /// Parses a [`Chain`] from either a numeric chain id or a chain name.
    fn parse_ref(
        &self,
        cmd: &clap::Command,
        arg: Option<&clap::Arg>,
        value: &OsStr,
    ) -> Result<Self::Value, clap::Error> {
        let s =
            value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?;
        if let Ok(id) = s.parse() {
            Ok(Chain::from_id(id))
        } else {
            // NamedChain::VARIANTS is a subset of all possible variants, since there are aliases:
            // amoy instead of polygon-amoy etc
            //
            // Parse first as NamedChain, if it fails parse with NamedChain::VARIANTS for displaying
            // the error to the user
            //
            // NOTE: `inner.parse_ref` is expected to fail here (hence `unwrap_err`),
            // since `s` already failed to parse as a NamedChain above.
            s.parse()
                .map_err(|_| self.inner.parse_ref(cmd, arg, value).unwrap_err())
                .map(Chain::from_named)
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/mod.rs | crates/cli/src/opts/mod.rs | mod build;
mod chain;
mod dependency;
mod evm;
mod global;
mod rpc;
mod tempo;
mod transaction;
pub use build::*;
pub use chain::*;
pub use dependency::*;
pub use evm::*;
pub use global::*;
pub use rpc::*;
pub use tempo::*;
pub use transaction::*;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/rpc.rs | crates/cli/src/opts/rpc.rs | use crate::opts::ChainValueParser;
use alloy_chains::ChainKind;
use clap::Parser;
use eyre::Result;
use foundry_config::{
Chain, Config, FigmentProviders,
figment::{
self, Figment, Metadata, Profile,
value::{Dict, Map},
},
find_project_root, impl_figment_convert_cast,
};
use foundry_wallets::WalletOpts;
use serde::Serialize;
use std::borrow::Cow;
/// Flashbots "fast" RPC endpoint, used when `--flashbots` is passed.
const FLASHBOTS_URL: &str = "https://rpc.flashbots.net/fast";
#[derive(Clone, Debug, Default, Parser)]
#[command(next_help_heading = "Rpc options")]
pub struct RpcOpts {
    /// The RPC endpoint, default value is http://localhost:8545.
    #[arg(short = 'r', long = "rpc-url", env = "ETH_RPC_URL")]
    pub url: Option<String>,

    /// Allow insecure RPC connections (accept invalid HTTPS certificates).
    ///
    /// When the provider's inner runtime transport variant is HTTP, this configures the reqwest
    /// client to accept invalid certificates.
    #[arg(short = 'k', long = "insecure", default_value = "false")]
    pub accept_invalid_certs: bool,

    /// Use the Flashbots RPC URL with fast mode (<https://rpc.flashbots.net/fast>).
    ///
    /// This shares the transaction privately with all registered builders.
    ///
    /// See: <https://docs.flashbots.net/flashbots-protect/quick-start#faster-transactions>
    #[arg(long)]
    pub flashbots: bool,

    /// JWT Secret for the RPC endpoint.
    ///
    /// The JWT secret will be used to create a JWT for a RPC. For example, the following can be
    /// used to simulate a CL `engine_forkchoiceUpdated` call:
    ///
    /// cast rpc --jwt-secret <JWT_SECRET> engine_forkchoiceUpdatedV2
    /// '["0x6bb38c26db65749ab6e472080a3d20a2f35776494e72016d1e339593f21c59bc",
    /// "0x6bb38c26db65749ab6e472080a3d20a2f35776494e72016d1e339593f21c59bc",
    /// "0x6bb38c26db65749ab6e472080a3d20a2f35776494e72016d1e339593f21c59bc"]'
    #[arg(long, env = "ETH_RPC_JWT_SECRET")]
    pub jwt_secret: Option<String>,

    /// Timeout for the RPC request in seconds.
    ///
    /// The specified timeout will be used to override the default timeout for RPC requests.
    ///
    /// Default value: 45
    #[arg(long, env = "ETH_RPC_TIMEOUT")]
    pub rpc_timeout: Option<u64>,

    /// Specify custom headers for RPC requests.
    ///
    /// Multiple headers may be given, comma-separated.
    #[arg(long, alias = "headers", env = "ETH_RPC_HEADERS", value_delimiter(','))]
    pub rpc_headers: Option<Vec<String>>,
}
// Enables conversion of `RpcOpts` into a figment for cast-style config loading.
impl_figment_convert_cast!(RpcOpts);

impl figment::Provider for RpcOpts {
    fn metadata(&self) -> Metadata {
        Metadata::named("RpcOpts")
    }

    /// Exposes [`RpcOpts::dict`] under the currently selected config profile.
    fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
        Ok(Map::from([(Config::selected_profile(), self.dict())]))
    }
}
impl RpcOpts {
    /// Returns the RPC endpoint.
    ///
    /// Resolution order: `--flashbots` wins, then an explicit `--rpc-url`,
    /// then the URL from `config` (if provided).
    pub fn url<'a>(&'a self, config: Option<&'a Config>) -> Result<Option<Cow<'a, str>>> {
        if self.flashbots {
            return Ok(Some(Cow::Borrowed(FLASHBOTS_URL)));
        }
        if let Some(url) = self.url.as_deref() {
            return Ok(Some(Cow::Borrowed(url)));
        }
        match config {
            Some(config) => Ok(config.get_rpc_url().transpose()?),
            None => Ok(None),
        }
    }

    /// Returns the JWT secret.
    ///
    /// Prefers the CLI/env value over the one stored in `config`.
    pub fn jwt<'a>(&'a self, config: Option<&'a Config>) -> Result<Option<Cow<'a, str>>> {
        if let Some(jwt) = self.jwt_secret.as_deref() {
            return Ok(Some(Cow::Borrowed(jwt)));
        }
        match config {
            Some(config) => Ok(config.get_rpc_jwt_secret()?),
            None => Ok(None),
        }
    }

    /// Serializes the set options into a figment [`Dict`], omitting unset values.
    pub fn dict(&self) -> Dict {
        let mut dict = Dict::new();
        if let Ok(Some(url)) = self.url(None) {
            dict.insert("eth_rpc_url".into(), url.into_owned().into());
        }
        if let Ok(Some(jwt)) = self.jwt(None) {
            dict.insert("eth_rpc_jwt".into(), jwt.into_owned().into());
        }
        if let Some(timeout) = self.rpc_timeout {
            dict.insert("eth_rpc_timeout".into(), timeout.into());
        }
        if let Some(headers) = self.rpc_headers.as_ref() {
            dict.insert("eth_rpc_headers".into(), headers.clone().into());
        }
        if self.accept_invalid_certs {
            dict.insert("eth_rpc_accept_invalid_certs".into(), true.into());
        }
        dict
    }

    /// Builds a [`Figment`] rooted at the current project and merges these options on top.
    ///
    /// `all` selects the full set of figment providers instead of the cast subset.
    pub fn into_figment(self, all: bool) -> Figment {
        let root = find_project_root(None).expect("could not determine project root");
        let providers = if all { FigmentProviders::All } else { FigmentProviders::Cast };
        Config::with_root(&root).to_figment(providers).merge(self)
    }
}
/// Etherscan-related CLI options: API key and target chain.
#[derive(Clone, Debug, Default, Serialize, Parser)]
pub struct EtherscanOpts {
    /// The Etherscan (or equivalent) API key.
    #[arg(short = 'e', long = "etherscan-api-key", alias = "api-key", env = "ETHERSCAN_API_KEY")]
    #[serde(rename = "etherscan_api_key", skip_serializing_if = "Option::is_none")]
    pub key: Option<String>,

    /// The chain name or EIP-155 chain ID.
    #[arg(
        short,
        long,
        alias = "chain-id",
        env = "CHAIN",
        value_parser = ChainValueParser::default(),
    )]
    #[serde(rename = "chain_id", skip_serializing_if = "Option::is_none")]
    pub chain: Option<Chain>,
}
// Enables conversion of `EtherscanOpts` into a figment for cast-style config loading.
impl_figment_convert_cast!(EtherscanOpts);

impl figment::Provider for EtherscanOpts {
    fn metadata(&self) -> Metadata {
        Metadata::named("EtherscanOpts")
    }

    /// Exposes [`EtherscanOpts::dict`] under the currently selected config profile.
    fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
        Ok(Map::from([(Config::selected_profile(), self.dict())]))
    }
}
impl EtherscanOpts {
    /// Returns true if the Etherscan API key is set.
    pub fn has_key(&self) -> bool {
        // Whitespace-only keys count as "not set".
        self.key.as_deref().is_some_and(|key| !key.trim().is_empty())
    }

    /// Returns the Etherscan API key.
    pub fn key(&self) -> Option<String> {
        match self.key.as_deref() {
            Some(key) if !key.trim().is_empty() => Some(key.to_string()),
            _ => None,
        }
    }

    /// Serializes the key and chain into a figment [`Dict`], omitting unset/blank values.
    pub fn dict(&self) -> Dict {
        let mut dict = Dict::new();
        if let Some(key) = self.key() {
            dict.insert("etherscan_api_key".into(), key.into());
        }
        if let Some(chain) = self.chain {
            // Numeric chain ids are stored as integers, named chains as strings.
            let value = match chain.kind() {
                ChainKind::Id(id) => (*id).into(),
                _ => chain.to_string().into(),
            };
            dict.insert("chain_id".into(), value);
        }
        dict
    }
}
/// Combined Ethereum-related CLI options: RPC endpoint, Etherscan, and wallet.
#[derive(Clone, Debug, Default, Parser)]
#[command(next_help_heading = "Ethereum options")]
pub struct EthereumOpts {
    #[command(flatten)]
    pub rpc: RpcOpts,

    #[command(flatten)]
    pub etherscan: EtherscanOpts,

    #[command(flatten)]
    pub wallet: WalletOpts,
}
impl_figment_convert_cast!(EthereumOpts);

// Make this args a `Figment` so that it can be merged into the `Config`
impl figment::Provider for EthereumOpts {
    fn metadata(&self) -> Metadata {
        Metadata::named("Ethereum Opts Provider")
    }

    /// Combines the etherscan and rpc dicts, and adds `sender` when `--from` is set.
    fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
        let mut dict = self.etherscan.dict();
        dict.extend(self.rpc.dict());
        if let Some(from) = self.wallet.from {
            dict.insert("sender".to_string(), from.to_string().into());
        }
        Ok(Map::from([(Config::selected_profile(), dict)]))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_etherscan_opts() {
        // A non-empty key is reported back verbatim.
        let args: EtherscanOpts =
            EtherscanOpts::parse_from(["foundry-cli", "--etherscan-api-key", "dummykey"]);
        assert_eq!(args.key(), Some("dummykey".to_string()));

        // An empty key counts as "not set".
        let args: EtherscanOpts =
            EtherscanOpts::parse_from(["foundry-cli", "--etherscan-api-key", ""]);
        assert!(!args.has_key());
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/transaction.rs | crates/cli/src/opts/transaction.rs | use std::str::FromStr;
use super::TempoOpts;
use crate::utils::{parse_ether_value, parse_json};
use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization};
use alloy_primitives::{Address, U64, U256, hex};
use alloy_rlp::Decodable;
use clap::Parser;
/// CLI helper to parse an EIP-7702 authorization list entry.
/// Can be either a hex-encoded signed authorization or an address.
#[derive(Clone, Debug)]
pub enum CliAuthorizationList {
    /// If an address is provided, we sign the authorization delegating to provided address.
    Address(Address),
    /// If RLP-encoded authorization is provided, we decode it and attach to transaction.
    Signed(SignedAuthorization),
}
impl FromStr for CliAuthorizationList {
type Err = eyre::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Ok(addr) = Address::from_str(s) {
Ok(Self::Address(addr))
} else if let Ok(auth) = SignedAuthorization::decode(&mut hex::decode(s)?.as_ref()) {
Ok(Self::Signed(auth))
} else {
eyre::bail!("Failed to decode authorization")
}
}
}
#[derive(Clone, Debug, Parser)]
#[command(next_help_heading = "Transaction options")]
pub struct TransactionOpts {
    /// Gas limit for the transaction.
    #[arg(long, env = "ETH_GAS_LIMIT")]
    pub gas_limit: Option<U256>,

    /// Gas price for legacy transactions, or max fee per gas for EIP1559 transactions, either
    /// specified in wei, or as a string with a unit type.
    ///
    /// Examples: 1ether, 10gwei, 0.01ether
    #[arg(
        long,
        env = "ETH_GAS_PRICE",
        value_parser = parse_ether_value,
        value_name = "PRICE"
    )]
    pub gas_price: Option<U256>,

    /// Max priority fee per gas for EIP1559 transactions.
    #[arg(
        long,
        env = "ETH_PRIORITY_GAS_PRICE",
        value_parser = parse_ether_value,
        value_name = "PRICE"
    )]
    pub priority_gas_price: Option<U256>,

    /// Ether to send in the transaction, either specified in wei, or as a string with a unit type.
    ///
    /// Examples: 1ether, 10gwei, 0.01ether
    #[arg(long, value_parser = parse_ether_value)]
    pub value: Option<U256>,

    /// Nonce for the transaction.
    #[arg(long)]
    pub nonce: Option<U64>,

    /// Send a legacy transaction instead of an EIP1559 transaction.
    ///
    /// This is automatically enabled for common networks without EIP1559.
    #[arg(long)]
    pub legacy: bool,

    /// Send a EIP-4844 blob transaction.
    #[arg(long, conflicts_with = "legacy")]
    pub blob: bool,

    /// Gas price for EIP-4844 blob transaction.
    #[arg(long, conflicts_with = "legacy", value_parser = parse_ether_value, env = "ETH_BLOB_GAS_PRICE", value_name = "BLOB_PRICE")]
    pub blob_gas_price: Option<U256>,

    /// EIP-7702 authorization list.
    ///
    /// Can be either a hex-encoded signed authorization or an address.
    #[arg(long, conflicts_with_all = &["legacy", "blob"])]
    pub auth: Vec<CliAuthorizationList>,

    /// EIP-2930 access list.
    ///
    /// Accepts either a JSON-encoded access list or an empty value to create the access list
    /// via an RPC call to `eth_createAccessList`. To retrieve only the access list portion, use
    /// the `cast access-list` command.
    #[arg(long, value_parser = parse_json::<AccessList>)]
    pub access_list: Option<Option<AccessList>>,

    /// Tempo-specific transaction options (fee token, nonce sequence key).
    #[command(flatten)]
    pub tempo: TempoOpts,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_priority_gas_tx_opts() {
        // `--priority-gas-price` accepts a plain wei value.
        let args: TransactionOpts =
            TransactionOpts::parse_from(["foundry-cli", "--priority-gas-price", "100"]);
        assert!(args.priority_gas_price.is_some());
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/tempo.rs | crates/cli/src/opts/tempo.rs | use alloy_primitives::{Address, ruint::aliases::U256};
use clap::Parser;
/// CLI options for Tempo transactions.
///
/// Both options are optional; leaving them unset falls back to the documented
/// network/protocol defaults.
#[derive(Clone, Debug, Default, Parser)]
#[command(next_help_heading = "Tempo")]
pub struct TempoOpts {
    /// Fee token address for Tempo transactions.
    ///
    /// When set, builds a Tempo (type 0x76) transaction that pays gas fees
    /// in the specified token.
    ///
    /// If this is not set, the fee token is chosen according to network rules. See the Tempo docs
    /// for more information.
    #[arg(long = "tempo.fee-token")]
    pub fee_token: Option<Address>,

    /// Nonce sequence key for Tempo transactions.
    ///
    /// When set, builds a Tempo (type 0x76) transaction with the specified nonce sequence key.
    ///
    /// If this is not set, the protocol sequence key (0) will be used.
    ///
    /// For more information see <https://docs.tempo.xyz/protocol/transactions/spec-tempo-transaction#parallelizable-nonces>.
    #[arg(long = "tempo.seq")]
    pub sequence_key: Option<U256>,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/build/core.rs | crates/cli/src/opts/build/core.rs | use super::ProjectPathOpts;
use crate::{opts::CompilerOpts, utils::LoadConfig};
use clap::{Parser, ValueHint};
use eyre::Result;
use foundry_compilers::{
Project,
artifacts::{RevertStrings, remappings::Remapping},
compilers::multi::MultiCompiler,
utils::canonicalized,
};
use foundry_config::{
Config, DenyLevel, Remappings,
figment::{
self, Figment, Metadata, Profile, Provider,
error::Kind::InvalidType,
value::{Dict, Map, Value},
},
filter::SkipBuildFilter,
};
use serde::Serialize;
use std::path::PathBuf;
/// CLI options controlling compilation.
///
/// Fields marked `#[serde(skip)]` are not serialized directly; they are
/// inserted manually by the figment `Provider` implementation for this type.
#[derive(Clone, Debug, Default, Serialize, Parser)]
#[command(next_help_heading = "Build options")]
pub struct BuildOpts {
    /// Clear the cache and artifacts folder and recompile.
    #[arg(long, help_heading = "Cache options")]
    #[serde(skip)]
    pub force: bool,

    /// Disable the cache.
    #[arg(long)]
    #[serde(skip)]
    pub no_cache: bool,

    /// Enable dynamic test linking.
    #[arg(long, conflicts_with = "no_cache")]
    #[serde(skip)]
    pub dynamic_test_linking: bool,

    /// Set pre-linked libraries.
    #[arg(long, help_heading = "Linker options", env = "DAPP_LIBRARIES")]
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub libraries: Vec<String>,

    /// Ignore solc warnings by error code.
    #[arg(long, help_heading = "Compiler options", value_name = "ERROR_CODES")]
    #[serde(skip_serializing_if = "Vec::is_empty")]
    pub ignored_error_codes: Vec<u64>,

    /// A compiler error will be triggered at the specified diagnostic level.
    ///
    /// Replaces the deprecated `--deny-warnings` flag.
    ///
    /// Possible values:
    /// - `never`: Do not treat any diagnostics as errors.
    /// - `warnings`: Treat warnings as errors.
    /// - `notes`: Treat both, warnings and notes, as errors.
    #[arg(
        long,
        short = 'D',
        help_heading = "Compiler options",
        value_name = "LEVEL",
        conflicts_with = "deny_warnings"
    )]
    #[serde(skip)]
    pub deny: Option<DenyLevel>,

    /// Deprecated: use `--deny=warnings` instead.
    #[arg(long = "deny-warnings", hide = true)]
    pub deny_warnings: bool,

    /// Do not auto-detect the `solc` version.
    #[arg(long, help_heading = "Compiler options")]
    #[serde(skip)]
    pub no_auto_detect: bool,

    /// Specify the solc version, or a path to a local solc, to build with.
    ///
    /// Valid values are in the format `x.y.z`, `solc:x.y.z` or `path/to/solc`.
    #[arg(
        long = "use",
        alias = "compiler-version",
        help_heading = "Compiler options",
        value_name = "SOLC_VERSION"
    )]
    #[serde(skip)]
    pub use_solc: Option<String>,

    /// Do not access the network.
    ///
    /// Missing solc versions will not be installed.
    #[arg(help_heading = "Compiler options", long)]
    #[serde(skip)]
    pub offline: bool,

    /// Use the Yul intermediate representation compilation pipeline.
    #[arg(long, help_heading = "Compiler options")]
    #[serde(skip)]
    pub via_ir: bool,

    /// Changes compilation to only use literal content and not URLs.
    #[arg(long, help_heading = "Compiler options")]
    #[serde(skip)]
    pub use_literal_content: bool,

    /// Do not append any metadata to the bytecode.
    ///
    /// This is equivalent to setting `bytecode_hash` to `none` and `cbor_metadata` to `false`.
    #[arg(long, help_heading = "Compiler options")]
    #[serde(skip)]
    pub no_metadata: bool,

    /// The path to the contract artifacts folder.
    #[arg(
        long = "out",
        short,
        help_heading = "Project options",
        value_hint = ValueHint::DirPath,
        value_name = "PATH",
    )]
    #[serde(rename = "out", skip_serializing_if = "Option::is_none")]
    pub out_path: Option<PathBuf>,

    /// Revert string configuration.
    ///
    /// Possible values are "default", "strip" (remove),
    /// "debug" (Solidity-generated revert strings) and "verboseDebug"
    #[arg(long, help_heading = "Project options", value_name = "REVERT")]
    #[serde(skip)]
    pub revert_strings: Option<RevertStrings>,

    /// Generate build info files.
    #[arg(long, help_heading = "Project options")]
    #[serde(skip)]
    pub build_info: bool,

    /// Output path to directory that build info files will be written to.
    #[arg(
        long,
        help_heading = "Project options",
        value_hint = ValueHint::DirPath,
        value_name = "PATH",
        requires = "build_info",
    )]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub build_info_path: Option<PathBuf>,

    /// Skip building files whose names contain the given filter.
    ///
    /// `test` and `script` are aliases for `.t.sol` and `.s.sol`.
    #[arg(long, num_args(1..))]
    #[serde(skip)]
    pub skip: Option<Vec<SkipBuildFilter>>,

    /// Compiler-related options (flattened into this command).
    #[command(flatten)]
    #[serde(flatten)]
    pub compiler: CompilerOpts,

    /// Project path options (flattened into this command).
    #[command(flatten)]
    #[serde(flatten)]
    pub project_paths: ProjectPathOpts,
}
impl BuildOpts {
    /// Returns the `Project` for the current workspace
    ///
    /// This loads the `foundry_config::Config` for the current workspace (see
    /// `find_project_root` and merges the cli `BuildArgs` into it before returning
    /// [`foundry_config::Config::project()`]).
    pub fn project(&self) -> Result<Project<MultiCompiler>> {
        let project = self.load_config()?.project()?;
        Ok(project)
    }

    /// Returns the remappings to add to the config
    #[deprecated(note = "Use ProjectPathsArgs::get_remappings() instead")]
    pub fn get_remappings(&self) -> Vec<Remapping> {
        self.project_paths.get_remappings()
    }
}
// Loads project's figment and merges the build cli arguments into it
impl<'a> From<&'a BuildOpts> for Figment {
    fn from(args: &'a BuildOpts) -> Self {
        // Determine the project root: an explicit --config-path wins over discovery.
        let root = if let Some(config_path) = &args.project_paths.config_path {
            if !config_path.exists() {
                panic!("error: config-path `{}` does not exist", config_path.display())
            }
            if !config_path.ends_with(Config::FILE_NAME) {
                panic!("error: the config-path must be a path to a foundry.toml file")
            }
            let config_path = canonicalized(config_path);
            config_path.parent().unwrap().to_path_buf()
        } else {
            args.project_paths.project_root()
        };

        let mut figment = Config::figment_with_root(root);

        // remappings should stack
        let mut remappings = Remappings::new_with_remappings(args.project_paths.get_remappings())
            .with_figment(&figment);
        remappings
            .extend(figment.extract_inner::<Vec<Remapping>>("remappings").unwrap_or_default());
        figment = figment.merge(("remappings", remappings.into_inner())).merge(args);

        // `--skip` patterns stack with any `skip` entries already in the figment.
        if let Some(skip) = &args.skip {
            let mut skip = skip.iter().map(|s| s.file_pattern().to_string()).collect::<Vec<_>>();
            skip.extend(figment.extract_inner::<Vec<String>>("skip").unwrap_or_default());
            figment = figment.merge(("skip", skip));
        };

        figment
    }
}
impl Provider for BuildOpts {
fn metadata(&self) -> Metadata {
Metadata::named("Core Build Args Provider")
}
fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
let value = Value::serialize(self)?;
let error = InvalidType(value.to_actual(), "map".into());
let mut dict = value.into_dict().ok_or(error)?;
if self.no_auto_detect {
dict.insert("auto_detect_solc".to_string(), false.into());
}
if let Some(ref solc) = self.use_solc {
dict.insert("solc".to_string(), solc.trim_start_matches("solc:").into());
}
if self.offline {
dict.insert("offline".to_string(), true.into());
}
if self.deny_warnings {
dict.insert("deny".to_string(), figment::value::Value::serialize(DenyLevel::Warnings)?);
_ = sh_warn!("`--deny-warnings` is being deprecated in favor of `--deny warnings`.");
} else if let Some(deny) = self.deny {
dict.insert("deny".to_string(), figment::value::Value::serialize(deny)?);
}
if self.via_ir {
dict.insert("via_ir".to_string(), true.into());
}
if self.use_literal_content {
dict.insert("use_literal_content".to_string(), true.into());
}
if self.no_metadata {
dict.insert("bytecode_hash".to_string(), "none".into());
dict.insert("cbor_metadata".to_string(), false.into());
}
if self.force {
dict.insert("force".to_string(), self.force.into());
}
// we need to ensure no_cache set accordingly
if self.no_cache {
dict.insert("cache".to_string(), false.into());
}
if self.dynamic_test_linking {
dict.insert("dynamic_test_linking".to_string(), true.into());
}
if self.build_info {
dict.insert("build_info".to_string(), self.build_info.into());
}
if self.compiler.ast {
dict.insert("ast".to_string(), true.into());
}
if let Some(optimize) = self.compiler.optimize {
dict.insert("optimizer".to_string(), optimize.into());
}
if !self.compiler.extra_output.is_empty() {
let selection: Vec<_> =
self.compiler.extra_output.iter().map(|s| s.to_string()).collect();
dict.insert("extra_output".to_string(), selection.into());
}
if !self.compiler.extra_output_files.is_empty() {
let selection: Vec<_> =
self.compiler.extra_output_files.iter().map(|s| s.to_string()).collect();
dict.insert("extra_output_files".to_string(), selection.into());
}
if let Some(ref revert) = self.revert_strings {
dict.insert("revert_strings".to_string(), revert.to_string().into());
}
Ok(Map::from([(Config::selected_profile(), dict)]))
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/build/paths.rs | crates/cli/src/opts/build/paths.rs | use clap::{Parser, ValueHint};
use eyre::Result;
use foundry_compilers::artifacts::remappings::Remapping;
use foundry_config::{
Config, figment,
figment::{
Metadata, Profile, Provider,
error::Kind::InvalidType,
value::{Dict, Map, Value},
},
find_project_root, remappings_from_env_var,
};
use serde::Serialize;
use std::path::PathBuf;
/// Common arguments for a project's paths.
#[derive(Clone, Debug, Default, Serialize, Parser)]
#[command(next_help_heading = "Project options")]
pub struct ProjectPathOpts {
/// The project's root path.
///
/// By default root of the Git repository, if in one,
/// or the current working directory.
#[arg(long, value_hint = ValueHint::DirPath, value_name = "PATH")]
#[serde(skip)]
pub root: Option<PathBuf>,
/// The contracts source directory.
#[arg(long, short = 'C', value_hint = ValueHint::DirPath, value_name = "PATH")]
#[serde(rename = "src", skip_serializing_if = "Option::is_none")]
pub contracts: Option<PathBuf>,
/// The project's remappings.
#[arg(long, short = 'R')]
#[serde(skip)]
pub remappings: Vec<Remapping>,
/// The project's remappings from the environment.
#[arg(long, value_name = "ENV")]
#[serde(skip)]
pub remappings_env: Option<String>,
/// The path to the compiler cache.
#[arg(long, value_hint = ValueHint::DirPath, value_name = "PATH")]
#[serde(skip_serializing_if = "Option::is_none")]
pub cache_path: Option<PathBuf>,
/// The path to the library folder.
#[arg(long, value_hint = ValueHint::DirPath, value_name = "PATH")]
#[serde(rename = "libs", skip_serializing_if = "Vec::is_empty")]
pub lib_paths: Vec<PathBuf>,
/// Use the Hardhat-style project layout.
///
/// This is the same as using: `--contracts contracts --lib-paths node_modules`.
#[arg(long, conflicts_with = "contracts", visible_alias = "hh")]
#[serde(skip)]
pub hardhat: bool,
/// Path to the config file.
#[arg(long, value_hint = ValueHint::FilePath, value_name = "FILE")]
#[serde(skip)]
pub config_path: Option<PathBuf>,
}
impl ProjectPathOpts {
/// Returns the root directory to use for configuring the project.
///
/// This will be the `--root` argument if provided, otherwise see [`find_project_root`].
///
/// # Panics
///
/// Panics if the project root directory cannot be found. See [`find_project_root`].
#[track_caller]
pub fn project_root(&self) -> PathBuf {
self.root
.clone()
.unwrap_or_else(|| find_project_root(None).expect("could not determine project root"))
}
/// Returns the remappings to add to the config
pub fn get_remappings(&self) -> Vec<Remapping> {
let mut remappings = self.remappings.clone();
if let Some(env_remappings) =
self.remappings_env.as_ref().and_then(|env| remappings_from_env_var(env))
{
remappings.extend(env_remappings.expect("Failed to parse env var remappings"));
}
remappings
}
}
foundry_config::impl_figment_convert!(ProjectPathOpts);
// Make this args a `figment::Provider` so that it can be merged into the `Config`
impl Provider for ProjectPathOpts {
fn metadata(&self) -> Metadata {
Metadata::named("Project Paths Args Provider")
}
fn data(&self) -> Result<Map<Profile, Dict>, figment::Error> {
let value = Value::serialize(self)?;
let error = InvalidType(value.to_actual(), "map".into());
let mut dict = value.into_dict().ok_or(error)?;
let mut libs =
self.lib_paths.iter().map(|p| format!("{}", p.display())).collect::<Vec<_>>();
if self.hardhat {
dict.insert("src".to_string(), "contracts".to_string().into());
libs.push("node_modules".to_string());
}
if !libs.is_empty() {
dict.insert("libs".to_string(), libs.into());
}
Ok(Map::from([(Config::selected_profile(), dict)]))
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/build/utils.rs | crates/cli/src/opts/build/utils.rs | use eyre::Result;
use foundry_compilers::{
CompilerInput, Graph, Project, ProjectCompileOutput, ProjectPathsConfig,
artifacts::{Source, Sources},
multi::{MultiCompilerLanguage, MultiCompilerParser},
solc::{SOLC_EXTENSIONS, SolcLanguage, SolcVersionedInput},
};
use foundry_config::Config;
use rayon::prelude::*;
use solar::{interface::MIN_SOLIDITY_VERSION, sema::ParsingContext};
use std::{
collections::{HashSet, VecDeque},
path::{Path, PathBuf},
};
/// Configures a [`ParsingContext`] from [`Config`].
///
/// - Configures include paths, remappings
/// - Source files are added if `add_source_file` is set
/// - If no `project` is provided, it will spin up a new ephemeral project.
/// - If no `target_paths` are provided, all project files are processed.
/// - Only processes the subset of sources with the most up-to-date Solidity version.
pub fn configure_pcx(
pcx: &mut ParsingContext<'_>,
config: &Config,
project: Option<&Project>,
target_paths: Option<&[PathBuf]>,
) -> Result<()> {
// Process build options
let project = match project {
Some(project) => project,
None => &config.ephemeral_project()?,
};
let sources = match target_paths {
// If target files are provided, only process those sources
Some(targets) => {
let mut sources = Sources::new();
for t in targets {
let path = dunce::canonicalize(t)?;
let source = Source::read(&path)?;
sources.insert(path, source);
}
sources
}
// Otherwise, process all project files
None => project.paths.read_input_files()?,
};
// Only process sources with latest Solidity version to avoid conflicts.
let graph = Graph::<MultiCompilerParser>::resolve_sources(&project.paths, sources)?;
let (version, sources) = graph
// Resolve graph into mapping language -> version -> sources
.into_sources_by_version(project)?
.sources
.into_iter()
// Only interested in Solidity sources
.find(|(lang, _)| *lang == MultiCompilerLanguage::Solc(SolcLanguage::Solidity))
.ok_or_else(|| eyre::eyre!("no Solidity sources"))?
.1
.into_iter()
// Filter unsupported versions
.filter(|(v, _, _)| v >= &MIN_SOLIDITY_VERSION)
// Always pick the latest version
.max_by(|(v1, _, _), (v2, _, _)| v1.cmp(v2))
.map_or((MIN_SOLIDITY_VERSION, Sources::default()), |(v, s, _)| (v, s));
if sources.is_empty() {
sh_warn!("no files found. Solar doesn't support Solidity versions prior to 0.8.0")?;
}
let solc = SolcVersionedInput::build(
sources,
config.solc_settings()?,
SolcLanguage::Solidity,
version,
);
configure_pcx_from_solc(pcx, &project.paths, &solc, true);
Ok(())
}
/// Extracts Solar-compatible sources from a [`ProjectCompileOutput`].
///
/// # Note:
/// uses `output.graph().source_files()` and `output.artifact_ids()` rather than `output.sources()`
/// because sources aren't populated when build is skipped when there are no changes in the source
/// code. <https://github.com/foundry-rs/foundry/issues/12018>
pub fn get_solar_sources_from_compile_output(
config: &Config,
output: &ProjectCompileOutput,
target_paths: Option<&[PathBuf]>,
) -> Result<SolcVersionedInput> {
let is_solidity_file = |path: &Path| -> bool {
path.extension().and_then(|s| s.to_str()).is_some_and(|ext| SOLC_EXTENSIONS.contains(&ext))
};
// Collect source path targets
let mut source_paths: HashSet<PathBuf> = if let Some(targets) = target_paths
&& !targets.is_empty()
{
let mut source_paths = HashSet::new();
let mut queue: VecDeque<PathBuf> = targets
.iter()
.filter_map(|path| {
is_solidity_file(path).then(|| dunce::canonicalize(path).ok()).flatten()
})
.collect();
while let Some(path) = queue.pop_front() {
if source_paths.insert(path.clone()) {
for import in output.graph().imports(path.as_path()) {
queue.push_back(import.to_path_buf());
}
}
}
source_paths
} else {
output
.graph()
.source_files()
.filter_map(|idx| {
let path = output.graph().node_path(idx).to_path_buf();
is_solidity_file(&path).then_some(path)
})
.collect()
};
// Read all sources and find the latest version.
let (version, sources) = {
let (mut max_version, mut sources) = (MIN_SOLIDITY_VERSION, Sources::new());
for (id, _) in output.artifact_ids() {
if let Ok(path) = dunce::canonicalize(&id.source)
&& source_paths.remove(&path)
{
if id.version < MIN_SOLIDITY_VERSION {
continue;
} else if max_version < id.version {
max_version = id.version;
};
let source = Source::read(&path)?;
sources.insert(path, source);
}
}
(max_version, sources)
};
let solc = SolcVersionedInput::build(
sources,
config.solc_settings()?,
SolcLanguage::Solidity,
version,
);
Ok(solc)
}
/// Configures a [`ParsingContext`] from a [`ProjectCompileOutput`].
pub fn configure_pcx_from_compile_output(
pcx: &mut ParsingContext<'_>,
config: &Config,
output: &ProjectCompileOutput,
target_paths: Option<&[PathBuf]>,
) -> Result<()> {
let solc = get_solar_sources_from_compile_output(config, output, target_paths)?;
configure_pcx_from_solc(pcx, &config.project_paths(), &solc, true);
Ok(())
}
/// Configures a [`ParsingContext`] from [`ProjectPathsConfig`] and [`SolcVersionedInput`].
///
/// - Configures include paths, remappings.
/// - Source files are added if `add_source_file` is set
pub fn configure_pcx_from_solc(
pcx: &mut ParsingContext<'_>,
project_paths: &ProjectPathsConfig,
vinput: &SolcVersionedInput,
add_source_files: bool,
) {
configure_pcx_from_solc_cli(pcx, project_paths, &vinput.cli_settings);
if add_source_files {
let sources = vinput
.input
.sources
.par_iter()
.filter_map(|(path, source)| {
pcx.sess.source_map().new_source_file(path.clone(), source.content.as_str()).ok()
})
.collect::<Vec<_>>();
pcx.add_files(sources);
}
}
fn configure_pcx_from_solc_cli(
pcx: &mut ParsingContext<'_>,
project_paths: &ProjectPathsConfig,
cli_settings: &foundry_compilers::solc::CliSettings,
) {
pcx.file_resolver
.set_current_dir(cli_settings.base_path.as_ref().unwrap_or(&project_paths.root));
for remapping in &project_paths.remappings {
pcx.file_resolver.add_import_remapping(solar::sema::interface::config::ImportRemapping {
context: remapping.context.clone().unwrap_or_default(),
prefix: remapping.name.clone(),
path: remapping.path.clone(),
});
}
pcx.file_resolver.add_include_paths(cli_settings.include_paths.iter().cloned());
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/cli/src/opts/build/mod.rs | crates/cli/src/opts/build/mod.rs | use clap::Parser;
use foundry_compilers::artifacts::{EvmVersion, output_selection::ContractOutputSelection};
use serde::Serialize;
mod core;
pub use self::core::BuildOpts;
mod paths;
pub use self::paths::ProjectPathOpts;
mod utils;
pub use self::utils::*;
// A set of solc compiler settings that can be set via command line arguments, which are intended
// to be merged into an existing `foundry_config::Config`.
//
// See also `BuildArgs`.
#[derive(Clone, Debug, Default, Serialize, Parser)]
#[command(next_help_heading = "Compiler options")]
pub struct CompilerOpts {
/// Includes the AST as JSON in the compiler output.
#[arg(long, help_heading = "Compiler options")]
#[serde(skip)]
pub ast: bool,
/// The target EVM version.
#[arg(long, value_name = "VERSION")]
#[serde(skip_serializing_if = "Option::is_none")]
pub evm_version: Option<EvmVersion>,
/// Activate the Solidity optimizer.
#[arg(long, default_missing_value="true", num_args = 0..=1)]
#[serde(skip)]
pub optimize: Option<bool>,
/// The number of runs specifies roughly how often each opcode of the deployed code will be
/// executed across the life-time of the contract. This means it is a trade-off parameter
/// between code size (deploy cost) and code execution cost (cost after deployment).
/// An `optimizer_runs` parameter of `1` will produce short but expensive code. In contrast, a
/// larger `optimizer_runs` parameter will produce longer but more gas efficient code.
#[arg(long, value_name = "RUNS")]
#[serde(skip_serializing_if = "Option::is_none")]
pub optimizer_runs: Option<usize>,
/// Extra output to include in the contract's artifact.
///
/// Example keys: evm.assembly, ewasm, ir, irOptimized, metadata
///
/// For a full description, see <https://docs.soliditylang.org/en/v0.8.13/using-the-compiler.html#input-description>
#[arg(long, num_args(1..), value_name = "SELECTOR")]
#[serde(skip_serializing_if = "Vec::is_empty")]
pub extra_output: Vec<ContractOutputSelection>,
/// Extra output to write to separate files.
///
/// Valid values: metadata, ir, irOptimized, ewasm, evm.assembly
#[arg(long, num_args(1..), value_name = "SELECTOR")]
#[serde(skip_serializing_if = "Vec::is_empty")]
pub extra_output_files: Vec<ContractOutputSelection>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_parse_evm_version() {
let args: CompilerOpts =
CompilerOpts::parse_from(["foundry-cli", "--evm-version", "london"]);
assert_eq!(args.evm_version, Some(EvmVersion::London));
}
#[test]
fn can_parse_extra_output() {
let args: CompilerOpts =
CompilerOpts::parse_from(["foundry-cli", "--extra-output", "metadata", "ir-optimized"]);
assert_eq!(
args.extra_output,
vec![ContractOutputSelection::Metadata, ContractOutputSelection::IrOptimized]
);
}
#[test]
fn can_parse_extra_output_files() {
let args: CompilerOpts = CompilerOpts::parse_from([
"foundry-cli",
"--extra-output-files",
"metadata",
"ir-optimized",
]);
assert_eq!(
args.extra_output_files,
vec![ContractOutputSelection::Metadata, ContractOutputSelection::IrOptimized]
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/builder.rs | crates/doc/src/builder.rs | use crate::{
AsDoc, BufWriter, Document, ParseItem, ParseSource, Parser, Preprocessor,
document::DocumentContent, helpers::merge_toml_table, solang_ext::Visitable,
};
use alloy_primitives::map::HashMap;
use eyre::{Context, Result};
use foundry_compilers::{compilers::solc::SOLC_EXTENSIONS, utils::source_files_iter};
use foundry_config::{DocConfig, FormatterConfig, filter::expand_globs};
use itertools::Itertools;
use mdbook::MDBook;
use rayon::prelude::*;
use std::{
cmp::Ordering,
fs,
path::{Path, PathBuf},
};
use toml::value;
/// Build Solidity documentation for a project from natspec comments.
/// The builder parses the source files using [Parser],
/// then formats and writes the elements as the output.
#[derive(Debug)]
pub struct DocBuilder {
/// The project root
root: PathBuf,
/// Path to Solidity source files.
sources: PathBuf,
/// Paths to external libraries.
libraries: Vec<PathBuf>,
/// Flag whether to build mdbook.
should_build: bool,
/// Documentation configuration.
config: DocConfig,
/// The array of preprocessors to apply.
preprocessors: Vec<Box<dyn Preprocessor>>,
/// The formatter config.
fmt: FormatterConfig,
/// Whether to include libraries to the output.
include_libraries: bool,
}
impl DocBuilder {
pub(crate) const SRC: &'static str = "src";
const SOL_EXT: &'static str = "sol";
const README: &'static str = "README.md";
const SUMMARY: &'static str = "SUMMARY.md";
/// Create new instance of builder.
pub fn new(
root: PathBuf,
sources: PathBuf,
libraries: Vec<PathBuf>,
include_libraries: bool,
) -> Self {
Self {
root,
sources,
libraries,
include_libraries,
should_build: false,
config: DocConfig::default(),
preprocessors: Default::default(),
fmt: Default::default(),
}
}
/// Set `should_build` flag on the builder
pub fn with_should_build(mut self, should_build: bool) -> Self {
self.should_build = should_build;
self
}
/// Set config on the builder.
pub fn with_config(mut self, config: DocConfig) -> Self {
self.config = config;
self
}
/// Set formatter config on the builder.
pub fn with_fmt(mut self, fmt: FormatterConfig) -> Self {
self.fmt = fmt;
self
}
/// Set preprocessors on the builder.
pub fn with_preprocessor<P: Preprocessor + 'static>(mut self, preprocessor: P) -> Self {
self.preprocessors.push(Box::new(preprocessor) as Box<dyn Preprocessor>);
self
}
/// Get the output directory
pub fn out_dir(&self) -> Result<PathBuf> {
Ok(self.root.join(&self.config.out).canonicalize()?)
}
/// Parse the sources and build the documentation.
pub fn build(self, compiler: &mut solar::sema::Compiler) -> eyre::Result<()> {
fs::create_dir_all(self.root.join(&self.config.out))
.wrap_err("failed to create output directory")?;
// Expand ignore globs
let ignored = expand_globs(&self.root, self.config.ignore.iter())?;
// Collect and parse source files
let sources = source_files_iter(&self.sources, SOLC_EXTENSIONS)
.filter(|file| !ignored.contains(file))
.collect::<Vec<_>>();
if sources.is_empty() {
sh_println!("No sources detected at {}", self.sources.display())?;
return Ok(());
}
let library_sources = self
.libraries
.iter()
.flat_map(|lib| source_files_iter(lib, SOLC_EXTENSIONS))
.collect::<Vec<_>>();
let combined_sources = sources
.iter()
.map(|path| (path, false))
.chain(library_sources.iter().map(|path| (path, true)))
.collect::<Vec<_>>();
let out_dir = self.out_dir()?;
let out_target_dir = out_dir.clone();
let documents = compiler.enter_mut(|compiler| -> eyre::Result<Vec<Vec<Document>>> {
let gcx = compiler.gcx();
let documents = combined_sources
.par_iter()
.enumerate()
.map(|(i, (path, from_library))| {
let path = *path;
let from_library = *from_library;
let mut files = vec![];
// Read and parse source file
if let Some((_, ast)) = gcx.get_ast_source(path)
&& let Some(source) =
forge_fmt::format_ast(gcx, ast, self.fmt.clone().into())
{
let (mut source_unit, comments) = match solang_parser::parse(&source, i) {
Ok(res) => res,
Err(err) => {
if from_library {
// Ignore failures for library files
return Ok(files);
} else {
return Err(eyre::eyre!(
"Failed to parse Solidity code for {}\nDebug info: {:?}",
path.display(),
err
));
}
}
};
// Visit the parse tree
let mut doc = Parser::new(comments, source, self.fmt.tab_width);
source_unit
.visit(&mut doc)
.map_err(|err| eyre::eyre!("Failed to parse source: {err}"))?;
// Split the parsed items on top-level constants and rest.
let (items, consts): (Vec<ParseItem>, Vec<ParseItem>) = doc
.items()
.into_iter()
.partition(|item| !matches!(item.source, ParseSource::Variable(_)));
// Attempt to group overloaded top-level functions
let mut remaining = Vec::with_capacity(items.len());
let mut funcs: HashMap<String, Vec<ParseItem>> = HashMap::default();
for item in items {
if matches!(item.source, ParseSource::Function(_)) {
funcs.entry(item.source.ident()).or_default().push(item);
} else {
// Put the item back
remaining.push(item);
}
}
let (items, overloaded): (
HashMap<String, Vec<ParseItem>>,
HashMap<String, Vec<ParseItem>>,
) = funcs.into_iter().partition(|(_, v)| v.len() == 1);
remaining.extend(items.into_iter().flat_map(|(_, v)| v));
// Each regular item will be written into its own file.
files = remaining
.into_iter()
.map(|item| {
let relative_path =
path.strip_prefix(&self.root)?.join(item.filename());
let target_path = out_dir.join(Self::SRC).join(relative_path);
let ident = item.source.ident();
Ok(Document::new(
path.clone(),
target_path,
from_library,
out_target_dir.clone(),
)
.with_content(DocumentContent::Single(item), ident))
})
.collect::<eyre::Result<Vec<_>>>()?;
// If top-level constants exist, they will be written to the same file.
if !consts.is_empty() {
let filestem = path.file_stem().and_then(|stem| stem.to_str());
let filename = {
let mut name = "constants".to_owned();
if let Some(stem) = filestem {
name.push_str(&format!(".{stem}"));
}
name.push_str(".md");
name
};
let relative_path = path.strip_prefix(&self.root)?.join(filename);
let target_path = out_dir.join(Self::SRC).join(relative_path);
let identity = match filestem {
Some(stem) if stem.to_lowercase().contains("constants") => {
stem.to_owned()
}
Some(stem) => format!("{stem} constants"),
None => "constants".to_owned(),
};
files.push(
Document::new(
path.clone(),
target_path,
from_library,
out_target_dir.clone(),
)
.with_content(DocumentContent::Constants(consts), identity),
)
}
// If overloaded functions exist, they will be written to the same file
if !overloaded.is_empty() {
for (ident, funcs) in overloaded {
let filename =
funcs.first().expect("no overloaded functions").filename();
let relative_path = path.strip_prefix(&self.root)?.join(filename);
let target_path = out_dir.join(Self::SRC).join(relative_path);
files.push(
Document::new(
path.clone(),
target_path,
from_library,
out_target_dir.clone(),
)
.with_content(
DocumentContent::OverloadedFunctions(funcs),
ident,
),
);
}
}
};
Ok(files)
})
.collect::<eyre::Result<Vec<_>>>()?;
Ok(documents)
})?;
// Flatten results and apply preprocessors to files
let documents = self
.preprocessors
.iter()
.try_fold(documents.into_iter().flatten().collect_vec(), |docs, p| {
p.preprocess(docs)
})?;
// Sort the results and filter libraries.
let documents = documents
.into_iter()
.sorted_by(|doc1, doc2| {
doc1.item_path.display().to_string().cmp(&doc2.item_path.display().to_string())
})
.filter(|d| !d.from_library || self.include_libraries)
.collect_vec();
// Write mdbook related files
self.write_mdbook(documents)?;
// Build the book if requested
if self.should_build {
MDBook::load(self.out_dir().wrap_err("failed to construct output directory")?)
.and_then(|book| book.build())
.map_err(|err| eyre::eyre!("failed to build book: {err:?}"))?;
}
Ok(())
}
fn write_mdbook(&self, documents: Vec<Document>) -> eyre::Result<()> {
let out_dir = self.out_dir().wrap_err("failed to construct output directory")?;
let out_dir_src = out_dir.join(Self::SRC);
fs::create_dir_all(&out_dir_src)?;
// Write readme content if any
let homepage_content = {
// Default to the homepage README if it's available.
// If not, use the src README as a fallback.
let homepage_or_src_readme = self
.config
.homepage
.as_ref()
.map(|homepage| self.root.join(homepage))
.unwrap_or_else(|| self.sources.join(Self::README));
// Grab the root readme.
let root_readme = self.root.join(Self::README);
// Check to see if there is a 'homepage' option specified in config.
// If not, fall back to src and root readme files, in that order.
if homepage_or_src_readme.exists() {
fs::read_to_string(homepage_or_src_readme)?
} else if root_readme.exists() {
fs::read_to_string(root_readme)?
} else {
String::new()
}
};
let readme_path = out_dir_src.join(Self::README);
fs::write(readme_path, homepage_content)?;
// Write summary and section readmes
let mut summary = BufWriter::default();
summary.write_title("Summary")?;
summary.write_link_list_item("Home", Self::README, 0)?;
self.write_summary_section(&mut summary, &documents.iter().collect::<Vec<_>>(), None, 0)?;
fs::write(out_dir_src.join(Self::SUMMARY), summary.finish())?;
// Write solidity syntax highlighting
fs::write(out_dir.join("solidity.min.js"), include_str!("../static/solidity.min.js"))?;
// Write css files
fs::write(out_dir.join("book.css"), include_str!("../static/book.css"))?;
// Write book config
fs::write(out_dir.join("book.toml"), self.book_config()?)?;
// Write .gitignore
let gitignore = "book/";
fs::write(out_dir.join(".gitignore"), gitignore)?;
// Write doc files
for document in documents {
fs::create_dir_all(
document
.target_path
.parent()
.ok_or_else(|| eyre::format_err!("empty target path; noop"))?,
)?;
fs::write(&document.target_path, document.as_doc()?)?;
}
Ok(())
}
fn book_config(&self) -> eyre::Result<String> {
// Read the default book first
let mut book: value::Table = toml::from_str(include_str!("../static/book.toml"))?;
book["book"]
.as_table_mut()
.unwrap()
.insert(String::from("title"), self.config.title.clone().into());
if let Some(ref repo) = self.config.repository {
// Create the full repository URL.
let git_repo_url = if let Some(path) = &self.config.path {
// If path is specified, append it to the repository URL.
format!("{}/{}", repo.trim_end_matches('/'), path.trim_start_matches('/'))
} else {
// If no path specified, use repository URL as-is.
repo.clone()
};
book["output"].as_table_mut().unwrap()["html"]
.as_table_mut()
.unwrap()
.insert(String::from("git-repository-url"), git_repo_url.into());
}
// Attempt to find the user provided book path
let book_path = {
if self.config.book.is_file() {
Some(self.config.book.clone())
} else {
let book_path = self.config.book.join("book.toml");
if book_path.is_file() { Some(book_path) } else { None }
}
};
// Merge two book configs
if let Some(book_path) = book_path {
merge_toml_table(&mut book, toml::from_str(&fs::read_to_string(book_path)?)?);
}
Ok(toml::to_string_pretty(&book)?)
}
fn write_summary_section(
&self,
summary: &mut BufWriter,
files: &[&Document],
base_path: Option<&Path>,
depth: usize,
) -> eyre::Result<()> {
if files.is_empty() {
return Ok(());
}
if let Some(path) = base_path {
let title = path.iter().next_back().unwrap().to_string_lossy();
if depth == 1 {
summary.write_title(&title)?;
} else {
let summary_path = path.join(Self::README);
summary.write_link_list_item(
&format!("❱ {title}"),
&summary_path.display().to_string(),
depth - 1,
)?;
}
}
// Group entries by path depth
let mut grouped = HashMap::new();
for file in files {
let path = file.item_path.strip_prefix(&self.root)?;
let key = path.iter().take(depth + 1).collect::<PathBuf>();
grouped.entry(key).or_insert_with(Vec::new).push(*file);
}
// Sort entries by path depth
let grouped = grouped.into_iter().sorted_by(|(lhs, _), (rhs, _)| {
let lhs_at_end = lhs.extension().map(|ext| ext == Self::SOL_EXT).unwrap_or_default();
let rhs_at_end = rhs.extension().map(|ext| ext == Self::SOL_EXT).unwrap_or_default();
if lhs_at_end == rhs_at_end {
lhs.cmp(rhs)
} else if lhs_at_end {
Ordering::Greater
} else {
Ordering::Less
}
});
let out_dir = self.out_dir().wrap_err("failed to construct output directory")?;
let mut readme = BufWriter::new("\n\n# Contents\n");
for (path, files) in grouped {
if path.extension().map(|ext| ext == Self::SOL_EXT).unwrap_or_default() {
for file in files {
let ident = &file.identity;
let summary_path = &file.target_path.strip_prefix(out_dir.join(Self::SRC))?;
summary.write_link_list_item(
ident,
&summary_path.display().to_string(),
depth,
)?;
let readme_path = base_path
.map(|path| summary_path.strip_prefix(path))
.transpose()?
.unwrap_or(summary_path);
readme.write_link_list_item(ident, &readme_path.display().to_string(), 0)?;
}
} else {
let name = path.iter().next_back().unwrap().to_string_lossy();
let readme_path = Path::new("/").join(&path).display().to_string();
readme.write_link_list_item(&name, &readme_path, 0)?;
self.write_summary_section(summary, &files, Some(&path), depth + 1)?;
}
}
if !readme.is_empty()
&& let Some(path) = base_path
{
let path = out_dir.join(Self::SRC).join(path);
fs::create_dir_all(&path)?;
fs::write(path.join(Self::README), readme.finish())?;
}
Ok(())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/lib.rs | crates/doc/src/lib.rs | //! The module for generating Solidity documentation.
//!
//! See [`DocBuilder`].
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate foundry_common;
#[macro_use]
extern crate tracing;
mod builder;
pub use builder::DocBuilder;
mod document;
pub use document::Document;
mod helpers;
mod parser;
pub use parser::{
Comment, CommentTag, Comments, CommentsRef, ParseItem, ParseSource, Parser, error,
};
mod preprocessor;
pub use preprocessor::*;
mod writer;
pub use writer::{AsDoc, AsDocResult, BufWriter, Markdown};
pub use mdbook;
// old formatter dependencies
pub mod solang_ext;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/document.rs | crates/doc/src/document.rs | use crate::{DocBuilder, ParseItem, PreprocessorId, PreprocessorOutput};
use alloy_primitives::map::HashMap;
use std::{
path::{Path, PathBuf},
slice::IterMut,
sync::Mutex,
};
/// The wrapper around the [ParseItem] containing additional
/// information the original item and extra context for outputting it.
#[derive(Debug)]
pub struct Document {
/// The underlying parsed items.
pub content: DocumentContent,
/// The original item path.
pub item_path: PathBuf,
/// The original item file content.
pub item_content: String,
/// The target path where the document will be written.
pub target_path: PathBuf,
/// The document display identity.
pub identity: String,
/// The preprocessors results.
context: Mutex<HashMap<PreprocessorId, PreprocessorOutput>>,
/// Whether the document is from external library.
pub from_library: bool,
/// The target directory for the doc output.
pub out_target_dir: PathBuf,
}
impl Document {
/// Create new instance of [Document].
pub fn new(
item_path: PathBuf,
target_path: PathBuf,
from_library: bool,
out_target_dir: PathBuf,
) -> Self {
Self {
item_path,
target_path,
from_library,
item_content: String::default(),
identity: String::default(),
content: DocumentContent::Empty,
out_target_dir,
context: Mutex::new(HashMap::default()),
}
}
/// Set content and identity on the [Document].
#[must_use]
pub fn with_content(mut self, content: DocumentContent, identity: String) -> Self {
self.content = content;
self.identity = identity;
self
}
/// Add a preprocessor result to inner document context.
pub fn add_context(&self, id: PreprocessorId, output: PreprocessorOutput) {
let mut context = self.context.lock().expect("failed to lock context");
context.insert(id, output);
}
/// Read preprocessor result from context
pub fn get_from_context(&self, id: PreprocessorId) -> Option<PreprocessorOutput> {
let context = self.context.lock().expect("failed to lock context");
context.get(&id).cloned()
}
fn try_relative_output_path(&self) -> Option<&Path> {
self.target_path.strip_prefix(&self.out_target_dir).ok()?.strip_prefix(DocBuilder::SRC).ok()
}
/// Returns the relative path of the document output.
pub fn relative_output_path(&self) -> &Path {
self.try_relative_output_path().unwrap_or(self.target_path.as_path())
}
}
/// The content of the document.
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub enum DocumentContent {
Empty,
Single(ParseItem),
Constants(Vec<ParseItem>),
OverloadedFunctions(Vec<ParseItem>),
}
impl DocumentContent {
pub(crate) fn len(&self) -> usize {
match self {
Self::Empty => 0,
Self::Single(_) => 1,
Self::Constants(items) => items.len(),
Self::OverloadedFunctions(items) => items.len(),
}
}
pub(crate) fn get_mut(&mut self, index: usize) -> Option<&mut ParseItem> {
match self {
Self::Empty => None,
Self::Single(item) => {
if index == 0 {
Some(item)
} else {
None
}
}
Self::Constants(items) => items.get_mut(index),
Self::OverloadedFunctions(items) => items.get_mut(index),
}
}
pub fn iter_items(&self) -> ParseItemIter<'_> {
match self {
Self::Empty => ParseItemIter { next: None, other: None },
Self::Single(item) => ParseItemIter { next: Some(item), other: None },
Self::Constants(items) => ParseItemIter { next: None, other: Some(items.iter()) },
Self::OverloadedFunctions(items) => {
ParseItemIter { next: None, other: Some(items.iter()) }
}
}
}
pub fn iter_items_mut(&mut self) -> ParseItemIterMut<'_> {
match self {
Self::Empty => ParseItemIterMut { next: None, other: None },
Self::Single(item) => ParseItemIterMut { next: Some(item), other: None },
Self::Constants(items) => {
ParseItemIterMut { next: None, other: Some(items.iter_mut()) }
}
Self::OverloadedFunctions(items) => {
ParseItemIterMut { next: None, other: Some(items.iter_mut()) }
}
}
}
}
#[derive(Debug)]
pub struct ParseItemIter<'a> {
next: Option<&'a ParseItem>,
other: Option<std::slice::Iter<'a, ParseItem>>,
}
impl<'a> Iterator for ParseItemIter<'a> {
type Item = &'a ParseItem;
fn next(&mut self) -> Option<Self::Item> {
if let Some(next) = self.next.take() {
return Some(next);
}
if let Some(other) = self.other.as_mut() {
return other.next();
}
None
}
}
#[derive(Debug)]
pub struct ParseItemIterMut<'a> {
next: Option<&'a mut ParseItem>,
other: Option<IterMut<'a, ParseItem>>,
}
impl<'a> Iterator for ParseItemIterMut<'a> {
type Item = &'a mut ParseItem;
fn next(&mut self) -> Option<Self::Item> {
if let Some(next) = self.next.take() {
return Some(next);
}
if let Some(other) = self.other.as_mut() {
return other.next();
}
None
}
}
/// Read the preprocessor output variant from document context.
/// Returns [None] if there is no output.
macro_rules! read_context {
($doc:expr, $id:expr, $variant:ident) => {
$doc.get_from_context($id).and_then(|out| match out {
// Only a single variant is matched. Otherwise the code is invalid.
PreprocessorOutput::$variant(inner) => Some(inner),
_ => None,
})
};
}
pub(crate) use read_context;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/helpers.rs | crates/doc/src/helpers.rs | use itertools::Itertools;
use solang_parser::pt::FunctionDefinition;
use toml::{Value, value::Table};
/// Generates a function signature with parameter types (e.g., "functionName(type1,type2)").
/// Returns the function name without parameters if the function has no parameters.
pub fn function_signature(func: &FunctionDefinition) -> String {
let func_name = func.name.as_ref().map_or(func.ty.to_string(), |n| n.name.to_owned());
if func.params.is_empty() {
return func_name;
}
format!(
"{}({})",
func_name,
func.params
.iter()
.map(|p| p.1.as_ref().map(|p| p.ty.to_string()).unwrap_or_default())
.join(",")
)
}
/// Merge original toml table with the override.
pub(crate) fn merge_toml_table(table: &mut Table, override_table: Table) {
for (key, override_value) in override_table {
match table.get_mut(&key) {
Some(Value::Table(inner_table)) => {
// Override value must be a table, otherwise discard
if let Value::Table(inner_override) = override_value {
merge_toml_table(inner_table, inner_override);
}
}
Some(Value::Array(inner_array)) => {
// Override value must be an array, otherwise discard
if let Value::Array(inner_override) = override_value {
for entry in inner_override {
if !inner_array.contains(&entry) {
inner_array.push(entry);
}
}
}
}
_ => {
table.insert(key, override_value);
}
};
}
}
#[cfg(test)]
mod tests {
use super::*;
use solang_parser::{
parse,
pt::{ContractPart, SourceUnit, SourceUnitPart},
};
#[test]
fn test_function_signature_no_params() {
let (source_unit, _) = parse(
r#"
contract Test {
function foo() public {}
}
"#,
0,
)
.unwrap();
let func = extract_function(&source_unit);
assert_eq!(function_signature(func), "foo");
}
#[test]
fn test_function_signature_with_params() {
let (source_unit, _) = parse(
r#"
contract Test {
function transfer(address to, uint256 amount) public {}
}
"#,
0,
)
.unwrap();
let func = extract_function(&source_unit);
assert_eq!(function_signature(func), "transfer(address,uint256)");
}
#[test]
fn test_function_signature_constructor() {
let (source_unit, _) = parse(
r#"
contract Test {
constructor(address owner) {}
}
"#,
0,
)
.unwrap();
let func = extract_function(&source_unit);
assert_eq!(function_signature(func), "constructor(address)");
}
/// Helper to extract the first function from a parsed source unit
fn extract_function(source_unit: &SourceUnit) -> &FunctionDefinition {
for part in &source_unit.0 {
if let SourceUnitPart::ContractDefinition(contract) = part {
for part in &contract.parts {
if let ContractPart::FunctionDefinition(func) = part {
return func;
}
}
}
}
panic!("No function found in source unit");
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/writer/as_doc.rs | crates/doc/src/writer/as_doc.rs | use crate::{
CONTRACT_INHERITANCE_ID, CommentTag, Comments, CommentsRef, DEPLOYMENTS_ID, Document,
GIT_SOURCE_ID, INHERITDOC_ID, Markdown, PreprocessorOutput,
document::{DocumentContent, read_context},
helpers::function_signature,
parser::ParseSource,
solang_ext::SafeUnwrap,
writer::BufWriter,
};
use itertools::Itertools;
use solang_parser::pt::{Base, FunctionDefinition};
use std::path::Path;
/// The result of [`AsDoc::as_doc`].
pub type AsDocResult = Result<String, std::fmt::Error>;
/// A trait for formatting a parse unit as documentation.
pub trait AsDoc {
/// Formats a parse tree item into a doc string.
fn as_doc(&self) -> AsDocResult;
}
impl AsDoc for String {
fn as_doc(&self) -> AsDocResult {
Ok(self.to_owned())
}
}
impl AsDoc for Comments {
fn as_doc(&self) -> AsDocResult {
CommentsRef::from(self).as_doc()
}
}
impl AsDoc for CommentsRef<'_> {
// TODO: support other tags
fn as_doc(&self) -> AsDocResult {
let mut writer = BufWriter::default();
// Write title tag(s)
let titles = self.include_tag(CommentTag::Title);
if !titles.is_empty() {
writer.write_bold(&format!("Title{}:", if titles.len() == 1 { "" } else { "s" }))?;
writer.writeln_raw(titles.iter().map(|t| &t.value).join(", "))?;
writer.writeln()?;
}
// Write author tag(s)
let authors = self.include_tag(CommentTag::Author);
if !authors.is_empty() {
writer.write_bold(&format!("Author{}:", if authors.len() == 1 { "" } else { "s" }))?;
writer.writeln_raw(authors.iter().map(|a| &a.value).join(", "))?;
writer.writeln()?;
}
// Write notice tags
let notices = self.include_tag(CommentTag::Notice);
for n in notices.iter() {
writer.writeln_raw(&n.value)?;
writer.writeln()?;
}
// Write dev tags
let devs = self.include_tag(CommentTag::Dev);
for d in devs.iter() {
writer.write_dev_content(&d.value)?;
writer.writeln()?;
}
// Write custom tags
let customs = self.get_custom_tags();
if !customs.is_empty() {
writer.write_bold(&format!("Note{}:", if customs.len() == 1 { "" } else { "s" }))?;
for c in customs.iter() {
writer.writeln_raw(format!(
"{}{}: {}",
if customs.len() == 1 { "" } else { "- " },
&c.tag,
&c.value
))?;
writer.writeln()?;
}
}
Ok(writer.finish())
}
}
impl AsDoc for Base {
fn as_doc(&self) -> AsDocResult {
Ok(self.name.identifiers.iter().map(|ident| ident.name.to_owned()).join("."))
}
}
impl AsDoc for Document {
fn as_doc(&self) -> AsDocResult {
let mut writer = BufWriter::default();
match &self.content {
DocumentContent::OverloadedFunctions(items) => {
writer
.write_title(&format!("function {}", items.first().unwrap().source.ident()))?;
if let Some(git_source) = read_context!(self, GIT_SOURCE_ID, GitSource) {
writer.write_link("Git Source", &git_source)?;
writer.writeln()?;
}
for item in items {
let func = item.as_function().unwrap();
let heading = function_signature(func).replace(',', ", ");
writer.write_heading(&heading)?;
writer.write_section(&item.comments, &item.code)?;
}
}
DocumentContent::Constants(items) => {
writer.write_title("Constants")?;
if let Some(git_source) = read_context!(self, GIT_SOURCE_ID, GitSource) {
writer.write_link("Git Source", &git_source)?;
writer.writeln()?;
}
for item in items {
let var = item.as_variable().unwrap();
writer.write_heading(&var.name.safe_unwrap().name)?;
writer.write_section(&item.comments, &item.code)?;
}
}
DocumentContent::Single(item) => {
writer.write_title(&item.source.ident())?;
if let Some(git_source) = read_context!(self, GIT_SOURCE_ID, GitSource) {
writer.write_link("Git Source", &git_source)?;
writer.writeln()?;
}
if let Some(deployments) = read_context!(self, DEPLOYMENTS_ID, Deployments) {
writer.write_deployments_table(deployments)?;
}
match &item.source {
ParseSource::Contract(contract) => {
if !contract.base.is_empty() {
writer.write_bold("Inherits:")?;
let mut bases = vec![];
let linked =
read_context!(self, CONTRACT_INHERITANCE_ID, ContractInheritance);
for base in &contract.base {
let base_doc = base.as_doc()?;
let base_ident = &base.name.identifiers.last().unwrap().name;
let link = linked
.as_ref()
.and_then(|link| {
link.get(base_ident).map(|path| {
let path = if cfg!(windows) {
Path::new("\\").join(path)
} else {
Path::new("/").join(path)
};
Markdown::Link(&base_doc, &path.display().to_string())
.as_doc()
})
})
.transpose()?
.unwrap_or(base_doc);
bases.push(link);
}
writer.writeln_raw(bases.join(", "))?;
writer.writeln()?;
}
writer.writeln_doc(&item.comments)?;
if let Some(state_vars) = item.variables() {
writer.write_subtitle("State Variables")?;
state_vars.into_iter().try_for_each(|(item, comments, code)| {
let comments = comments.merge_inheritdoc(
&item.name.safe_unwrap().name,
read_context!(self, INHERITDOC_ID, Inheritdoc),
);
writer.write_heading(&item.name.safe_unwrap().name)?;
writer.write_section(&comments, code)?;
writer.writeln()
})?;
}
if let Some(funcs) = item.functions() {
writer.write_subtitle("Functions")?;
for (func, comments, code) in &funcs {
self.write_function(&mut writer, func, comments, code)?;
}
}
if let Some(events) = item.events() {
writer.write_subtitle("Events")?;
events.into_iter().try_for_each(|(item, comments, code)| {
writer.write_heading(&item.name.safe_unwrap().name)?;
writer.write_section(comments, code)?;
writer.try_write_events_table(&item.fields, comments)
})?;
}
if let Some(errors) = item.errors() {
writer.write_subtitle("Errors")?;
errors.into_iter().try_for_each(|(item, comments, code)| {
writer.write_heading(&item.name.safe_unwrap().name)?;
writer.write_section(comments, code)?;
writer.try_write_errors_table(&item.fields, comments)
})?;
}
if let Some(structs) = item.structs() {
writer.write_subtitle("Structs")?;
structs.into_iter().try_for_each(|(item, comments, code)| {
writer.write_heading(&item.name.safe_unwrap().name)?;
writer.write_section(comments, code)?;
writer.try_write_properties_table(&item.fields, comments)
})?;
}
if let Some(enums) = item.enums() {
writer.write_subtitle("Enums")?;
enums.into_iter().try_for_each(|(item, comments, code)| {
writer.write_heading(&item.name.safe_unwrap().name)?;
writer.write_section(comments, code)?;
writer.try_write_variant_table(item, comments)
})?;
}
}
ParseSource::Function(func) => {
// TODO: cleanup
// Write function docs
writer.writeln_doc(
&item.comments.exclude_tags(&[CommentTag::Param, CommentTag::Return]),
)?;
// Write function header
writer.write_code(&item.code)?;
// Write function parameter comments in a table
let params =
func.params.iter().filter_map(|p| p.1.as_ref()).collect::<Vec<_>>();
writer.try_write_param_table(CommentTag::Param, ¶ms, &item.comments)?;
// Write function return parameter comments in a table
let returns =
func.returns.iter().filter_map(|p| p.1.as_ref()).collect::<Vec<_>>();
writer.try_write_param_table(
CommentTag::Return,
&returns,
&item.comments,
)?;
writer.writeln()?;
}
ParseSource::Struct(ty) => {
writer.write_section(&item.comments, &item.code)?;
writer.try_write_properties_table(&ty.fields, &item.comments)?;
}
ParseSource::Event(ev) => {
writer.write_section(&item.comments, &item.code)?;
writer.try_write_events_table(&ev.fields, &item.comments)?;
}
ParseSource::Error(err) => {
writer.write_section(&item.comments, &item.code)?;
writer.try_write_errors_table(&err.fields, &item.comments)?;
}
ParseSource::Variable(_) | ParseSource::Enum(_) | ParseSource::Type(_) => {
writer.write_section(&item.comments, &item.code)?;
}
}
}
DocumentContent::Empty => (),
};
Ok(writer.finish())
}
}
impl Document {
/// Writes a function to the buffer.
fn write_function(
&self,
writer: &mut BufWriter,
func: &FunctionDefinition,
comments: &Comments,
code: &str,
) -> Result<(), std::fmt::Error> {
let func_sign = function_signature(func);
let func_name = func.name.as_ref().map_or(func.ty.to_string(), |n| n.name.to_owned());
let comments =
comments.merge_inheritdoc(&func_sign, read_context!(self, INHERITDOC_ID, Inheritdoc));
// Write function name
writer.write_heading(&func_name)?;
writer.writeln()?;
// Write function docs
writer.writeln_doc(&comments.exclude_tags(&[CommentTag::Param, CommentTag::Return]))?;
// Write function header
writer.write_code(code)?;
// Write function parameter comments in a table
let params = func.params.iter().filter_map(|p| p.1.as_ref()).collect::<Vec<_>>();
writer.try_write_param_table(CommentTag::Param, ¶ms, &comments)?;
// Write function return parameter comments in a table
let returns = func.returns.iter().filter_map(|p| p.1.as_ref()).collect::<Vec<_>>();
writer.try_write_param_table(CommentTag::Return, &returns, &comments)?;
writer.writeln()?;
Ok(())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/writer/buf_writer.rs | crates/doc/src/writer/buf_writer.rs | use crate::{AsDoc, CommentTag, Comments, Deployment, Markdown, writer::traits::ParamLike};
use itertools::Itertools;
use solang_parser::pt::{
EnumDefinition, ErrorParameter, EventParameter, Parameter, VariableDeclaration,
};
use std::{
fmt::{self, Display, Write},
sync::LazyLock,
};
/// Solidity language name.
const SOLIDITY: &str = "solidity";
/// Headers and separator for rendering parameter table.
const PARAM_TABLE_HEADERS: &[&str] = &["Name", "Type", "Description"];
static PARAM_TABLE_SEPARATOR: LazyLock<String> =
LazyLock::new(|| PARAM_TABLE_HEADERS.iter().map(|h| "-".repeat(h.len())).join("|"));
/// Headers and separator for rendering the deployments table.
const DEPLOYMENTS_TABLE_HEADERS: &[&str] = &["Network", "Address"];
static DEPLOYMENTS_TABLE_SEPARATOR: LazyLock<String> =
LazyLock::new(|| DEPLOYMENTS_TABLE_HEADERS.iter().map(|h| "-".repeat(h.len())).join("|"));
/// Headers and separator for rendering the variants table.
const VARIANTS_TABLE_HEADERS: &[&str] = &["Name", "Description"];
static VARIANTS_TABLE_SEPARATOR: LazyLock<String> =
LazyLock::new(|| VARIANTS_TABLE_HEADERS.iter().map(|h| "-".repeat(h.len())).join("|"));
/// The buffered writer.
/// Writes various display items into the internal buffer.
#[derive(Debug, Default)]
pub struct BufWriter {
buf: String,
}
impl BufWriter {
/// Create new instance of [BufWriter] from [ToString].
pub fn new(content: impl ToString) -> Self {
Self { buf: content.to_string() }
}
/// Returns true if the buffer is empty.
pub fn is_empty(&self) -> bool {
self.buf.is_empty()
}
/// Write [AsDoc] implementation to the buffer.
pub fn write_doc<T: AsDoc>(&mut self, doc: &T) -> fmt::Result {
write!(self.buf, "{}", doc.as_doc()?)
}
/// Write [AsDoc] implementation to the buffer with newline.
pub fn writeln_doc<T: AsDoc>(&mut self, doc: &T) -> fmt::Result {
writeln!(self.buf, "{}", doc.as_doc()?)
}
/// Writes raw content to the buffer.
pub fn write_raw<T: Display>(&mut self, content: T) -> fmt::Result {
write!(self.buf, "{content}")
}
/// Writes raw content to the buffer with newline.
pub fn writeln_raw<T: Display>(&mut self, content: T) -> fmt::Result {
writeln!(self.buf, "{content}")
}
/// Writes newline to the buffer.
pub fn writeln(&mut self) -> fmt::Result {
writeln!(self.buf)
}
/// Writes a title to the buffer formatted as [Markdown::H1].
pub fn write_title(&mut self, title: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::H1(title))
}
/// Writes a subtitle to the bugger formatted as [Markdown::H2].
pub fn write_subtitle(&mut self, subtitle: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::H2(subtitle))
}
/// Writes heading to the buffer formatted as [Markdown::H3].
pub fn write_heading(&mut self, heading: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::H3(heading))
}
/// Writes text in italics to the buffer formatted as [Markdown::Italic].
pub fn write_italic(&mut self, text: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::Italic(text))
}
/// Writes dev content to the buffer, handling markdown lists properly.
/// If the content contains markdown lists, it formats them correctly.
/// Otherwise, it writes the content in italics.
pub fn write_dev_content(&mut self, text: &str) -> fmt::Result {
for line in text.lines() {
writeln!(self.buf, "{line}")?;
}
Ok(())
}
/// Writes bold text to the buffer formatted as [Markdown::Bold].
pub fn write_bold(&mut self, text: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::Bold(text))
}
/// Writes link to the buffer formatted as [Markdown::Link].
pub fn write_link(&mut self, name: &str, path: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::Link(name, path))
}
/// Writes a list item to the buffer indented by specified depth.
pub fn write_list_item(&mut self, item: &str, depth: usize) -> fmt::Result {
let indent = " ".repeat(depth * 2);
writeln!(self.buf, "{indent}- {item}")
}
/// Writes a link to the buffer as a list item.
pub fn write_link_list_item(&mut self, name: &str, path: &str, depth: usize) -> fmt::Result {
let link = Markdown::Link(name, path);
self.write_list_item(&link.as_doc()?, depth)
}
/// Writes a solidity code block to the buffer.
pub fn write_code(&mut self, code: &str) -> fmt::Result {
writeln!(self.buf, "{}", Markdown::CodeBlock(SOLIDITY, code))
}
/// Write an item section to the buffer. First write comments, the item itself as code.
pub fn write_section(&mut self, comments: &Comments, code: &str) -> fmt::Result {
self.writeln_raw(comments.as_doc()?)?;
self.write_code(code)?;
self.writeln()
}
/// Tries to write the table to the buffer.
/// Doesn't write anything if either params or comments are empty.
fn try_write_table<T>(
&mut self,
tag: CommentTag,
params: &[T],
comments: &Comments,
heading: &str,
) -> fmt::Result
where
T: ParamLike,
{
let comments = comments.include_tag(tag.clone());
// There is nothing to write.
if params.is_empty() || comments.is_empty() {
return Ok(());
}
self.write_bold(heading)?;
self.writeln()?;
self.write_piped(&PARAM_TABLE_HEADERS.join("|"))?;
self.write_piped(&PARAM_TABLE_SEPARATOR)?;
for (index, param) in params.iter().enumerate() {
let param_name = param.name();
let mut comment = param_name.as_ref().and_then(|name| {
comments.iter().find_map(|comment| comment.match_first_word(name))
});
// If it's a return tag and couldn't match by first word,
// lookup the doc by index.
if comment.is_none() && matches!(tag, CommentTag::Return) {
comment = comments.get(index).map(|c| &*c.value);
}
let row = [
Markdown::Code(param_name.unwrap_or("<none>")).as_doc()?,
Markdown::Code(¶m.type_name()).as_doc()?,
comment.unwrap_or_default().replace('\n', " "),
];
self.write_piped(&row.join("|"))?;
}
self.writeln()?;
Ok(())
}
/// Tries to write the properties table to the buffer.
/// Doesn't write anything if either params or comments are empty.
pub fn try_write_properties_table(
&mut self,
params: &[VariableDeclaration],
comments: &Comments,
) -> fmt::Result {
self.try_write_table(CommentTag::Param, params, comments, "Properties")
}
/// Tries to write the variant table to the buffer.
/// Doesn't write anything if either params or comments are empty.
pub fn try_write_variant_table(
&mut self,
params: &EnumDefinition,
comments: &Comments,
) -> fmt::Result {
let comments = comments.include_tags(&[CommentTag::Param]);
// There is nothing to write.
if comments.is_empty() {
return Ok(());
}
self.write_bold("Variants")?;
self.writeln()?;
self.write_piped(&VARIANTS_TABLE_HEADERS.join("|"))?;
self.write_piped(&VARIANTS_TABLE_SEPARATOR)?;
for value in ¶ms.values {
let param_name = value.as_ref().map(|v| v.name.clone());
let comment = param_name.as_ref().and_then(|name| {
comments.iter().find_map(|comment| comment.match_first_word(name))
});
let row = [
Markdown::Code(¶m_name.unwrap_or("<none>".to_string())).as_doc()?,
comment.unwrap_or_default().replace('\n', " "),
];
self.write_piped(&row.join("|"))?;
}
self.writeln()?;
Ok(())
}
/// Tries to write the parameters table to the buffer.
/// Doesn't write anything if either params or comments are empty.
pub fn try_write_events_table(
&mut self,
params: &[EventParameter],
comments: &Comments,
) -> fmt::Result {
self.try_write_table(CommentTag::Param, params, comments, "Parameters")
}
/// Tries to write the parameters table to the buffer.
/// Doesn't write anything if either params or comments are empty.
pub fn try_write_errors_table(
&mut self,
params: &[ErrorParameter],
comments: &Comments,
) -> fmt::Result {
self.try_write_table(CommentTag::Param, params, comments, "Parameters")
}
/// Tries to write the parameters table to the buffer.
/// Doesn't write anything if either params or comments are empty.
pub fn try_write_param_table(
&mut self,
tag: CommentTag,
params: &[&Parameter],
comments: &Comments,
) -> fmt::Result {
let heading = match &tag {
CommentTag::Param => "Parameters",
CommentTag::Return => "Returns",
_ => return Err(fmt::Error),
};
self.try_write_table(tag, params, comments, heading)
}
/// Writes the deployment table to the buffer.
pub fn write_deployments_table(&mut self, deployments: Vec<Deployment>) -> fmt::Result {
self.write_bold("Deployments")?;
self.writeln()?;
self.write_piped(&DEPLOYMENTS_TABLE_HEADERS.join("|"))?;
self.write_piped(&DEPLOYMENTS_TABLE_SEPARATOR)?;
for deployment in deployments {
let mut network = deployment.network.ok_or(fmt::Error)?;
network[0..1].make_ascii_uppercase();
let row = [
Markdown::Bold(&network).as_doc()?,
Markdown::Code(&format!("{:?}", deployment.address)).as_doc()?,
];
self.write_piped(&row.join("|"))?;
}
self.writeln()?;
Ok(())
}
/// Write content to the buffer surrounded by pipes.
pub fn write_piped(&mut self, content: &str) -> fmt::Result {
self.write_raw("|")?;
self.write_raw(content)?;
self.writeln_raw("|")
}
/// Finish and return underlying buffer.
pub fn finish(self) -> String {
self.buf
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/writer/markdown.rs | crates/doc/src/writer/markdown.rs | use crate::{AsDoc, AsDocResult};
/// The markdown format.
#[derive(Debug)]
pub enum Markdown<'a> {
/// H1 heading item.
H1(&'a str),
/// H2 heading item.
H2(&'a str),
/// H3 heading item.
H3(&'a str),
/// Italic item.
Italic(&'a str),
/// Bold item.
Bold(&'a str),
/// Link item.
Link(&'a str, &'a str),
/// Code item.
Code(&'a str),
/// Code block item.
CodeBlock(&'a str, &'a str),
}
impl AsDoc for Markdown<'_> {
fn as_doc(&self) -> AsDocResult {
let doc = match self {
Self::H1(val) => format!("# {val}"),
Self::H2(val) => format!("## {val}"),
Self::H3(val) => format!("### {val}"),
Self::Italic(val) => format!("*{val}*"),
Self::Bold(val) => format!("**{val}**"),
Self::Link(val, link) => format!("[{val}]({link})"),
Self::Code(val) => format!("`{val}`"),
Self::CodeBlock(lang, val) => format!("```{lang}\n{val}\n```"),
};
Ok(doc)
}
}
impl std::fmt::Display for Markdown<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("{}", self.as_doc()?))
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/writer/mod.rs | crates/doc/src/writer/mod.rs | //! The module for writing and formatting various parse tree items.
mod as_doc;
mod buf_writer;
mod markdown;
pub use as_doc::{AsDoc, AsDocResult};
pub use buf_writer::BufWriter;
pub use markdown::Markdown;
mod traits;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/writer/traits.rs | crates/doc/src/writer/traits.rs | //! Helper traits for writing documentation.
use solang_parser::pt::Expression;
/// Helper trait to abstract over a solang type that can be documented as parameter
pub(crate) trait ParamLike {
/// Returns the type of the parameter.
fn ty(&self) -> &Expression;
/// Returns the type as a string.
fn type_name(&self) -> String {
self.ty().to_string()
}
/// Returns the identifier of the parameter.
fn name(&self) -> Option<&str>;
}
impl ParamLike for solang_parser::pt::Parameter {
fn ty(&self) -> &Expression {
&self.ty
}
fn name(&self) -> Option<&str> {
self.name.as_ref().map(|id| id.name.as_str())
}
}
impl ParamLike for solang_parser::pt::VariableDeclaration {
fn ty(&self) -> &Expression {
&self.ty
}
fn name(&self) -> Option<&str> {
self.name.as_ref().map(|id| id.name.as_str())
}
}
impl ParamLike for solang_parser::pt::EventParameter {
fn ty(&self) -> &Expression {
&self.ty
}
fn name(&self) -> Option<&str> {
self.name.as_ref().map(|id| id.name.as_str())
}
}
impl ParamLike for solang_parser::pt::ErrorParameter {
fn ty(&self) -> &Expression {
&self.ty
}
fn name(&self) -> Option<&str> {
self.name.as_ref().map(|id| id.name.as_str())
}
}
impl<T> ParamLike for &T
where
T: ParamLike,
{
fn ty(&self) -> &Expression {
T::ty(*self)
}
fn name(&self) -> Option<&str> {
T::name(*self)
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/preprocessor/contract_inheritance.rs | crates/doc/src/preprocessor/contract_inheritance.rs | use super::{Preprocessor, PreprocessorId};
use crate::{
Document, ParseSource, PreprocessorOutput, document::DocumentContent, solang_ext::SafeUnwrap,
};
use alloy_primitives::map::HashMap;
use std::path::PathBuf;
/// [ContractInheritance] preprocessor id.
pub const CONTRACT_INHERITANCE_ID: PreprocessorId = PreprocessorId("contract_inheritance");
/// The contract inheritance preprocessor.
///
/// It matches the documents with inner [`ParseSource::Contract`](crate::ParseSource) elements,
/// iterates over their [Base](solang_parser::pt::Base)s and attempts
/// to link them with the paths of the other contract documents.
///
/// This preprocessor writes to [Document]'s context.
#[derive(Debug, Default)]
pub struct ContractInheritance {
/// Whether to capture inherited contracts from libraries.
pub include_libraries: bool,
}
impl Preprocessor for ContractInheritance {
fn id(&self) -> PreprocessorId {
CONTRACT_INHERITANCE_ID
}
fn preprocess(&self, documents: Vec<Document>) -> Result<Vec<Document>, eyre::Error> {
for document in &documents {
if let DocumentContent::Single(ref item) = document.content
&& let ParseSource::Contract(ref contract) = item.source
{
let mut links = HashMap::default();
// Attempt to match bases to other contracts
for base in &contract.base {
let base_ident = base.name.identifiers.last().unwrap().name.clone();
if let Some(linked) = self.try_link_base(&base_ident, &documents) {
links.insert(base_ident, linked);
}
}
if !links.is_empty() {
// Write to context
document.add_context(self.id(), PreprocessorOutput::ContractInheritance(links));
}
}
}
Ok(documents)
}
}
impl ContractInheritance {
fn try_link_base(&self, base: &str, documents: &Vec<Document>) -> Option<PathBuf> {
for candidate in documents {
if candidate.from_library && !self.include_libraries {
continue;
}
if let DocumentContent::Single(ref item) = candidate.content
&& let ParseSource::Contract(ref contract) = item.source
&& base == contract.name.safe_unwrap().name
{
return Some(candidate.relative_output_path().to_path_buf());
}
}
None
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/preprocessor/infer_hyperlinks.rs | crates/doc/src/preprocessor/infer_hyperlinks.rs | use super::{Preprocessor, PreprocessorId};
use crate::{Comments, Document, ParseItem, ParseSource, solang_ext::SafeUnwrap};
use regex::{Captures, Match, Regex};
use std::{
borrow::Cow,
path::{Path, PathBuf},
sync::LazyLock,
};
/// A regex that matches `{identifier-part}` placeholders
///
/// Overloaded functions are referenced by including the exact function arguments in the `part`
/// section of the placeholder.
static RE_INLINE_LINK: LazyLock<Regex> = LazyLock::new(|| {
Regex::new(r"(?m)(\{(?P<xref>xref-)?(?P<identifier>[a-zA-Z_][0-9a-zA-Z_]*)(-(?P<part>[a-zA-Z_][0-9a-zA-Z_-]*))?}(\[(?P<link>(.*?))\])?)").unwrap()
});
/// [InferInlineHyperlinks] preprocessor id.
pub const INFER_INLINE_HYPERLINKS_ID: PreprocessorId = PreprocessorId("infer inline hyperlinks");
/// The infer hyperlinks preprocessor tries to map @dev tags to referenced items
/// Traverses the documents and attempts to find referenced items
/// comments for dev comment tags.
///
/// This preprocessor replaces inline links in comments with the links to the referenced items.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct InferInlineHyperlinks;
impl Preprocessor for InferInlineHyperlinks {
    fn id(&self) -> PreprocessorId {
        INFER_INLINE_HYPERLINKS_ID
    }
    /// Rewrites `{identifier}` / `{xref-...}` placeholders in every item's (and child item's)
    /// comments into markdown links, in place.
    fn preprocess(&self, mut documents: Vec<Document>) -> Result<Vec<Document>, eyre::Error> {
        // traverse all comments and try to match inline links and replace with inline links for
        // markdown
        let mut docs = Vec::with_capacity(documents.len());
        // NOTE(review): documents are popped from the front one at a time, and `inline_doc_links`
        // receives only the *remaining* (not yet processed) documents for external lookups —
        // links pointing at already-processed documents will not resolve. Confirm whether that
        // is intentional. `remove(0)` also makes this loop O(n^2) in the number of documents.
        while !documents.is_empty() {
            let mut document = documents.remove(0);
            let target_path = document.relative_output_path().to_path_buf();
            for idx in 0..document.content.len() {
                // Temporarily take the comments out of the item so we can mutate them while
                // still passing `&document` immutably to `inline_doc_links`.
                let (mut comments, item_children_len) = {
                    let item = document.content.get_mut(idx).unwrap();
                    let comments = std::mem::take(&mut item.comments);
                    let children = item.children.len();
                    (comments, children)
                };
                Self::inline_doc_links(&documents, &target_path, &mut comments, &document);
                document.content.get_mut(idx).unwrap().comments = comments;
                // we also need to iterate over all child items
                // This is a bit horrible but we need to traverse all items in all documents
                for child_idx in 0..item_children_len {
                    let mut comments = {
                        let item = document.content.get_mut(idx).unwrap();
                        std::mem::take(&mut item.children[child_idx].comments)
                    };
                    Self::inline_doc_links(&documents, &target_path, &mut comments, &document);
                    document.content.get_mut(idx).unwrap().children[child_idx].comments = comments;
                }
            }
            docs.push(document);
        }
        Ok(docs)
    }
}
impl InferInlineHyperlinks {
    /// Finds the first match for the given link.
    ///
    /// All items get their own section in the markdown file.
    /// This section uses the identifier of the item: `#functionname`
    ///
    /// Note: the target path is the relative path to the markdown file.
    fn find_match<'a>(
        link: &InlineLink<'a>,
        target_path: &Path,
        items: impl Iterator<Item = &'a ParseItem>,
    ) -> Option<InlineLinkTarget<'a>> {
        for item in items {
            match &item.source {
                ParseSource::Contract(contract) => {
                    let name = &contract.name.safe_unwrap().name;
                    if name == link.identifier {
                        // A link without a `-part` refers to the contract itself.
                        if link.part.is_none() {
                            return Some(InlineLinkTarget::borrowed(
                                name,
                                target_path.to_path_buf(),
                            ));
                        }
                        // try to find the referenced item in the contract's children
                        // (recurses with the same link; the child arms match on `ref_name()`).
                        return Self::find_match(link, target_path, item.children.iter());
                    }
                }
                ParseSource::Function(fun) => {
                    // TODO: handle overloaded functions
                    // functions can be overloaded so we need to keep track of how many matches we
                    // have so we can match the correct one
                    if let Some(id) = &fun.name {
                        // Note: constructors don't have a name
                        if id.name == link.ref_name() {
                            return Some(InlineLinkTarget::borrowed(
                                &id.name,
                                target_path.to_path_buf(),
                            ));
                        }
                    } else if link.ref_name() == "constructor" {
                        return Some(InlineLinkTarget::borrowed(
                            "constructor",
                            target_path.to_path_buf(),
                        ));
                    }
                }
                // Variables are intentionally not linkable here.
                ParseSource::Variable(_) => {}
                ParseSource::Event(ev) => {
                    let ev_name = &ev.name.safe_unwrap().name;
                    if ev_name == link.ref_name() {
                        return Some(InlineLinkTarget::borrowed(
                            ev_name,
                            target_path.to_path_buf(),
                        ));
                    }
                }
                ParseSource::Error(err) => {
                    let err_name = &err.name.safe_unwrap().name;
                    if err_name == link.ref_name() {
                        return Some(InlineLinkTarget::borrowed(
                            err_name,
                            target_path.to_path_buf(),
                        ));
                    }
                }
                ParseSource::Struct(structdef) => {
                    let struct_name = &structdef.name.safe_unwrap().name;
                    if struct_name == link.ref_name() {
                        return Some(InlineLinkTarget::borrowed(
                            struct_name,
                            target_path.to_path_buf(),
                        ));
                    }
                }
                // Enums and type definitions are intentionally not linkable here.
                ParseSource::Enum(_) => {}
                ParseSource::Type(_) => {}
            }
        }
        None
    }
    /// Attempts to convert inline links to markdown links.
    ///
    /// External links (those with a `-part`) are resolved against `documents`; plain links are
    /// resolved against `parent` using `target_path` as the link base. Unresolved links are
    /// left untouched.
    fn inline_doc_links(
        documents: &[Document],
        target_path: &Path,
        comments: &mut Comments,
        parent: &Document,
    ) {
        // loop over all comments in the item
        for comment in comments.iter_mut() {
            // Capture on a snapshot of the comment so borrows stay valid while we rewrite
            // `comment.value` below.
            let val = comment.value.clone();
            // replace all links with inline markdown links
            for link in InlineLink::captures(val.as_str()) {
                let target = if link.is_external() {
                    // find in all documents
                    documents.iter().find_map(|doc| {
                        Self::find_match(
                            &link,
                            doc.relative_output_path(),
                            doc.content.iter_items().flat_map(|item| {
                                Some(item).into_iter().chain(item.children.iter())
                            }),
                        )
                    })
                } else {
                    // find matches in the document
                    Self::find_match(
                        &link,
                        target_path,
                        parent
                            .content
                            .iter_items()
                            .flat_map(|item| Some(item).into_iter().chain(item.children.iter())),
                    )
                };
                if let Some(target) = target {
                    let display_value = link.markdown_link_display_value();
                    let markdown_link = format!("[{display_value}]({target})");
                    // replace the link with the markdown link
                    // (first occurrence only; duplicates are handled on later iterations)
                    comment.value =
                        comment.value.as_str().replacen(link.as_str(), markdown_link.as_str(), 1);
                }
            }
        }
    }
}
/// A resolved link target: the markdown file plus the in-page section anchor.
struct InlineLinkTarget<'a> {
    // Section (anchor) name inside the target markdown file.
    section: Cow<'a, str>,
    // Relative path of the target markdown file.
    target_path: PathBuf,
}
impl<'a> InlineLinkTarget<'a> {
    /// Creates a target that borrows the section name.
    fn borrowed(section: &'a str, target_path: PathBuf) -> Self {
        Self { section: Cow::Borrowed(section), target_path }
    }
}
impl std::fmt::Display for InlineLinkTarget<'_> {
    /// Renders the target as `/<path>#<section>` for use inside a markdown link.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // NOTE: the url should be absolute for markdown and section names are lowercase
        let path = self.target_path.display();
        let anchor = self.section.to_lowercase();
        write!(f, "/{path}#{anchor}")
    }
}
/// A parsed link to an item.
#[derive(Debug)]
struct InlineLink<'a> {
    // The full `{...}[...]` match, used for in-place replacement.
    outer: Match<'a>,
    // The `identifier` capture group.
    identifier: &'a str,
    // The optional `part` capture group (member name, possibly with argument types).
    part: Option<&'a str>,
    // The optional `[link]` display-text capture group.
    link: Option<&'a str>,
}
impl<'a> InlineLink<'a> {
    /// Builds a parsed link from a capture of [`RE_INLINE_LINK`].
    ///
    /// Returns `None` if the mandatory groups (full match, `identifier`) are absent.
    fn from_capture(cap: Captures<'a>) -> Option<Self> {
        let outer = cap.get(1)?;
        let identifier = cap.name("identifier")?.as_str();
        let part = cap.name("part").map(|m| m.as_str());
        let link = cap.name("link").map(|m| m.as_str());
        Some(Self { outer, identifier, part, link })
    }
    /// Iterates over every inline link found in `s`.
    fn captures(s: &'a str) -> impl Iterator<Item = Self> + 'a {
        RE_INLINE_LINK.captures_iter(s).filter_map(Self::from_capture)
    }
    /// Parses the first inline link.
    #[allow(unused)]
    fn capture(s: &'a str) -> Option<Self> {
        RE_INLINE_LINK.captures(s).and_then(Self::from_capture)
    }
    /// Returns the name of the link
    ///
    /// Preference order: explicit `[link]` text, then `identifier-part`, then the identifier.
    fn markdown_link_display_value(&self) -> Cow<'_, str> {
        match (self.link, self.part) {
            (Some(link), _) => Cow::Borrowed(link),
            (None, Some(part)) => Cow::Owned(format!("{}-{}", self.identifier, part)),
            (None, None) => Cow::Borrowed(self.identifier),
        }
    }
    /// Returns the name of the referenced item.
    fn ref_name(&self) -> &str {
        self.exact_identifier().split('-').next().unwrap()
    }
    /// The most specific identifier: the `part` when present, otherwise the `identifier`.
    fn exact_identifier(&self) -> &str {
        self.part.unwrap_or(self.identifier)
    }
    /// Returns the name of the referenced item and its arguments, if any.
    ///
    /// Eg: `safeMint-address-uint256-` returns `("safeMint", ["address", "uint256"])`
    #[expect(unused)]
    fn ref_name_exact(&self) -> (&str, impl Iterator<Item = &str> + '_) {
        let mut pieces = self.exact_identifier().split('-');
        let name = pieces.next().unwrap();
        (name, pieces.filter(|s| !s.is_empty()))
    }
    /// Returns the content of the matched link.
    fn as_str(&self) -> &str {
        self.outer.as_str()
    }
    /// Returns true if the link is external.
    fn is_external(&self) -> bool {
        self.part.is_some()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn parse_inline_links() {
        // identifier + part form
        let s = " {IERC165-supportsInterface} ";
        let cap = RE_INLINE_LINK.captures(s).unwrap();
        let identifier = cap.name("identifier").unwrap().as_str();
        assert_eq!(identifier, "IERC165");
        let part = cap.name("part").unwrap().as_str();
        assert_eq!(part, "supportsInterface");
        // bare identifier form
        let s = " {supportsInterface} ";
        let cap = RE_INLINE_LINK.captures(s).unwrap();
        let identifier = cap.name("identifier").unwrap().as_str();
        assert_eq!(identifier, "supportsInterface");
        // xref form with overload argument types in the part
        let s = "{xref-ERC721-_safeMint-address-uint256-}";
        let cap = RE_INLINE_LINK.captures(s).unwrap();
        let identifier = cap.name("identifier").unwrap().as_str();
        assert_eq!(identifier, "ERC721");
        let identifier = cap.name("xref").unwrap().as_str();
        assert_eq!(identifier, "xref-");
        let identifier = cap.name("part").unwrap().as_str();
        assert_eq!(identifier, "_safeMint-address-uint256-");
        let link = InlineLink::capture(s).unwrap();
        assert_eq!(link.ref_name(), "_safeMint");
        assert_eq!(link.as_str(), "{xref-ERC721-_safeMint-address-uint256-}");
        // explicit display text in trailing brackets
        let s = "{xref-ERC721-_safeMint-address-uint256-}[`Named link`]";
        let link = InlineLink::capture(s).unwrap();
        assert_eq!(link.link, Some("`Named link`"));
        assert_eq!(link.markdown_link_display_value(), "`Named link`");
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/preprocessor/deployments.rs | crates/doc/src/preprocessor/deployments.rs | use super::{Preprocessor, PreprocessorId};
use crate::{Document, PreprocessorOutput};
use alloy_primitives::Address;
use std::{
fs,
path::{Path, PathBuf},
};
/// [Deployments] preprocessor id.
pub const DEPLOYMENTS_ID: PreprocessorId = PreprocessorId("deployments");
/// The deployments preprocessor.
///
/// This preprocessor writes to [Document]'s context.
#[derive(Debug)]
pub struct Deployments {
    /// The project root.
    pub root: PathBuf,
    /// The deployments directory.
    /// Relative to `root`; defaults to `deployments` when `None`.
    pub deployments: Option<PathBuf>,
}
/// A contract deployment.
/// Deserialized from the per-network JSON artifact files.
#[derive(Clone, Debug, serde::Deserialize)]
pub struct Deployment {
    /// The contract address
    pub address: Address,
    /// The network name
    /// Filled in by the preprocessor from the directory name, not the JSON.
    pub network: Option<String>,
}
impl Preprocessor for Deployments {
    fn id(&self) -> PreprocessorId {
        DEPLOYMENTS_ID
    }

    /// Collects per-network deployment artifacts for each document's contract and stores them
    /// in the document's context.
    ///
    /// Errors if the deployments directory cannot be read, contains non-directory entries, or
    /// an existing artifact cannot be read/parsed.
    fn preprocess(&self, documents: Vec<Document>) -> Result<Vec<Document>, eyre::Error> {
        let deployments_dir =
            self.root.join(self.deployments.as_deref().unwrap_or(Path::new("deployments")));
        // Gather all networks from the deployments directory. Each entry is expected to be a
        // directory named after the network (e.g. `deployments/mainnet`).
        let networks = fs::read_dir(&deployments_dir)?
            .map(|entry| {
                let entry = entry?;
                let path = entry.path();
                if entry.file_type()?.is_dir() {
                    entry
                        .file_name()
                        .into_string()
                        .map_err(|e| eyre::eyre!("failed to extract directory name: {e:?}"))
                } else {
                    eyre::bail!("not a directory: {}", path.display())
                }
            })
            .collect::<Result<Vec<_>, _>>()?;
        // Iterate over all documents to find any deployments.
        for document in &documents {
            let mut deployments = Vec::default();
            // The deployment artifact shares the contract file's name, with ".sol" -> ".json".
            // Computed once per document since it does not depend on the network.
            let mut artifact_path = document.item_path.clone();
            artifact_path.set_extension("json");
            let artifact_name = artifact_path
                .file_name()
                .ok_or_else(|| eyre::eyre!("Failed to extract file name from item path"))?
                .to_os_string();
            // Iterate over all networks and check if there is a deployment for the given contract.
            for network in &networks {
                let deployment_path = deployments_dir.join(network).join(&artifact_name);
                // Not every contract is deployed on every network: skip missing artifacts
                // instead of failing the entire preprocessor run.
                if !deployment_path.exists() {
                    continue;
                }
                // The artifact exists: parse it and tag it with the network name.
                let mut deployment: Deployment =
                    serde_json::from_str(&fs::read_to_string(deployment_path)?)?;
                deployment.network = Some(network.clone());
                deployments.push(deployment);
            }
            // If there are any deployments for the given contract, add them to the document
            // context.
            if !deployments.is_empty() {
                document.add_context(self.id(), PreprocessorOutput::Deployments(deployments));
            }
        }
        Ok(documents)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/preprocessor/inheritdoc.rs | crates/doc/src/preprocessor/inheritdoc.rs | use super::{Preprocessor, PreprocessorId};
use crate::{
Comments, Document, ParseItem, ParseSource, PreprocessorOutput, document::DocumentContent,
solang_ext::SafeUnwrap,
};
use alloy_primitives::map::HashMap;
/// [`Inheritdoc`] preprocessor ID.
pub const INHERITDOC_ID: PreprocessorId = PreprocessorId("inheritdoc");
/// The inheritdoc preprocessor.
/// Traverses the documents and attempts to find inherited
/// comments for inheritdoc comment tags.
///
/// This preprocessor writes to [Document]'s context.
// Stateless marker type: all work happens in `Preprocessor::preprocess`.
#[derive(Debug, Default)]
#[non_exhaustive]
pub struct Inheritdoc;
impl Preprocessor for Inheritdoc {
    fn id(&self) -> PreprocessorId {
        INHERITDOC_ID
    }

    /// Resolves `@inheritdoc` references for every single-item document and stores the
    /// resolved comments in the document's context.
    fn preprocess(&self, documents: Vec<Document>) -> Result<Vec<Document>, eyre::Error> {
        for document in &documents {
            // Only documents holding a single item are candidates for inheritdoc resolution.
            let DocumentContent::Single(item) = &document.content else { continue };
            let context = self.visit_item(item, &documents);
            if context.is_empty() {
                continue;
            }
            document.add_context(self.id(), PreprocessorOutput::Inheritdoc(context));
        }
        Ok(documents)
    }
}
impl Inheritdoc {
    /// Collects resolved `@inheritdoc` comments for `item` and all of its children.
    ///
    /// Returns a map from `"{Base}.{signature}"` keys to the inherited comments.
    // `&[Document]` instead of `&Vec<Document>` (clippy::ptr_arg); call sites passing
    // `&Vec<Document>` still work via deref coercion.
    fn visit_item(&self, item: &ParseItem, documents: &[Document]) -> HashMap<String, Comments> {
        let mut context = HashMap::default();
        // Match the item itself first, then each of its children, with identical logic.
        for target in std::iter::once(item).chain(item.children.iter()) {
            let matched = target
                .comments
                .find_inheritdoc_base()
                .and_then(|base| self.try_match_inheritdoc(base, &target.source, documents));
            if let Some((key, comments)) = matched {
                context.insert(key, comments);
            }
        }
        context
    }

    /// Searches `documents` for a contract named `base` and returns the comments of its child
    /// item whose signature matches `source`, keyed as `"{base}.{signature}"`.
    fn try_match_inheritdoc(
        &self,
        base: &str,
        source: &ParseSource,
        documents: &[Document],
    ) -> Option<(String, Comments)> {
        for candidate in documents {
            if let DocumentContent::Single(ref item) = candidate.content
                && let ParseSource::Contract(ref contract) = item.source
                && base == contract.name.safe_unwrap().name
            {
                // Not matched for the contract because it's a noop
                // https://docs.soliditylang.org/en/v0.8.17/natspec-format.html#tags
                for children in &item.children {
                    // Match using signature for functions (includes parameter types for overloads)
                    if source.signature() == children.source.signature() {
                        let key = format!("{}.{}", base, source.signature());
                        return Some((key, children.comments.clone()));
                    }
                }
            }
        }
        None
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/preprocessor/git_source.rs | crates/doc/src/preprocessor/git_source.rs | use super::{Preprocessor, PreprocessorId};
use crate::{Document, PreprocessorOutput};
use std::path::PathBuf;
/// [GitSource] preprocessor id.
pub const GIT_SOURCE_ID: PreprocessorId = PreprocessorId("git_source");
/// The git source preprocessor.
///
/// This preprocessor writes to [Document]'s context.
#[derive(Debug)]
pub struct GitSource {
    /// The project root.
    pub root: PathBuf,
    /// The current commit hash.
    /// Falls back to `master` in the generated URL when `None`.
    pub commit: Option<String>,
    /// The repository url.
    /// When `None` the preprocessor is a no-op.
    pub repository: Option<String>,
}
impl Preprocessor for GitSource {
    fn id(&self) -> PreprocessorId {
        GIT_SOURCE_ID
    }

    /// Attaches a `<repo>/blob/<commit>/<relative path>` URL to every non-library document.
    ///
    /// Does nothing when no repository URL is configured.
    fn preprocess(&self, documents: Vec<Document>) -> Result<Vec<Document>, eyre::Error> {
        let Some(repo) = self.repository.as_deref() else { return Ok(documents) };
        let repo = repo.trim_end_matches('/');
        let commit = self.commit.as_deref().unwrap_or("master");
        // Library documents are excluded from source links.
        for document in documents.iter().filter(|doc| !doc.from_library) {
            let relative = document.item_path.strip_prefix(&self.root)?;
            let git_url = format!("{repo}/blob/{commit}/{}", relative.display());
            document.add_context(self.id(), PreprocessorOutput::GitSource(git_url));
        }
        Ok(documents)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/preprocessor/mod.rs | crates/doc/src/preprocessor/mod.rs | //! Module containing documentation preprocessors.
use crate::{Comments, Document};
use alloy_primitives::map::HashMap;
use std::{fmt::Debug, path::PathBuf};
mod contract_inheritance;
pub use contract_inheritance::{CONTRACT_INHERITANCE_ID, ContractInheritance};
mod inheritdoc;
pub use inheritdoc::{INHERITDOC_ID, Inheritdoc};
mod infer_hyperlinks;
pub use infer_hyperlinks::{INFER_INLINE_HYPERLINKS_ID, InferInlineHyperlinks};
mod git_source;
pub use git_source::{GIT_SOURCE_ID, GitSource};
mod deployments;
pub use deployments::{DEPLOYMENTS_ID, Deployment, Deployments};
/// The preprocessor id.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct PreprocessorId(&'static str);
/// Preprocessor output.
/// Wraps all existing preprocessor outputs
/// in a single abstraction.
#[derive(Clone, Debug)]
pub enum PreprocessorOutput {
/// The contract inheritance output.
/// The map of contract base idents to the path of the base contract.
ContractInheritance(HashMap<String, PathBuf>),
/// The inheritdoc output.
/// The map of inherited item keys to their comments.
Inheritdoc(HashMap<String, Comments>),
/// The git source output.
/// The git url of the item path.
GitSource(String),
/// The deployments output.
/// The deployment address of the item path.
Deployments(Vec<Deployment>),
}
/// Trait for preprocessing and/or modifying existing documents
/// before writing the to disk.
pub trait Preprocessor: Debug {
/// The id of the preprocessor.
/// Used to write data to document context.
fn id(&self) -> PreprocessorId;
/// Preprocess the collection of documents
fn preprocess(&self, documents: Vec<Document>) -> Result<Vec<Document>, eyre::Error>;
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/solang_ext/loc.rs | crates/doc/src/solang_ext/loc.rs | use solang_parser::pt;
use std::{borrow::Cow, rc::Rc, sync::Arc};
/// Returns the code location.
///
/// Patched version of [`pt::CodeLocation`]: includes the block of a [`pt::FunctionDefinition`] in
/// its `loc`.
pub trait CodeLocationExt {
    /// Returns the code location of `self`.
    fn loc(&self) -> pt::Loc;
}
// Blanket impls: delegate through common reference and smart-pointer wrappers so `loc()` can be
// called regardless of how the node is owned.
impl<T: ?Sized + CodeLocationExt> CodeLocationExt for &T {
    fn loc(&self) -> pt::Loc {
        (**self).loc()
    }
}
impl<T: ?Sized + CodeLocationExt> CodeLocationExt for &mut T {
    fn loc(&self) -> pt::Loc {
        (**self).loc()
    }
}
impl<T: ?Sized + ToOwned + CodeLocationExt> CodeLocationExt for Cow<'_, T> {
    fn loc(&self) -> pt::Loc {
        (**self).loc()
    }
}
impl<T: ?Sized + CodeLocationExt> CodeLocationExt for Box<T> {
    fn loc(&self) -> pt::Loc {
        (**self).loc()
    }
}
impl<T: ?Sized + CodeLocationExt> CodeLocationExt for Rc<T> {
    fn loc(&self) -> pt::Loc {
        (**self).loc()
    }
}
impl<T: ?Sized + CodeLocationExt> CodeLocationExt for Arc<T> {
    fn loc(&self) -> pt::Loc {
        (**self).loc()
    }
}
// FunctionDefinition patch
// Extends the definition's own `loc` to also cover the function body, when present.
impl CodeLocationExt for pt::FunctionDefinition {
    #[inline]
    #[track_caller]
    fn loc(&self) -> pt::Loc {
        let mut loc = self.loc;
        if let Some(ref body) = self.body {
            loc.use_end_from(&pt::CodeLocation::loc(body));
        }
        loc
    }
}
// Route function definitions through the patched impl above; everything else delegates to the
// upstream `pt::CodeLocation` impl.
impl CodeLocationExt for pt::ContractPart {
    #[inline]
    #[track_caller]
    fn loc(&self) -> pt::Loc {
        match self {
            Self::FunctionDefinition(f) => f.loc(),
            _ => pt::CodeLocation::loc(self),
        }
    }
}
impl CodeLocationExt for pt::SourceUnitPart {
    #[inline]
    #[track_caller]
    fn loc(&self) -> pt::Loc {
        match self {
            Self::FunctionDefinition(f) => f.loc(),
            _ => pt::CodeLocation::loc(self),
        }
    }
}
// Manual impls for types that don't implement `pt::CodeLocation` upstream.
impl CodeLocationExt for pt::ImportPath {
    fn loc(&self) -> pt::Loc {
        match self {
            Self::Filename(s) => s.loc(),
            Self::Path(i) => i.loc(),
        }
    }
}
impl CodeLocationExt for pt::VersionComparator {
    fn loc(&self) -> pt::Loc {
        // Every variant carries its own `loc` field.
        match self {
            Self::Plain { loc, .. }
            | Self::Operator { loc, .. }
            | Self::Or { loc, .. }
            | Self::Range { loc, .. } => *loc,
        }
    }
}
// Implements `CodeLocationExt` for the listed types by delegating straight to
// `pt::CodeLocation::loc`. Types with hand-written (patched) impls above are listed but
// commented out.
macro_rules! impl_delegate {
    ($($t:ty),+ $(,)?) => {$(
        impl CodeLocationExt for $t {
            #[inline]
            #[track_caller]
            fn loc(&self) -> pt::Loc {
                pt::CodeLocation::loc(self)
            }
        }
    )+};
}
impl_delegate! {
    pt::Annotation,
    pt::Base,
    pt::ContractDefinition,
    pt::EnumDefinition,
    pt::ErrorDefinition,
    pt::ErrorParameter,
    pt::EventDefinition,
    pt::EventParameter,
    pt::PragmaDirective,
    // pt::FunctionDefinition,
    pt::HexLiteral,
    pt::Identifier,
    pt::IdentifierPath,
    pt::NamedArgument,
    pt::Parameter,
    // pt::SourceUnit,
    pt::StringLiteral,
    pt::StructDefinition,
    pt::TypeDefinition,
    pt::Using,
    pt::UsingFunction,
    pt::VariableDeclaration,
    pt::VariableDefinition,
    pt::YulBlock,
    pt::YulFor,
    pt::YulFunctionCall,
    pt::YulFunctionDefinition,
    pt::YulSwitch,
    pt::YulTypedIdentifier,
    pt::CatchClause,
    pt::Comment,
    // pt::ContractPart,
    pt::ContractTy,
    pt::Expression,
    pt::FunctionAttribute,
    // pt::FunctionTy,
    pt::Import,
    pt::Loc,
    pt::Mutability,
    // pt::SourceUnitPart,
    pt::Statement,
    pt::StorageLocation,
    // pt::Type,
    // pt::UserDefinedOperator,
    pt::UsingList,
    pt::VariableAttribute,
    // pt::Visibility,
    pt::YulExpression,
    pt::YulStatement,
    pt::YulSwitchOptions,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/solang_ext/visit.rs | crates/doc/src/solang_ext/visit.rs | //! Visitor helpers to traverse the [solang Solidity Parse Tree](solang_parser::pt).
use crate::solang_ext::{CodeLocationExt, pt::*};
/// A trait that is invoked while traversing the Solidity Parse Tree.
/// Each method of the [Visitor] trait is a hook that can be potentially overridden.
///
/// Currently the main implementer of this trait is the [`Formatter`](crate::Formatter<'_>) struct.
pub trait Visitor {
    type Error: std::error::Error;
    /// Catch-all hook for a source span; most default implementations below delegate here.
    fn visit_source(&mut self, _loc: Loc) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_source_unit(&mut self, _source_unit: &mut SourceUnit) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_contract(&mut self, _contract: &mut ContractDefinition) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_annotation(&mut self, annotation: &mut Annotation) -> Result<(), Self::Error> {
        self.visit_source(annotation.loc)
    }
    fn visit_pragma(&mut self, pragma: &mut PragmaDirective) -> Result<(), Self::Error> {
        self.visit_source(pragma.loc())
    }
    fn visit_import_plain(
        &mut self,
        _loc: Loc,
        _import: &mut ImportPath,
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_import_global(
        &mut self,
        _loc: Loc,
        _global: &mut ImportPath,
        _alias: &mut Identifier,
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_import_renames(
        &mut self,
        _loc: Loc,
        _imports: &mut [(Identifier, Option<Identifier>)],
        _from: &mut ImportPath,
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_enum(&mut self, _enum: &mut EnumDefinition) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_assembly(
        &mut self,
        loc: Loc,
        _dialect: &mut Option<StringLiteral>,
        _block: &mut YulBlock,
        _flags: &mut Option<Vec<StringLiteral>>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_block(
        &mut self,
        loc: Loc,
        _unchecked: bool,
        _statements: &mut Vec<Statement>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_args(&mut self, loc: Loc, _args: &mut Vec<NamedArgument>) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    /// Don't write semicolon at the end because expressions can appear as both
    /// part of other node and a statement in the function body
    fn visit_expr(&mut self, loc: Loc, _expr: &mut Expression) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_ident(&mut self, loc: Loc, _ident: &mut Identifier) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_ident_path(&mut self, idents: &mut IdentifierPath) -> Result<(), Self::Error> {
        self.visit_source(idents.loc)
    }
    fn visit_emit(&mut self, loc: Loc, _event: &mut Expression) -> Result<(), Self::Error> {
        self.visit_source(loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_var_definition(&mut self, var: &mut VariableDefinition) -> Result<(), Self::Error> {
        self.visit_source(var.loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_var_definition_stmt(
        &mut self,
        loc: Loc,
        _declaration: &mut VariableDeclaration,
        _expr: &mut Option<Expression>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)?;
        self.visit_stray_semicolon()
    }
    fn visit_var_declaration(&mut self, var: &mut VariableDeclaration) -> Result<(), Self::Error> {
        self.visit_source(var.loc)
    }
    fn visit_return(
        &mut self,
        loc: Loc,
        _expr: &mut Option<Expression>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_revert(
        &mut self,
        loc: Loc,
        _error: &mut Option<IdentifierPath>,
        _args: &mut Vec<Expression>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_revert_named_args(
        &mut self,
        loc: Loc,
        _error: &mut Option<IdentifierPath>,
        _args: &mut Vec<NamedArgument>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_break(&mut self, loc: Loc, _semicolon: bool) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_continue(&mut self, loc: Loc, _semicolon: bool) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    #[expect(clippy::type_complexity)]
    fn visit_try(
        &mut self,
        loc: Loc,
        _expr: &mut Expression,
        _returns: &mut Option<(Vec<(Loc, Option<Parameter>)>, Box<Statement>)>,
        _clauses: &mut Vec<CatchClause>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_if(
        &mut self,
        loc: Loc,
        _cond: &mut Expression,
        _if_branch: &mut Box<Statement>,
        _else_branch: &mut Option<Box<Statement>>,
        _is_first_stmt: bool,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_do_while(
        &mut self,
        loc: Loc,
        _body: &mut Statement,
        _cond: &mut Expression,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_while(
        &mut self,
        loc: Loc,
        _cond: &mut Expression,
        _body: &mut Statement,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_for(
        &mut self,
        loc: Loc,
        _init: &mut Option<Box<Statement>>,
        _cond: &mut Option<Box<Expression>>,
        _update: &mut Option<Box<Expression>>,
        _body: &mut Option<Box<Statement>>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_function(&mut self, func: &mut FunctionDefinition) -> Result<(), Self::Error> {
        // Uses the patched `CodeLocationExt::loc` which includes the function body.
        self.visit_source(func.loc())?;
        if func.body.is_none() {
            self.visit_stray_semicolon()?;
        }
        Ok(())
    }
    fn visit_function_attribute(
        &mut self,
        attribute: &mut FunctionAttribute,
    ) -> Result<(), Self::Error> {
        self.visit_source(attribute.loc())?;
        Ok(())
    }
    fn visit_var_attribute(
        &mut self,
        attribute: &mut VariableAttribute,
    ) -> Result<(), Self::Error> {
        self.visit_source(attribute.loc())?;
        Ok(())
    }
    fn visit_base(&mut self, base: &mut Base) -> Result<(), Self::Error> {
        self.visit_source(base.loc)
    }
    fn visit_parameter(&mut self, parameter: &mut Parameter) -> Result<(), Self::Error> {
        self.visit_source(parameter.loc)
    }
    fn visit_struct(&mut self, structure: &mut StructDefinition) -> Result<(), Self::Error> {
        self.visit_source(structure.loc)?;
        Ok(())
    }
    fn visit_event(&mut self, event: &mut EventDefinition) -> Result<(), Self::Error> {
        self.visit_source(event.loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_event_parameter(&mut self, param: &mut EventParameter) -> Result<(), Self::Error> {
        self.visit_source(param.loc)
    }
    fn visit_error(&mut self, error: &mut ErrorDefinition) -> Result<(), Self::Error> {
        self.visit_source(error.loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_error_parameter(&mut self, param: &mut ErrorParameter) -> Result<(), Self::Error> {
        self.visit_source(param.loc)
    }
    fn visit_type_definition(&mut self, def: &mut TypeDefinition) -> Result<(), Self::Error> {
        self.visit_source(def.loc)
    }
    /// Hook for a standalone semicolon token.
    fn visit_stray_semicolon(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }
    fn visit_using(&mut self, using: &mut Using) -> Result<(), Self::Error> {
        self.visit_source(using.loc)?;
        self.visit_stray_semicolon()?;
        Ok(())
    }
    fn visit_yul_block(
        &mut self,
        loc: Loc,
        _stmts: &mut Vec<YulStatement>,
        _attempt_single_line: bool,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_yul_expr(&mut self, expr: &mut YulExpression) -> Result<(), Self::Error> {
        self.visit_source(expr.loc())
    }
    fn visit_yul_assignment<T>(
        &mut self,
        loc: Loc,
        _exprs: &mut Vec<T>,
        _expr: &mut Option<&mut YulExpression>,
    ) -> Result<(), Self::Error>
    where
        T: Visitable + CodeLocationExt,
    {
        self.visit_source(loc)
    }
    fn visit_yul_for(&mut self, stmt: &mut YulFor) -> Result<(), Self::Error> {
        self.visit_source(stmt.loc)
    }
    fn visit_yul_function_call(&mut self, stmt: &mut YulFunctionCall) -> Result<(), Self::Error> {
        self.visit_source(stmt.loc)
    }
    fn visit_yul_fun_def(&mut self, stmt: &mut YulFunctionDefinition) -> Result<(), Self::Error> {
        self.visit_source(stmt.loc)
    }
    fn visit_yul_if(
        &mut self,
        loc: Loc,
        _expr: &mut YulExpression,
        _block: &mut YulBlock,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_yul_leave(&mut self, loc: Loc) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_yul_switch(&mut self, stmt: &mut YulSwitch) -> Result<(), Self::Error> {
        self.visit_source(stmt.loc)
    }
    fn visit_yul_var_declaration(
        &mut self,
        loc: Loc,
        _idents: &mut Vec<YulTypedIdentifier>,
        _expr: &mut Option<YulExpression>,
    ) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
    fn visit_yul_typed_ident(&mut self, ident: &mut YulTypedIdentifier) -> Result<(), Self::Error> {
        self.visit_source(ident.loc)
    }
    fn visit_parser_error(&mut self, loc: Loc) -> Result<(), Self::Error> {
        self.visit_source(loc)
    }
}
/// Visitable trait for [`solang_parser::pt`] types.
///
/// All [`solang_parser::pt`] types, such as [Statement], should implement the [Visitable] trait
/// that accepts a trait [Visitor] implementation, which has various callback handles for Solidity
/// Parse Tree nodes.
///
/// We want to take a `&mut self` to be able to implement some advanced features in the future such
/// as modifying the Parse Tree before formatting it.
pub trait Visitable {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor;
}
// Generic wrapper impls: forward through references, `Option`, `Box`, and `Vec` so nested
// tree structures can be visited uniformly.
impl<T> Visitable for &mut T
where
    T: Visitable,
{
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        T::visit(self, v)
    }
}
impl<T> Visitable for Option<T>
where
    T: Visitable,
{
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        // `None` is simply skipped.
        if let Some(inner) = self.as_mut() { inner.visit(v) } else { Ok(()) }
    }
}
impl<T> Visitable for Box<T>
where
    T: Visitable,
{
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        T::visit(self, v)
    }
}
impl<T> Visitable for Vec<T>
where
    T: Visitable,
{
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        // Short-circuits on the first error.
        for item in self.iter_mut() {
            item.visit(v)?;
        }
        Ok(())
    }
}
// Dispatch each parse-tree enum variant to the corresponding `Visitor` hook.
impl Visitable for SourceUnitPart {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        match self {
            Self::ContractDefinition(contract) => v.visit_contract(contract),
            Self::PragmaDirective(pragma) => v.visit_pragma(pragma),
            Self::ImportDirective(import) => import.visit(v),
            Self::EnumDefinition(enumeration) => v.visit_enum(enumeration),
            Self::StructDefinition(structure) => v.visit_struct(structure),
            Self::EventDefinition(event) => v.visit_event(event),
            Self::ErrorDefinition(error) => v.visit_error(error),
            Self::FunctionDefinition(function) => v.visit_function(function),
            Self::VariableDefinition(variable) => v.visit_var_definition(variable),
            Self::TypeDefinition(def) => v.visit_type_definition(def),
            Self::StraySemicolon(_) => v.visit_stray_semicolon(),
            Self::Using(using) => v.visit_using(using),
            Self::Annotation(annotation) => v.visit_annotation(annotation),
        }
    }
}
impl Visitable for Import {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        match self {
            Self::Plain(import, loc) => v.visit_import_plain(*loc, import),
            Self::GlobalSymbol(global, import_as, loc) => {
                v.visit_import_global(*loc, global, import_as)
            }
            Self::Rename(from, imports, loc) => v.visit_import_renames(*loc, imports, from),
        }
    }
}
impl Visitable for ContractPart {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        match self {
            Self::StructDefinition(structure) => v.visit_struct(structure),
            Self::EventDefinition(event) => v.visit_event(event),
            Self::ErrorDefinition(error) => v.visit_error(error),
            Self::EnumDefinition(enumeration) => v.visit_enum(enumeration),
            Self::VariableDefinition(variable) => v.visit_var_definition(variable),
            Self::FunctionDefinition(function) => v.visit_function(function),
            Self::TypeDefinition(def) => v.visit_type_definition(def),
            Self::StraySemicolon(_) => v.visit_stray_semicolon(),
            Self::Using(using) => v.visit_using(using),
            Self::Annotation(annotation) => v.visit_annotation(annotation),
        }
    }
}
impl Visitable for Statement {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        match self {
            Self::Block { loc, unchecked, statements } => {
                v.visit_block(*loc, *unchecked, statements)
            }
            Self::Assembly { loc, dialect, block, flags } => {
                v.visit_assembly(*loc, dialect, block, flags)
            }
            Self::Args(loc, args) => v.visit_args(*loc, args),
            Self::If(loc, cond, if_branch, else_branch) => {
                v.visit_if(*loc, cond, if_branch, else_branch, true)
            }
            Self::While(loc, cond, body) => v.visit_while(*loc, cond, body),
            Self::Expression(loc, expr) => {
                // Expression statements get a trailing semicolon, unlike bare expressions.
                v.visit_expr(*loc, expr)?;
                v.visit_stray_semicolon()
            }
            Self::VariableDefinition(loc, declaration, expr) => {
                v.visit_var_definition_stmt(*loc, declaration, expr)
            }
            Self::For(loc, init, cond, update, body) => v.visit_for(*loc, init, cond, update, body),
            Self::DoWhile(loc, body, cond) => v.visit_do_while(*loc, body, cond),
            Self::Continue(loc) => v.visit_continue(*loc, true),
            Self::Break(loc) => v.visit_break(*loc, true),
            Self::Return(loc, expr) => v.visit_return(*loc, expr),
            Self::Revert(loc, error, args) => v.visit_revert(*loc, error, args),
            Self::RevertNamedArgs(loc, error, args) => v.visit_revert_named_args(*loc, error, args),
            Self::Emit(loc, event) => v.visit_emit(*loc, event),
            Self::Try(loc, expr, returns, clauses) => v.visit_try(*loc, expr, returns, clauses),
            Self::Error(loc) => v.visit_parser_error(*loc),
        }
    }
}
// Leaf `Visitable` impls: each forwards to a single `Visitor` method, passing
// the node's source location alongside the node where one is available.
impl Visitable for Loc {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        // A bare location is visited as a raw source region.
        v.visit_source(*self)
    }
}
impl Visitable for Expression {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        // The expression's own location is derived via `loc()`.
        v.visit_expr(self.loc(), self)
    }
}
impl Visitable for Identifier {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        v.visit_ident(self.loc, self)
    }
}
impl Visitable for VariableDeclaration {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        v.visit_var_declaration(self)
    }
}
impl Visitable for YulBlock {
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        // NOTE(review): the trailing `false` flag's meaning is defined by
        // `Visitor::visit_yul_block` (not visible here) — confirm there.
        v.visit_yul_block(self.loc, self.statements.as_mut(), false)
    }
}
impl Visitable for YulStatement {
    /// Dispatches each Yul statement variant to its [`Visitor`] counterpart.
    ///
    /// `break`/`continue` reuse the Solidity visitor hooks with the boolean
    /// flag set to `false`.
    fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
    where
        V: Visitor,
    {
        match self {
            // Declarations and assignments.
            Self::VariableDeclaration(loc, idents, value) => {
                v.visit_yul_var_declaration(*loc, idents, value)
            }
            Self::Assign(loc, targets, value) => {
                v.visit_yul_assignment(*loc, targets, &mut Some(value))
            }
            Self::FunctionDefinition(def) => v.visit_yul_fun_def(def),
            // Control flow.
            Self::If(loc, condition, block) => v.visit_yul_if(*loc, condition, block),
            Self::For(stmt) => v.visit_yul_for(stmt),
            Self::Switch(stmt) => v.visit_yul_switch(stmt),
            Self::Break(loc) => v.visit_break(*loc, false),
            Self::Continue(loc) => v.visit_continue(*loc, false),
            Self::Leave(loc) => v.visit_yul_leave(*loc),
            // Blocks, calls, and parser errors.
            Self::Block(block) => v.visit_yul_block(block.loc, block.statements.as_mut(), false),
            Self::FunctionCall(call) => v.visit_yul_function_call(call),
            Self::Error(loc) => v.visit_parser_error(*loc),
        }
    }
}
/// Generates a [`Visitable`] implementation for `$type` that forwards `visit`
/// to a single `Visitor` method `$func` taking the node by `&mut` reference.
macro_rules! impl_visitable {
    ($type:ty, $func:ident) => {
        impl Visitable for $type {
            fn visit<V>(&mut self, v: &mut V) -> Result<(), V::Error>
            where
                V: Visitor,
            {
                v.$func(self)
            }
        }
    };
}
// One-to-one delegating impls: each parse-tree type forwards to the `Visitor`
// method named in the second argument.
impl_visitable!(SourceUnit, visit_source_unit);
impl_visitable!(FunctionAttribute, visit_function_attribute);
impl_visitable!(VariableAttribute, visit_var_attribute);
impl_visitable!(Parameter, visit_parameter);
impl_visitable!(Base, visit_base);
impl_visitable!(EventParameter, visit_event_parameter);
impl_visitable!(ErrorParameter, visit_error_parameter);
impl_visitable!(IdentifierPath, visit_ident_path);
impl_visitable!(YulExpression, visit_yul_expr);
impl_visitable!(YulTypedIdentifier, visit_yul_typed_ident);
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/solang_ext/mod.rs | crates/doc/src/solang_ext/mod.rs | //! Extension traits and modules to the [`solang_parser`] crate.
/// Same as [`solang_parser::pt`], but with the patched `CodeLocation`.
/// Same as [`solang_parser::pt`], but with the patched `CodeLocation`.
pub mod pt {
    // Shadow the upstream `CodeLocation` with the extension trait so downstream
    // code gets the patched behavior under the familiar name.
    #[doc(no_inline)]
    pub use super::loc::CodeLocationExt as CodeLocation;
    // Re-export the remaining parse-tree types unchanged.
    #[doc(no_inline)]
    pub use solang_parser::pt::{
        Annotation, Base, CatchClause, Comment, ContractDefinition, ContractPart, ContractTy,
        EnumDefinition, ErrorDefinition, ErrorParameter, EventDefinition, EventParameter,
        Expression, FunctionAttribute, FunctionDefinition, FunctionTy, HexLiteral, Identifier,
        IdentifierPath, Import, ImportPath, Loc, Mutability, NamedArgument, OptionalCodeLocation,
        Parameter, ParameterList, PragmaDirective, SourceUnit, SourceUnitPart, Statement,
        StorageLocation, StringLiteral, StructDefinition, Type, TypeDefinition,
        UserDefinedOperator, Using, UsingFunction, UsingList, VariableAttribute,
        VariableDeclaration, VariableDefinition, Visibility, YulBlock, YulExpression, YulFor,
        YulFunctionCall, YulFunctionDefinition, YulStatement, YulSwitch, YulSwitchOptions,
        YulTypedIdentifier,
    };
}
// Extension submodules.
mod ast_eq;
mod loc;
mod safe_unwrap;
mod visit;
// Flat re-exports of the extension traits.
pub use ast_eq::AstEq;
pub use loc::CodeLocationExt;
pub use safe_unwrap::SafeUnwrap;
pub use visit::{Visitable, Visitor};
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/solang_ext/safe_unwrap.rs | crates/doc/src/solang_ext/safe_unwrap.rs | use solang_parser::pt;
/// Trait implemented to unwrap optional parse tree items initially introduced in
/// [hyperledger/solang#1068].
///
/// Note that the methods of this trait should only be used on parse tree items' fields, like
/// [pt::VariableDefinition] or [pt::EventDefinition], where the `name` field is `None` only when an
/// error occurred during parsing.
///
/// [hyperledger/solang#1068]: https://github.com/hyperledger/solang/pull/1068
pub trait SafeUnwrap<T> {
    /// Unwraps a shared reference to the inner value.
    ///
    /// # Panics
    /// Panics with "invalid parse tree" if the value is `None`.
    fn safe_unwrap(&self) -> &T;
    /// Unwraps a mutable reference to the inner value.
    ///
    /// # Panics
    /// Panics with "invalid parse tree" if the value is `None`.
    fn safe_unwrap_mut(&mut self) -> &mut T;
}
// Shared panic path for all `SafeUnwrap` impls. Kept out-of-line
// (`inline(never)`) and marked `cold` so the happy path of `safe_unwrap` stays
// small; `track_caller` makes the panic report the caller's location.
#[inline(never)]
#[cold]
#[track_caller]
fn invalid() -> ! {
    panic!("invalid parse tree")
}
/// Implements [`SafeUnwrap`] for `Option<$t>` for each listed type `$t`.
macro_rules! impl_ {
    ($($t:ty),+ $(,)?) => {
        $(
            impl SafeUnwrap<$t> for Option<$t> {
                #[inline]
                #[track_caller]
                fn safe_unwrap(&self) -> &$t {
                    match *self {
                        Some(ref x) => x,
                        // Out-of-line cold panic; see `invalid` above.
                        None => invalid(),
                    }
                }
                #[inline]
                #[track_caller]
                fn safe_unwrap_mut(&mut self) -> &mut $t {
                    match *self {
                        Some(ref mut x) => x,
                        None => invalid(),
                    }
                }
            }
        )+
    };
}
impl_!(pt::Identifier, pt::StringLiteral);
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/solang_ext/ast_eq.rs | crates/doc/src/solang_ext/ast_eq.rs | use alloy_primitives::{Address, I256, U256};
use solang_parser::pt::*;
use std::str::FromStr;
/// Helper that parses a decimal number literal (underscores allowed) into a
/// comparable [`I256`]. An empty string is treated as zero.
fn to_num(string: &str) -> I256 {
    match string {
        "" => I256::ZERO,
        s => s.replace('_', "").trim().parse().unwrap(),
    }
}
/// Helper that converts the fractional part of a number into a comparable
/// [`U256`]. The digits are reversed before parsing so that trailing zeros are
/// ignored (e.g. `.5` and `.50` compare equal).
fn to_num_reversed(string: &str) -> U256 {
    if string.is_empty() {
        U256::from(0)
    } else {
        let reversed: String = string.replace('_', "").trim().chars().rev().collect();
        reversed.parse().unwrap()
    }
}
/// Helper that filters a [ParameterList], dropping entries whose parameter
/// slot is empty.
fn filter_params(list: &ParameterList) -> ParameterList {
    let mut filtered = Vec::with_capacity(list.len());
    for entry in list {
        if entry.1.is_some() {
            filtered.push(entry.clone());
        }
    }
    filtered
}
/// Check if two ParseTrees are equal ignoring location information or ordering if ordering does
/// not matter
pub trait AstEq {
    /// Returns `true` if `self` and `other` are AST-equal.
    fn ast_eq(&self, other: &Self) -> bool;
}
// Locations are explicitly ignored: two nodes may be AST-equal even when they
// originate from different places in the source.
impl AstEq for Loc {
    fn ast_eq(&self, _other: &Self) -> bool {
        true
    }
}
impl AstEq for IdentifierPath {
    fn ast_eq(&self, other: &Self) -> bool {
        // Paths are equal when their identifier segments are equal.
        self.identifiers.ast_eq(&other.identifiers)
    }
}
impl AstEq for SourceUnit {
    fn ast_eq(&self, other: &Self) -> bool {
        // A source unit is a list of parts; compare element-wise.
        self.0.ast_eq(&other.0)
    }
}
impl AstEq for VariableDefinition {
    /// Two variable definitions are equal when their type, name, initializer
    /// and (order-insensitive) attribute set all match.
    fn ast_eq(&self, other: &Self) -> bool {
        // Attribute order is irrelevant, so compare sorted copies.
        let sorted_attrs = |def: &Self| {
            let mut sorted = def.attrs.clone();
            sorted.sort();
            sorted
        };
        if !self.ty.ast_eq(&other.ty) || !self.name.ast_eq(&other.name) {
            return false;
        }
        self.initializer.ast_eq(&other.initializer)
            && sorted_attrs(self).ast_eq(&sorted_attrs(other))
    }
}
impl AstEq for FunctionDefinition {
    /// Compares two function definitions, ignoring attribute order and empty
    /// parameter slots.
    fn ast_eq(&self, other: &Self) -> bool {
        let sorted_attrs = |def: &Self| {
            let mut sorted = def.attributes.clone();
            sorted.sort();
            sorted
        };
        // Empty parameter slots are dropped on both sides before comparing.
        let params_eq = filter_params(&self.params).ast_eq(&filter_params(&other.params));
        let returns_eq = filter_params(&self.returns).ast_eq(&filter_params(&other.returns));
        self.ty.ast_eq(&other.ty)
            && self.name.ast_eq(&other.name)
            && params_eq
            && self.return_not_returns.ast_eq(&other.return_not_returns)
            && returns_eq
            && self.body.ast_eq(&other.body)
            && sorted_attrs(self).ast_eq(&sorted_attrs(other))
    }
}
impl AstEq for Base {
    /// Bases are equal when their names match and their argument lists match,
    /// with a missing argument list treated as an empty one.
    fn ast_eq(&self, other: &Self) -> bool {
        if !self.name.ast_eq(&other.name) {
            return false;
        }
        let left_args = self.args.clone().unwrap_or_default();
        let right_args = other.args.clone().unwrap_or_default();
        left_args.ast_eq(&right_args)
    }
}
// Structural forwarding impls for containers and references.
impl<T: AstEq> AstEq for Vec<T> {
    /// Equal iff both vectors have the same length and are element-wise equal.
    fn ast_eq(&self, other: &Self) -> bool {
        self.len() == other.len()
            && self.iter().zip(other.iter()).all(|(left, right)| left.ast_eq(right))
    }
}
impl<T: AstEq> AstEq for Option<T> {
    /// `None == None`; `Some`s compare their payloads; anything else is unequal.
    fn ast_eq(&self, other: &Self) -> bool {
        match (self, other) {
            (None, None) => true,
            (Some(left), Some(right)) => left.ast_eq(right),
            _ => false,
        }
    }
}
impl<T: AstEq> AstEq for Box<T> {
    fn ast_eq(&self, other: &Self) -> bool {
        (**self).ast_eq(&**other)
    }
}
impl AstEq for () {
    fn ast_eq(&self, _other: &Self) -> bool {
        true
    }
}
impl<T: AstEq> AstEq for &T {
    fn ast_eq(&self, other: &Self) -> bool {
        (*self).ast_eq(*other)
    }
}
impl AstEq for String {
    /// Strings that both parse as addresses are compared as addresses (making
    /// the comparison checksum-case-insensitive); otherwise plain equality.
    fn ast_eq(&self, other: &Self) -> bool {
        if let (Ok(left), Ok(right)) = (Address::from_str(self), Address::from_str(other)) {
            left == right
        } else {
            self == other
        }
    }
}
/// Resolves a field expression inside the `gen_ast_eq_enum!` expansion:
/// a field marked `#[ast_eq_use(f)]` is passed through `f` before comparison,
/// otherwise the field is used as-is.
macro_rules! ast_eq_field {
    (#[ast_eq_use($convert_func:ident)] $field:ident) => {
        $convert_func($field)
    };
    ($field:ident) => {
        $field
    };
}
/// Expands to an equality check between `$self` and `$other` for enum `$name`.
///
/// Variants are listed in three `_`-separated groups: unit variants, tuple
/// variants, and struct variants. The first rule destructures `$self` and
/// recurses into one of the per-variant rules below; mismatched variants
/// compare unequal, matching variants compare their bound fields as tuples via
/// [`AstEq`] (with optional `#[ast_eq_use(..)]` conversions, see
/// `ast_eq_field!`).
macro_rules! gen_ast_eq_enum {
    ($self:expr, $other:expr, $name:ident {
        $($unit_variant:ident),* $(,)?
        _
        $($tuple_variant:ident ( $($(#[ast_eq_use($tuple_convert_func:ident)])? $tuple_field:ident),* $(,)? )),* $(,)?
        _
        $($struct_variant:ident { $($(#[ast_eq_use($struct_convert_func:ident)])? $struct_field:ident),* $(,)? }),* $(,)?
    }) => {
        match $self {
            $($name::$unit_variant => gen_ast_eq_enum!($other, $name, $unit_variant),)*
            $($name::$tuple_variant($($tuple_field),*) =>
                gen_ast_eq_enum!($other, $name, $tuple_variant ($($(#[ast_eq_use($tuple_convert_func)])? $tuple_field),*)),)*
            $($name::$struct_variant { $($struct_field),* } =>
                gen_ast_eq_enum!($other, $name, $struct_variant {$($(#[ast_eq_use($struct_convert_func)])? $struct_field),*}),)*
        }
    };
    // Unit variant: equal iff `$other` is the same variant.
    ($other:expr, $name:ident, $unit_variant:ident) => {
        {
            matches!($other, $name::$unit_variant)
        }
    };
    // Tuple variant: compare bound fields pairwise as a tuple.
    ($other:expr, $name:ident, $tuple_variant:ident ( $($(#[ast_eq_use($tuple_convert_func:ident)])? $tuple_field:ident),* $(,)? ) ) => {
        {
            let left = ($(ast_eq_field!($(#[ast_eq_use($tuple_convert_func)])? $tuple_field)),*);
            if let $name::$tuple_variant($($tuple_field),*) = $other {
                let right = ($(ast_eq_field!($(#[ast_eq_use($tuple_convert_func)])? $tuple_field)),*);
                left.ast_eq(&right)
            } else {
                false
            }
        }
    };
    // Struct variant: same as tuple variants, with named fields.
    ($other:expr, $name:ident, $struct_variant:ident { $($(#[ast_eq_use($struct_convert_func:ident)])? $struct_field:ident),* $(,)? } ) => {
        {
            let left = ($(ast_eq_field!($(#[ast_eq_use($struct_convert_func)])? $struct_field)),*);
            if let $name::$struct_variant { $($struct_field),* } = $other {
                let right = ($(ast_eq_field!($(#[ast_eq_use($struct_convert_func)])? $struct_field)),*);
                left.ast_eq(&right)
            } else {
                false
            }
        }
    };
}
/// Wraps a single statement into a one-statement `Statement::Block` unless it
/// already is a block. This lets `{ stmt }` and bare `stmt` bodies compare as
/// AST-equal; the synthesized block is never `unchecked`.
macro_rules! wrap_in_box {
    ($stmt:expr, $loc:expr) => {
        if !matches!(**$stmt, Statement::Block { .. }) {
            Box::new(Statement::Block {
                loc: $loc,
                unchecked: false,
                statements: vec![*$stmt.clone()],
            })
        } else {
            $stmt.clone()
        }
    };
}
impl AstEq for Statement {
    /// Compares statements, normalizing single-statement bodies into blocks
    /// (via `wrap_in_box!`) so that e.g. `if (c) f();` equals `if (c) { f(); }`.
    /// Variants without special handling fall through to `gen_ast_eq_enum!`.
    fn ast_eq(&self, other: &Self) -> bool {
        match self {
            Self::If(loc, expr, stmt1, stmt2) => {
                #[expect(clippy::borrowed_box)]
                let wrap_if = |stmt1: &Box<Self>, stmt2: &Option<Box<Self>>| {
                    (
                        wrap_in_box!(stmt1, *loc),
                        stmt2.as_ref().map(|stmt2| {
                            // `else if` chains stay unwrapped; only plain
                            // `else` bodies are normalized into blocks.
                            if matches!(**stmt2, Self::If(..)) {
                                stmt2.clone()
                            } else {
                                wrap_in_box!(stmt2, *loc)
                            }
                        }),
                    )
                };
                let (stmt1, stmt2) = wrap_if(stmt1, stmt2);
                let left = (loc, expr, &stmt1, &stmt2);
                if let Self::If(loc, expr, stmt1, stmt2) = other {
                    let (stmt1, stmt2) = wrap_if(stmt1, stmt2);
                    let right = (loc, expr, &stmt1, &stmt2);
                    left.ast_eq(&right)
                } else {
                    false
                }
            }
            Self::While(loc, expr, stmt1) => {
                let stmt1 = wrap_in_box!(stmt1, *loc);
                let left = (loc, expr, &stmt1);
                if let Self::While(loc, expr, stmt1) = other {
                    let stmt1 = wrap_in_box!(stmt1, *loc);
                    let right = (loc, expr, &stmt1);
                    left.ast_eq(&right)
                } else {
                    false
                }
            }
            Self::DoWhile(loc, stmt1, expr) => {
                let stmt1 = wrap_in_box!(stmt1, *loc);
                let left = (loc, &stmt1, expr);
                if let Self::DoWhile(loc, stmt1, expr) = other {
                    let stmt1 = wrap_in_box!(stmt1, *loc);
                    let right = (loc, &stmt1, expr);
                    left.ast_eq(&right)
                } else {
                    false
                }
            }
            Self::For(loc, stmt1, expr, stmt2, stmt3) => {
                // Only the loop body (`stmt3`) is normalized into a block.
                let stmt3 = stmt3.as_ref().map(|stmt3| wrap_in_box!(stmt3, *loc));
                let left = (loc, stmt1, expr, stmt2, &stmt3);
                if let Self::For(loc, stmt1, expr, stmt2, stmt3) = other {
                    let stmt3 = stmt3.as_ref().map(|stmt3| wrap_in_box!(stmt3, *loc));
                    let right = (loc, stmt1, expr, stmt2, &stmt3);
                    left.ast_eq(&right)
                } else {
                    false
                }
            }
            Self::Try(loc, expr, returns, catch) => {
                // Empty parameter slots in the `returns` clause are dropped
                // before comparing (see `filter_params`).
                let left_returns =
                    returns.as_ref().map(|(params, stmt)| (filter_params(params), stmt));
                let left = (loc, expr, left_returns, catch);
                if let Self::Try(loc, expr, returns, catch) = other {
                    let right_returns =
                        returns.as_ref().map(|(params, stmt)| (filter_params(params), stmt));
                    let right = (loc, expr, right_returns, catch);
                    left.ast_eq(&right)
                } else {
                    false
                }
            }
            _ => gen_ast_eq_enum!(self, other, Statement {
                _
                Args(loc, args),
                Expression(loc, expr),
                VariableDefinition(loc, decl, expr),
                Continue(loc, ),
                Break(loc, ),
                Return(loc, expr),
                Revert(loc, expr, expr2),
                RevertNamedArgs(loc, expr, args),
                Emit(loc, expr),
                // provide overridden variants regardless
                If(loc, expr, stmt1, stmt2),
                While(loc, expr, stmt1),
                DoWhile(loc, stmt1, expr),
                For(loc, stmt1, expr, stmt2, stmt3),
                Try(loc, expr, params, clause),
                Error(loc)
                _
                Block {
                    loc,
                    unchecked,
                    statements,
                },
                Assembly {
                    loc,
                    dialect,
                    block,
                    flags,
                },
            }),
        }
    }
}
/// Derives an [`AstEq`] implementation. Four forms are supported:
/// - `derive_ast_eq! { Type }`: delegate to `PartialEq` (`self == other`);
/// - `derive_ast_eq! { (0 A, 1 B, ...) }`: tuples, compared element-wise;
/// - `derive_ast_eq! { struct Name { field, ... } }`: structs, compared as a
///   tuple of the listed fields;
/// - `derive_ast_eq! { enum Name { ... } }`: enums, expanded through
///   `gen_ast_eq_enum!` with `unit _ tuple _ struct` variant groups.
macro_rules! derive_ast_eq {
    ($name:ident) => {
        impl AstEq for $name {
            fn ast_eq(&self, other: &Self) -> bool {
                self == other
            }
        }
    };
    (($($index:tt $gen:tt),*)) => {
        impl < $( $gen ),* > AstEq for ($($gen,)*) where $($gen: AstEq),* {
            fn ast_eq(&self, other: &Self) -> bool {
                $(
                    if !self.$index.ast_eq(&other.$index) {
                        return false
                    }
                )*
                true
            }
        }
    };
    (struct $name:ident { $($field:ident),* $(,)? }) => {
        impl AstEq for $name {
            fn ast_eq(&self, other: &Self) -> bool {
                let $name { $($field),* } = self;
                let left = ($($field),*);
                let $name { $($field),* } = other;
                let right = ($($field),*);
                left.ast_eq(&right)
            }
        }
    };
    (enum $name:ident {
        $($unit_variant:ident),* $(,)?
        _
        $($tuple_variant:ident ( $($(#[ast_eq_use($tuple_convert_func:ident)])? $tuple_field:ident),* $(,)? )),* $(,)?
        _
        $($struct_variant:ident { $($(#[ast_eq_use($struct_convert_func:ident)])? $struct_field:ident),* $(,)? }),* $(,)?
    }) => {
        impl AstEq for $name {
            fn ast_eq(&self, other: &Self) -> bool {
                gen_ast_eq_enum!(self, other, $name {
                    $($unit_variant),*
                    _
                    $($tuple_variant ( $($(#[ast_eq_use($tuple_convert_func)])? $tuple_field),* )),*
                    _
                    $($struct_variant { $($(#[ast_eq_use($struct_convert_func)])? $struct_field),* }),*
                })
            }
        }
    }
}
// Tuple comparisons (up to arity 7), element-wise.
derive_ast_eq! { (0 A) }
derive_ast_eq! { (0 A, 1 B) }
derive_ast_eq! { (0 A, 1 B, 2 C) }
derive_ast_eq! { (0 A, 1 B, 2 C, 3 D) }
derive_ast_eq! { (0 A, 1 B, 2 C, 3 D, 4 E) }
derive_ast_eq! { (0 A, 1 B, 2 C, 3 D, 4 E, 5 F) }
derive_ast_eq! { (0 A, 1 B, 2 C, 3 D, 4 E, 5 F, 6 G) }
// Primitives and big integers compare with plain `==`.
derive_ast_eq! { bool }
derive_ast_eq! { u8 }
derive_ast_eq! { u16 }
derive_ast_eq! { I256 }
derive_ast_eq! { U256 }
// Struct nodes: field-wise comparison. Listed `loc` fields are effectively
// ignored because `AstEq for Loc` always returns `true`.
derive_ast_eq! { struct Identifier { loc, name } }
derive_ast_eq! { struct HexLiteral { loc, hex } }
derive_ast_eq! { struct StringLiteral { loc, unicode, string } }
derive_ast_eq! { struct Parameter { loc, annotation, ty, storage, name } }
derive_ast_eq! { struct NamedArgument { loc, name, expr } }
derive_ast_eq! { struct YulBlock { loc, statements } }
derive_ast_eq! { struct YulFunctionCall { loc, id, arguments } }
derive_ast_eq! { struct YulFunctionDefinition { loc, id, params, returns, body } }
derive_ast_eq! { struct YulSwitch { loc, condition, cases, default } }
derive_ast_eq! { struct YulFor {
    loc,
    init_block,
    condition,
    post_block,
    execution_block,
}}
derive_ast_eq! { struct YulTypedIdentifier { loc, id, ty } }
derive_ast_eq! { struct VariableDeclaration { loc, ty, storage, name } }
derive_ast_eq! { struct Using { loc, list, ty, global } }
derive_ast_eq! { struct UsingFunction { loc, path, oper } }
derive_ast_eq! { struct TypeDefinition { loc, name, ty } }
derive_ast_eq! { struct ContractDefinition { loc, ty, name, base, layout, parts } }
derive_ast_eq! { struct EventParameter { loc, ty, indexed, name } }
derive_ast_eq! { struct ErrorParameter { loc, ty, name } }
derive_ast_eq! { struct EventDefinition { loc, name, fields, anonymous } }
derive_ast_eq! { struct ErrorDefinition { loc, keyword, name, fields } }
derive_ast_eq! { struct StructDefinition { loc, name, fields } }
derive_ast_eq! { struct EnumDefinition { loc, name, values } }
derive_ast_eq! { struct Annotation { loc, id, value } }
// Enum nodes: the three `_`-separated groups are
// `unit-variants _ tuple-variants _ struct-variants` (see `gen_ast_eq_enum!`).
derive_ast_eq! { enum PragmaDirective {
    _
    Identifier(loc, id1, id2),
    StringLiteral(loc, id, lit),
    Version(loc, id, version),
    _
}}
derive_ast_eq! { enum UsingList {
    Error,
    _
    Library(expr),
    Functions(exprs),
    _
}}
derive_ast_eq! { enum UserDefinedOperator {
    BitwiseAnd,
    BitwiseNot,
    Negate,
    BitwiseOr,
    BitwiseXor,
    Add,
    Divide,
    Modulo,
    Multiply,
    Subtract,
    Equal,
    More,
    MoreEqual,
    Less,
    LessEqual,
    NotEqual,
    _
    _
}}
derive_ast_eq! { enum Visibility {
    _
    External(loc),
    Public(loc),
    Internal(loc),
    Private(loc),
    _
}}
derive_ast_eq! { enum Mutability {
    _
    Pure(loc),
    View(loc),
    Constant(loc),
    Payable(loc),
    _
}}
derive_ast_eq! { enum FunctionAttribute {
    _
    Mutability(muta),
    Visibility(visi),
    Virtual(loc),
    Immutable(loc),
    Override(loc, idents),
    BaseOrModifier(loc, base),
    Error(loc),
    _
}}
derive_ast_eq! { enum StorageLocation {
    _
    Memory(loc),
    Storage(loc),
    Calldata(loc),
    Transient(loc),
    _
}}
derive_ast_eq! { enum Type {
    Address,
    AddressPayable,
    Payable,
    Bool,
    Rational,
    DynamicBytes,
    String,
    _
    Int(int),
    Uint(int),
    Bytes(int),
    _
    Mapping{ loc, key, key_name, value, value_name },
    Function { params, attributes, returns },
}}
// Expressions: number literals are normalized through `to_num` /
// `to_num_reversed` so that underscores and trailing fractional zeros do not
// affect equality (e.g. `1_000` == `1000`).
derive_ast_eq! { enum Expression {
    _
    PostIncrement(loc, expr1),
    PostDecrement(loc, expr1),
    New(loc, expr1),
    ArraySubscript(loc, expr1, expr2),
    ArraySlice(
        loc,
        expr1,
        expr2,
        expr3,
    ),
    MemberAccess(loc, expr1, ident1),
    FunctionCall(loc, expr1, exprs1),
    FunctionCallBlock(loc, expr1, stmt),
    NamedFunctionCall(loc, expr1, args),
    Not(loc, expr1),
    BitwiseNot(loc, expr1),
    Delete(loc, expr1),
    PreIncrement(loc, expr1),
    PreDecrement(loc, expr1),
    UnaryPlus(loc, expr1),
    Negate(loc, expr1),
    Power(loc, expr1, expr2),
    Multiply(loc, expr1, expr2),
    Divide(loc, expr1, expr2),
    Modulo(loc, expr1, expr2),
    Add(loc, expr1, expr2),
    Subtract(loc, expr1, expr2),
    ShiftLeft(loc, expr1, expr2),
    ShiftRight(loc, expr1, expr2),
    BitwiseAnd(loc, expr1, expr2),
    BitwiseXor(loc, expr1, expr2),
    BitwiseOr(loc, expr1, expr2),
    Less(loc, expr1, expr2),
    More(loc, expr1, expr2),
    LessEqual(loc, expr1, expr2),
    MoreEqual(loc, expr1, expr2),
    Equal(loc, expr1, expr2),
    NotEqual(loc, expr1, expr2),
    And(loc, expr1, expr2),
    Or(loc, expr1, expr2),
    ConditionalOperator(loc, expr1, expr2, expr3),
    Assign(loc, expr1, expr2),
    AssignOr(loc, expr1, expr2),
    AssignAnd(loc, expr1, expr2),
    AssignXor(loc, expr1, expr2),
    AssignShiftLeft(loc, expr1, expr2),
    AssignShiftRight(loc, expr1, expr2),
    AssignAdd(loc, expr1, expr2),
    AssignSubtract(loc, expr1, expr2),
    AssignMultiply(loc, expr1, expr2),
    AssignDivide(loc, expr1, expr2),
    AssignModulo(loc, expr1, expr2),
    BoolLiteral(loc, bool1),
    NumberLiteral(loc, #[ast_eq_use(to_num)] str1, #[ast_eq_use(to_num)] str2, unit),
    RationalNumberLiteral(
        loc,
        #[ast_eq_use(to_num)] str1,
        #[ast_eq_use(to_num_reversed)] str2,
        #[ast_eq_use(to_num)] str3,
        unit
    ),
    HexNumberLiteral(loc, str1, unit),
    StringLiteral(strs1),
    Type(loc, ty1),
    HexLiteral(hexs1),
    AddressLiteral(loc, str1),
    Variable(ident1),
    List(loc, params1),
    ArrayLiteral(loc, exprs1),
    Parenthesis(loc, expr)
    _
}}
derive_ast_eq! { enum CatchClause {
    _
    Simple(param, ident, stmt),
    Named(loc, ident, param, stmt),
    _
}}
derive_ast_eq! { enum YulStatement {
    _
    Assign(loc, exprs, expr),
    VariableDeclaration(loc, idents, expr),
    If(loc, expr, block),
    For(yul_for),
    Switch(switch),
    Leave(loc),
    Break(loc),
    Continue(loc),
    Block(block),
    FunctionDefinition(def),
    FunctionCall(func),
    Error(loc),
    _
}}
derive_ast_eq! { enum YulExpression {
    _
    BoolLiteral(loc, boo, ident),
    NumberLiteral(loc, string1, string2, ident),
    HexNumberLiteral(loc, string, ident),
    HexStringLiteral(hex, ident),
    StringLiteral(string, ident),
    Variable(ident),
    FunctionCall(func),
    SuffixAccess(loc, expr, ident),
    _
}}
derive_ast_eq! { enum YulSwitchOptions {
    _
    Case(loc, expr, block),
    Default(loc, block),
    _
}}
derive_ast_eq! { enum SourceUnitPart {
    _
    ContractDefinition(def),
    PragmaDirective(pragma),
    ImportDirective(import),
    EnumDefinition(def),
    StructDefinition(def),
    EventDefinition(def),
    ErrorDefinition(def),
    FunctionDefinition(def),
    VariableDefinition(def),
    TypeDefinition(def),
    Using(using),
    StraySemicolon(loc),
    Annotation(annotation),
    _
}}
derive_ast_eq! { enum ImportPath {
    _
    Filename(lit),
    Path(path),
    _
}}
derive_ast_eq! { enum Import {
    _
    Plain(string, loc),
    GlobalSymbol(string, ident, loc),
    Rename(string, idents, loc),
    _
}}
derive_ast_eq! { enum FunctionTy {
    Constructor,
    Function,
    Fallback,
    Receive,
    Modifier,
    _
    _
}}
derive_ast_eq! { enum ContractPart {
    _
    StructDefinition(def),
    EventDefinition(def),
    EnumDefinition(def),
    ErrorDefinition(def),
    VariableDefinition(def),
    FunctionDefinition(def),
    TypeDefinition(def),
    StraySemicolon(loc),
    Using(using),
    Annotation(annotation),
    _
}}
derive_ast_eq! { enum ContractTy {
    _
    Abstract(loc),
    Contract(loc),
    Interface(loc),
    Library(loc),
    _
}}
derive_ast_eq! { enum VariableAttribute {
    _
    Visibility(visi),
    Constant(loc),
    Immutable(loc),
    Override(loc, idents),
    StorageType(st),
    StorageLocation(st),
    _
}}
// Storage types and pragma version comparators are deliberately ignored for
// AST equality: any two values compare equal.
impl AstEq for StorageType {
    fn ast_eq(&self, _other: &Self) -> bool {
        true
    }
}
impl AstEq for VersionComparator {
    fn ast_eq(&self, _other: &Self) -> bool {
        true
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/parser/error.rs | crates/doc/src/parser/error.rs | use solar::interface::diagnostics::EmittedDiagnostics;
use thiserror::Error;
/// The parser error.
#[derive(Debug, Error)]
#[error(transparent)]
pub enum ParserError {
    /// Formatter error.
    ///
    /// Wraps diagnostics emitted by the compiler frontend (see the
    /// `EmittedDiagnostics` import).
    #[error(transparent)]
    Formatter(EmittedDiagnostics),
    /// Internal parser error.
    #[error(transparent)]
    Internal(#[from] eyre::Error),
}
/// The parser result.
pub type ParserResult<T, E = ParserError> = std::result::Result<T, E>;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/parser/mod.rs | crates/doc/src/parser/mod.rs | //! The parser module.
use crate::solang_ext::{Visitable, Visitor};
use itertools::Itertools;
use solang_parser::{
doccomment::{DocComment, parse_doccomments},
pt::{
Comment as SolangComment, EnumDefinition, ErrorDefinition, EventDefinition,
FunctionDefinition, Identifier, Loc, SourceUnit, SourceUnitPart, StructDefinition,
TypeDefinition, VariableDefinition,
},
};
/// Parser error.
pub mod error;
use error::{ParserError, ParserResult};
/// Parser item.
mod item;
pub use item::{ParseItem, ParseSource};
/// Doc comment.
mod comment;
pub use comment::{Comment, CommentTag, Comments, CommentsRef};
/// The documentation parser. This type implements a [Visitor] trait.
///
/// While walking the parse tree, [Parser] will collect relevant source items and corresponding
/// doc comments. The resulting [ParseItem]s can be accessed by calling [Parser::items].
#[derive(Debug, Default)]
pub struct Parser {
    /// Initial comments from solang parser.
    comments: Vec<SolangComment>,
    /// Parser context.
    context: ParserContext,
    /// Parsed results.
    items: Vec<ParseItem>,
    /// Source file.
    source: String,
    /// Tab width used to format code snippets extracted from `source`.
    tab_width: usize,
}
/// [Parser] context.
#[derive(Debug, Default)]
struct ParserContext {
    /// Current visited parent.
    parent: Option<ParseItem>,
    /// Current start pointer for parsing doc comments.
    /// Advanced past each parsed item so that the same comments are never
    /// attributed to two items.
    doc_start_loc: usize,
}
impl Parser {
    /// Create a new instance of [Parser].
    pub fn new(comments: Vec<SolangComment>, source: String, tab_width: usize) -> Self {
        Self { comments, source, tab_width, ..Default::default() }
    }
    /// Return the parsed items. Consumes the parser.
    pub fn items(self) -> Vec<ParseItem> {
        self.items
    }
    /// Visit the children elements with parent context.
    ///
    /// Memoizes the previous parent, installs `parent` as the current context,
    /// invokes `visit`, and finally restores the previous parent. The
    /// (possibly mutated) `parent` is returned.
    fn with_parent(
        &mut self,
        parent: ParseItem,
        mut visit: impl FnMut(&mut Self) -> ParserResult<()>,
    ) -> ParserResult<ParseItem> {
        // Swap the new parent in, remembering the previous one.
        let previous = std::mem::replace(&mut self.context.parent, Some(parent));
        visit(self)?;
        // Take the visited parent back out and restore the old one.
        let visited = std::mem::replace(&mut self.context.parent, previous);
        Ok(visited.unwrap())
    }
    /// Adds a child element to the parent item if it exists.
    /// Otherwise the element will be added to a top-level items collection.
    /// Moves the doc comment pointer to the end location of the child element.
    fn add_element_to_parent(&mut self, source: ParseSource, loc: Loc) -> ParserResult<()> {
        let child = self.new_item(source, loc.start())?;
        match self.context.parent.as_mut() {
            Some(parent) => parent.children.push(child),
            None => self.items.push(child),
        }
        self.context.doc_start_loc = loc.end();
        Ok(())
    }
    /// Create new [ParseItem] with comments and formatted code.
    fn new_item(&mut self, source: ParseSource, loc_start: usize) -> ParserResult<ParseItem> {
        let docs = self.parse_docs(loc_start)?;
        let item =
            ParseItem::new(source).with_comments(docs).with_code(&self.source, self.tab_width);
        Ok(item)
    }
    /// Parse the doc comments from the current start location.
    fn parse_docs(&mut self, end: usize) -> ParserResult<Comments> {
        self.parse_docs_range(self.context.doc_start_loc, end)
    }
    /// Parse doc comments from within the specified range.
    fn parse_docs_range(&mut self, start: usize, end: usize) -> ParserResult<Comments> {
        let comments = parse_doccomments(&self.comments, start, end)
            .into_iter()
            // Flatten line and block comments into a single stream.
            .flat_map(|comment| match comment {
                DocComment::Line { comment } => vec![comment],
                DocComment::Block { comments } => comments,
            })
            // Filter out `@solidity` and empty tags
            // See https://docs.soliditylang.org/en/v0.8.17/assembly.html#memory-safety
            .filter(|comment| {
                let tag = comment.tag.trim();
                tag != "solidity" && !tag.is_empty()
            })
            .collect::<Vec<_>>();
        Ok(comments.into())
    }
}
impl Visitor for Parser {
    type Error = ParserError;

    /// Walks the source unit, collecting top-level items. Contract definitions
    /// are visited with themselves installed as the current parent so their
    /// members become children of the contract item.
    fn visit_source_unit(&mut self, source_unit: &mut SourceUnit) -> ParserResult<()> {
        for source in &mut source_unit.0 {
            match source {
                SourceUnitPart::ContractDefinition(def) => {
                    // Create new contract parse item.
                    let contract =
                        self.new_item(ParseSource::Contract(def.clone()), def.loc.start())?;
                    // Move the doc pointer to the contract location start.
                    self.context.doc_start_loc = def.loc.start();
                    // Parse child elements with current contract as parent.
                    // `try_for_each` short-circuits on the first error and avoids
                    // the intermediate `Vec<()>` that `map(..).collect()` built.
                    let contract = self.with_parent(contract, |doc| {
                        def.parts.iter_mut().try_for_each(|d| d.visit(doc))
                    })?;
                    // Move the doc pointer to the contract location end.
                    self.context.doc_start_loc = def.loc.end();
                    // Add contract to the parsed items.
                    self.items.push(contract);
                }
                SourceUnitPart::FunctionDefinition(func) => self.visit_function(func)?,
                SourceUnitPart::EventDefinition(event) => self.visit_event(event)?,
                SourceUnitPart::ErrorDefinition(error) => self.visit_error(error)?,
                SourceUnitPart::StructDefinition(structure) => self.visit_struct(structure)?,
                SourceUnitPart::EnumDefinition(enumerable) => self.visit_enum(enumerable)?,
                SourceUnitPart::VariableDefinition(var) => self.visit_var_definition(var)?,
                SourceUnitPart::TypeDefinition(ty) => self.visit_type_definition(ty)?,
                // Pragmas, imports, etc. carry no documentable items.
                _ => {}
            };
        }
        Ok(())
    }

    /// Registers an enum definition under the current parent.
    fn visit_enum(&mut self, enumerable: &mut EnumDefinition) -> ParserResult<()> {
        self.add_element_to_parent(ParseSource::Enum(enumerable.clone()), enumerable.loc)
    }

    /// Registers a variable definition under the current parent.
    fn visit_var_definition(&mut self, var: &mut VariableDefinition) -> ParserResult<()> {
        self.add_element_to_parent(ParseSource::Variable(var.clone()), var.loc)
    }

    /// Registers a function definition under the current parent.
    ///
    /// If a function parameter doesn't have a name, tries to set it from a
    /// `@custom:name` doc tag found between the previous parameter (or the
    /// function start) and this parameter.
    fn visit_function(&mut self, func: &mut FunctionDefinition) -> ParserResult<()> {
        let mut start_loc = func.loc.start();
        for (loc, param) in &mut func.params {
            if let Some(param) = param
                && param.name.is_none()
            {
                let docs = self.parse_docs_range(start_loc, loc.end())?;
                let name_tag = docs.iter().find(|c| c.tag == CommentTag::Custom("name".to_owned()));
                if let Some(name_tag) = name_tag
                    && let Some(name) = name_tag.value.trim().split(' ').next()
                {
                    param.name = Some(Identifier { loc: Loc::Implicit, name: name.to_owned() })
                }
            }
            start_loc = loc.end();
        }
        self.add_element_to_parent(ParseSource::Function(func.clone()), func.loc)
    }

    /// Registers a struct definition under the current parent.
    fn visit_struct(&mut self, structure: &mut StructDefinition) -> ParserResult<()> {
        self.add_element_to_parent(ParseSource::Struct(structure.clone()), structure.loc)
    }

    /// Registers an event definition under the current parent.
    fn visit_event(&mut self, event: &mut EventDefinition) -> ParserResult<()> {
        self.add_element_to_parent(ParseSource::Event(event.clone()), event.loc)
    }

    /// Registers an error definition under the current parent.
    fn visit_error(&mut self, error: &mut ErrorDefinition) -> ParserResult<()> {
        self.add_element_to_parent(ParseSource::Error(error.clone()), error.loc)
    }

    /// Registers a user-defined type definition under the current parent.
    fn visit_type_definition(&mut self, def: &mut TypeDefinition) -> ParserResult<()> {
        self.add_element_to_parent(ParseSource::Type(def.clone()), def.loc)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use solang_parser::parse;
fn parse_source(src: &str) -> Vec<ParseItem> {
let (mut source, comments) = parse(src, 0).expect("failed to parse source");
let mut doc = Parser::new(comments, src.to_owned(), 4);
source.visit(&mut doc).expect("failed to visit source");
doc.items()
}
macro_rules! test_single_unit {
($test:ident, $src:expr, $variant:ident $identity:expr) => {
#[test]
fn $test() {
let items = parse_source($src);
assert_eq!(items.len(), 1);
let item = items.first().unwrap();
assert!(item.comments.is_empty());
assert!(item.children.is_empty());
assert_eq!(item.source.ident(), $identity);
assert!(matches!(item.source, ParseSource::$variant(_)));
}
};
}
#[test]
fn empty_source() {
assert_eq!(parse_source(""), vec![]);
}
test_single_unit!(single_function, "function someFn() { }", Function "someFn");
test_single_unit!(single_variable, "uint256 constant VALUE = 0;", Variable "VALUE");
test_single_unit!(single_event, "event SomeEvent();", Event "SomeEvent");
test_single_unit!(single_error, "error SomeError();", Error "SomeError");
test_single_unit!(single_struct, "struct SomeStruct { }", Struct "SomeStruct");
test_single_unit!(single_enum, "enum SomeEnum { SOME, OTHER }", Enum "SomeEnum");
test_single_unit!(single_contract, "contract Contract { }", Contract "Contract");
#[test]
fn multiple_shallow_contracts() {
let items = parse_source(
r"
contract A { }
contract B { }
contract C { }
",
);
assert_eq!(items.len(), 3);
let first_item = items.first().unwrap();
assert!(matches!(first_item.source, ParseSource::Contract(_)));
assert_eq!(first_item.source.ident(), "A");
let first_item = items.get(1).unwrap();
assert!(matches!(first_item.source, ParseSource::Contract(_)));
assert_eq!(first_item.source.ident(), "B");
let first_item = items.get(2).unwrap();
assert!(matches!(first_item.source, ParseSource::Contract(_)));
assert_eq!(first_item.source.ident(), "C");
}
/// A free-standing item and a contract are both parsed; everything declared
/// inside the contract (except function-local variables) becomes a child of
/// the contract item.
#[test]
fn contract_with_children_items() {
    let items = parse_source(
        r"
event TopLevelEvent();
contract Contract {
event ContractEvent();
error ContractError();
struct ContractStruct { }
enum ContractEnum { }
uint256 constant CONTRACT_CONSTANT = 0;
bool contractVar;
function contractFunction(uint256) external returns (uint256) {
bool localVar; // must be ignored
}
}
",
    );
    assert_eq!(items.len(), 2);

    // First item: the free-standing event, with no comments or children.
    let event = &items[0];
    assert!(matches!(event.source, ParseSource::Event(_)));
    assert_eq!(event.source.ident(), "TopLevelEvent");
    assert!(event.comments.is_empty());
    assert!(event.children.is_empty());

    // Second item: the contract, carrying all 7 member definitions as
    // children (the function-local variable is not counted).
    let contract = &items[1];
    assert!(matches!(contract.source, ParseSource::Contract(_)));
    assert_eq!(contract.source.ident(), "Contract");
    assert!(contract.comments.is_empty());
    assert_eq!(contract.children.len(), 7);
    for child in &contract.children {
        assert!(child.children.is_empty());
        assert!(child.comments.is_empty());
    }
}
/// A contract's `fallback` function is picked up as a function child whose
/// identifier is the function type name.
#[test]
fn contract_with_fallback() {
    let items = parse_source(
        r"
contract Contract {
fallback() external payable {}
}
",
    );
    assert_eq!(items.len(), 1);

    let contract = &items[0];
    assert!(matches!(contract.source, ParseSource::Contract(_)));
    assert_eq!(contract.source.ident(), "Contract");
    assert!(contract.comments.is_empty());
    assert_eq!(contract.children.len(), 1);

    let fallback = &contract.children[0];
    assert!(matches!(fallback.source, ParseSource::Function(_)));
    assert_eq!(fallback.source.ident(), "fallback");
}
/// Only valid natspec is kept: the `///@notice` line and the `/** @dev */`
/// block (multi-line values joined with `\n`). Unknown tags (`@name`),
/// untagged lines and the malformed `@ dev` line are all dropped.
#[test]
fn contract_with_doc_comments() {
    let items = parse_source(
        r"
pragma solidity ^0.8.19;
/// @name Test
/// no tag
///@notice Cool contract
/// @ dev This is not a dev tag
/**
* @dev line one
* line 2
*/
contract Test {
/** my function
i like whitespace
*/
function test() {}
}
",
    );
    assert_eq!(items.len(), 1);

    let contract = &items[0];
    let expected = vec![
        Comment::new(CommentTag::Notice, "Cool contract".to_owned()),
        Comment::new(CommentTag::Dev, "line one\nline 2".to_owned()),
    ];
    // Comparing the whole collection covers both length and element order.
    assert_eq!(*contract.comments, expected);

    let function = contract.children.first().unwrap();
    assert_eq!(
        function.comments.first(),
        Some(&Comment::new(CommentTag::Notice, "my function\ni like whitespace".to_owned()))
    );
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/parser/item.rs | crates/doc/src/parser/item.rs | use crate::{Comments, helpers::function_signature, solang_ext::SafeUnwrap};
use solang_parser::pt::{
ContractDefinition, ContractTy, EnumDefinition, ErrorDefinition, EventDefinition,
FunctionDefinition, StructDefinition, TypeDefinition, VariableDefinition,
};
use std::ops::Range;
/// The parsed item: one documentable Solidity definition together with its
/// natspec comments, nested child items and a formatted code snippet.
#[derive(Debug, PartialEq)]
pub struct ParseItem {
    /// The parse tree source.
    pub source: ParseSource,
    /// Item comments.
    pub comments: Comments,
    /// Children items.
    pub children: Vec<Self>,
    /// Formatted code string.
    // Populated by `with_code`; functions/errors/events get a trailing `;`
    // re-added there.
    pub code: String,
}
/// Defines a method that filters [ParseItem]'s children and returns the source pt token of the
/// children matching the target variant as well as its comments.
/// Returns [Option::None] if no children matching the variant are found.
macro_rules! filter_children_fn {
    ($vis:vis fn $name:ident(&self, $variant:ident) -> $ret:ty) => {
        /// Filter children items for [ParseSource::$variant] variants.
        $vis fn $name(&self) -> Option<Vec<(&$ret, &Comments, &String)>> {
            // Keep only children whose source matches the requested variant,
            // pairing the inner definition with its comments and code snippet.
            let items = self.children.iter().filter_map(|item| match item.source {
                ParseSource::$variant(ref inner) => Some((inner, &item.comments, &item.code)),
                _ => None,
            });
            let items = items.collect::<Vec<_>>();
            // Collapse an empty result into `None` so callers can skip a whole
            // section with a single `if let`.
            if !items.is_empty() {
                Some(items)
            } else {
                None
            }
        }
    };
}
/// Defines a method that returns [ParseSource] inner element if it matches
/// the variant
macro_rules! as_inner_source {
    ($vis:vis fn $name:ident(&self, $variant:ident) -> $ret:ty) => {
        /// Return inner element if it matches $variant.
        /// If the element doesn't match, returns [None]
        $vis fn $name(&self) -> Option<&$ret> {
            // Borrow the inner parse-tree definition; never clones.
            match self.source {
                ParseSource::$variant(ref inner) => Some(inner),
                _ => None
            }
        }
    };
}
impl ParseItem {
/// Create new instance of [ParseItem].
pub fn new(source: ParseSource) -> Self {
Self {
source,
comments: Default::default(),
children: Default::default(),
code: Default::default(),
}
}
/// Set comments on the [ParseItem].
pub fn with_comments(mut self, comments: Comments) -> Self {
self.comments = comments;
self
}
/// Set children on the [ParseItem].
pub fn with_children(mut self, children: Vec<Self>) -> Self {
self.children = children;
self
}
/// Set the source code of this [ParseItem].
///
/// The parameter should be the full source file where this parse item originated from.
pub fn with_code(mut self, source: &str, tab_width: usize) -> Self {
let mut code = source[self.source.range()].to_string();
// Special function case, add `;` at the end of definition.
if let ParseSource::Function(_) | ParseSource::Error(_) | ParseSource::Event(_) =
self.source
{
code.push(';');
}
// Remove extra indent from source lines.
let prefix = &" ".repeat(tab_width);
self.code = code
.lines()
.map(|line| line.strip_prefix(prefix).unwrap_or(line))
.collect::<Vec<_>>()
.join("\n");
self
}
/// Format the item's filename.
pub fn filename(&self) -> String {
let prefix = match self.source {
ParseSource::Contract(ref c) => match c.ty {
ContractTy::Contract(_) => "contract",
ContractTy::Abstract(_) => "abstract",
ContractTy::Interface(_) => "interface",
ContractTy::Library(_) => "library",
},
ParseSource::Function(_) => "function",
ParseSource::Variable(_) => "variable",
ParseSource::Event(_) => "event",
ParseSource::Error(_) => "error",
ParseSource::Struct(_) => "struct",
ParseSource::Enum(_) => "enum",
ParseSource::Type(_) => "type",
};
let ident = self.source.ident();
format!("{prefix}.{ident}.md")
}
filter_children_fn!(pub fn variables(&self, Variable) -> VariableDefinition);
filter_children_fn!(pub fn functions(&self, Function) -> FunctionDefinition);
filter_children_fn!(pub fn events(&self, Event) -> EventDefinition);
filter_children_fn!(pub fn errors(&self, Error) -> ErrorDefinition);
filter_children_fn!(pub fn structs(&self, Struct) -> StructDefinition);
filter_children_fn!(pub fn enums(&self, Enum) -> EnumDefinition);
as_inner_source!(pub fn as_contract(&self, Contract) -> ContractDefinition);
as_inner_source!(pub fn as_variable(&self, Variable) -> VariableDefinition);
as_inner_source!(pub fn as_function(&self, Function) -> FunctionDefinition);
}
/// A wrapper type around pt token.
// Owned copies of the solang parse-tree definitions the doc generator knows
// how to render; `Contract` is boxed because `ContractDefinition` is large.
#[derive(Clone, Debug, PartialEq, Eq)]
#[allow(clippy::large_enum_variant)]
pub enum ParseSource {
    /// Source contract definition.
    Contract(Box<ContractDefinition>),
    /// Source function definition.
    Function(FunctionDefinition),
    /// Source variable definition.
    Variable(VariableDefinition),
    /// Source event definition.
    Event(EventDefinition),
    /// Source error definition.
    Error(ErrorDefinition),
    /// Source struct definition.
    Struct(StructDefinition),
    /// Source enum definition.
    Enum(EnumDefinition),
    /// Source type definition.
    Type(TypeDefinition),
}
impl ParseSource {
    /// Get the identity of the source.
    ///
    /// Anonymous functions (constructor, fallback, receive, …) fall back to
    /// their function type name.
    pub fn ident(&self) -> String {
        let name = match self {
            Self::Contract(contract) => &contract.name.safe_unwrap().name,
            Self::Variable(var) => &var.name.safe_unwrap().name,
            Self::Event(event) => &event.name.safe_unwrap().name,
            Self::Error(error) => &error.name.safe_unwrap().name,
            Self::Struct(structure) => &structure.name.safe_unwrap().name,
            Self::Enum(enumerable) => &enumerable.name.safe_unwrap().name,
            Self::Type(ty) => &ty.name.name,
            Self::Function(func) => {
                return func.name.as_ref().map_or_else(|| func.ty.to_string(), |n| n.name.clone());
            }
        };
        name.clone()
    }

    /// Get the signature of the source (for functions, includes parameter types)
    pub fn signature(&self) -> String {
        if let Self::Function(func) = self {
            function_signature(func)
        } else {
            self.ident()
        }
    }

    /// Get the range of this item in the source file.
    // Functions use only the prototype range so the body is excluded.
    pub fn range(&self) -> Range<usize> {
        let loc = match self {
            Self::Contract(contract) => contract.loc,
            Self::Variable(var) => var.loc,
            Self::Event(event) => event.loc,
            Self::Error(error) => error.loc,
            Self::Struct(structure) => structure.loc,
            Self::Enum(enumerable) => enumerable.loc,
            Self::Function(func) => func.loc_prototype,
            Self::Type(ty) => ty.loc,
        };
        loc.range()
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/doc/src/parser/comment.rs | crates/doc/src/parser/comment.rs | use alloy_primitives::map::HashMap;
use derive_more::{Deref, DerefMut, derive::Display};
use solang_parser::doccomment::DocCommentTag;
/// The natspec comment tag explaining the purpose of the comment.
/// See: <https://docs.soliditylang.org/en/v0.8.17/natspec-format.html#tags>.
#[derive(Clone, Debug, Display, PartialEq, Eq)]
pub enum CommentTag {
    /// A title that should describe the contract/interface
    Title,
    /// The name of the author
    Author,
    /// Explain to an end user what this does
    Notice,
    /// Explain to a developer any extra details
    Dev,
    /// Documents a parameter just like in Doxygen (must be followed by parameter name)
    Param,
    /// Documents the return variables of a contract’s function
    Return,
    /// Copies all missing tags from the base function (must be followed by the contract name)
    Inheritdoc,
    /// Custom tag, semantics is application-defined
    // Stored without the `custom:` prefix (see `CommentTag::from_str`).
    Custom(String),
}
impl CommentTag {
    /// Parses a natspec tag name (without the leading `@`) into a [CommentTag].
    ///
    /// Returns `None` — and logs a warning — for tag names that are neither a
    /// standard natspec tag nor prefixed with `custom:`.
    fn from_str(s: &str) -> Option<Self> {
        let trimmed = s.trim();
        let tag = match trimmed {
            "title" => Self::Title,
            "author" => Self::Author,
            "notice" => Self::Notice,
            "dev" => Self::Dev,
            "param" => Self::Param,
            "return" => Self::Return,
            "inheritdoc" => Self::Inheritdoc,
            _ => {
                // Strip the `custom:` marker exactly once. `trim_start_matches`
                // would strip *repeated* prefixes, silently turning e.g.
                // `custom:custom:x` into `Custom("x")`.
                let Some(rest) = trimmed.strip_prefix("custom:") else {
                    warn!(target: "forge::doc", tag=trimmed, "unknown comment tag. custom tags must be preceded by `custom:`");
                    return None;
                };
                // `@custom:param` tag will be parsed as `CommentTag::Param` due to a limitation
                // on specifying parameter docs for unnamed function arguments.
                let custom_tag = rest.trim();
                match custom_tag {
                    "param" => Self::Param,
                    _ => Self::Custom(custom_tag.to_owned()),
                }
            }
        };
        Some(tag)
    }
}
/// The natspec documentation comment.
///
/// Ref: <https://docs.soliditylang.org/en/v0.8.17/natspec-format.html>
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Comment {
    /// The doc comment tag.
    pub tag: CommentTag,
    /// The doc comment value.
    // Free-form text following the tag; for `param`/`return` comments the
    // first word is the parameter name (see `split_first_word`).
    pub value: String,
}
impl Comment {
    /// Create new instance of [Comment].
    pub fn new(tag: CommentTag, value: String) -> Self {
        Self { tag, value }
    }

    /// Create new instance of [Comment] from [DocCommentTag]
    /// if it has a valid natspec tag.
    pub fn from_doc_comment(value: DocCommentTag) -> Option<Self> {
        let tag = CommentTag::from_str(&value.tag)?;
        Some(Self::new(tag, value.value))
    }

    /// Split the comment at first word.
    /// Useful for [CommentTag::Param] and [CommentTag::Return] comments.
    pub fn split_first_word(&self) -> Option<(&str, &str)> {
        self.value.trim_start().split_once(' ')
    }

    /// Match the first word of the comment with the expected.
    /// Returns [None] if the word doesn't match.
    /// Useful for [CommentTag::Param] and [CommentTag::Return] comments.
    pub fn match_first_word(&self, expected: &str) -> Option<&str> {
        match self.split_first_word() {
            Some((word, rest)) if word == expected => Some(rest),
            _ => None,
        }
    }

    /// Check if this comment is a custom tag.
    pub fn is_custom(&self) -> bool {
        matches!(self.tag, CommentTag::Custom(_))
    }
}
/// The collection of natspec [Comment] items.
// Derefs to `Vec<Comment>`, so the usual slice/vec methods are available.
#[derive(Clone, Debug, Default, PartialEq, Deref, DerefMut)]
pub struct Comments(Vec<Comment>);
/// Forward the [Comments] function implementation to the [CommentsRef]
/// reference type.
// Avoids duplicating the filtering logic between the owned and borrowed
// collection types.
macro_rules! ref_fn {
    ($vis:vis fn $name:ident(&self$(, )?$($arg_name:ident: $arg:ty),*) -> $ret:ty) => {
        /// Forward the function implementation to [CommentsRef] reference type.
        $vis fn $name(&self, $($arg_name: $arg),*) -> $ret {
            CommentsRef::from(self).$name($($arg_name),*)
        }
    };
}
impl Comments {
    ref_fn!(pub fn include_tag(&self, tag: CommentTag) -> CommentsRef<'_>);
    ref_fn!(pub fn include_tags(&self, tags: &[CommentTag]) -> CommentsRef<'_>);
    ref_fn!(pub fn exclude_tags(&self, tags: &[CommentTag]) -> CommentsRef<'_>);
    ref_fn!(pub fn contains_tag(&self, tag: &Comment) -> bool);
    ref_fn!(pub fn find_inheritdoc_base(&self) -> Option<&'_ str>);

    /// Attempts to lookup inherited comments and merge them with the current collection.
    ///
    /// Looks up comments in `inheritdocs` using the key `{base}.{ident}` where `base` is
    /// extracted from an `@inheritdoc` tag. Merges the found comments by inserting
    /// [CommentTag] from the inherited collection into the current one unless they are
    /// already present.
    pub fn merge_inheritdoc(
        &self,
        ident: &str,
        inheritdocs: Option<HashMap<String, Self>>,
    ) -> Self {
        let mut merged = self.clone();

        // Without a lookup table or an `@inheritdoc` tag there is nothing to merge.
        let (Some(inheritdocs), Some(base)) = (inheritdocs, self.find_inheritdoc_base()) else {
            return merged;
        };

        if let Some(inherited) = inheritdocs.get(&format!("{base}.{ident}")) {
            // Only adopt tags that this collection does not already define.
            for comment in inherited.iter() {
                if !merged.contains_tag(comment) {
                    merged.push(comment.clone());
                }
            }
        }
        merged
    }
}
impl From<Vec<DocCommentTag>> for Comments {
    /// Keeps only doc comments whose tag parses into a known [CommentTag].
    fn from(value: Vec<DocCommentTag>) -> Self {
        let comments = value.into_iter().filter_map(Comment::from_doc_comment).collect();
        Self(comments)
    }
}
/// The collection of references to natspec [Comment] items.
// Borrowing counterpart of [Comments]; filtering methods return new
// `CommentsRef` values that share the same underlying comments.
#[derive(Debug, Default, PartialEq, Deref)]
pub struct CommentsRef<'a>(Vec<&'a Comment>);
impl<'a> CommentsRef<'a> {
    /// Filter a collection of comments and return only those that match a provided tag.
    pub fn include_tag(&self, tag: CommentTag) -> Self {
        self.include_tags(&[tag])
    }

    /// Filter a collection of comments and return only those that match provided tags.
    pub fn include_tags(&self, tags: &[CommentTag]) -> Self {
        // Only the `&Comment` references are copied, never the comments themselves.
        CommentsRef(self.iter().filter(|c| tags.contains(&c.tag)).copied().collect())
    }

    /// Filter a collection of comments and return only those that do not match provided tags.
    pub fn exclude_tags(&self, tags: &[CommentTag]) -> Self {
        // Only the `&Comment` references are copied, never the comments themselves.
        CommentsRef(self.iter().filter(|c| !tags.contains(&c.tag)).copied().collect())
    }

    /// Check if the collection contains a target comment.
    // Equality rules per tag: `inheritdoc` compares full values,
    // `param`/`return` compare only the first word (the parameter name), and
    // everything else compares the tag alone.
    pub fn contains_tag(&self, target: &Comment) -> bool {
        let name_of = |comment: &Comment| comment.split_first_word().map(|(name, _)| name);
        self.iter().any(|c| match (&c.tag, &target.tag) {
            (CommentTag::Inheritdoc, CommentTag::Inheritdoc) => c.value == target.value,
            (CommentTag::Param, CommentTag::Param) | (CommentTag::Return, CommentTag::Return) => {
                name_of(c) == name_of(target)
            }
            (tag1, tag2) => tag1 == tag2,
        })
    }

    /// Find an [CommentTag::Inheritdoc] comment and extract the base.
    fn find_inheritdoc_base(&self) -> Option<&'a str> {
        for comment in self.iter() {
            if matches!(comment.tag, CommentTag::Inheritdoc) {
                return comment.value.split_whitespace().next();
            }
        }
        None
    }

    /// Filter a collection of comments and only return the custom tags.
    pub fn get_custom_tags(&self) -> Self {
        CommentsRef(self.iter().filter(|c| c.is_custom()).copied().collect())
    }
}
impl<'a> From<&'a Comments> for CommentsRef<'a> {
    // Borrows every comment in the collection; no cloning of the data.
    fn from(value: &'a Comments) -> Self {
        Self(value.iter().collect())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_comment_tag() {
        // Standard and custom tags, with and without surrounding whitespace.
        let valid = [
            ("title", CommentTag::Title),
            (" title ", CommentTag::Title),
            ("author", CommentTag::Author),
            ("notice", CommentTag::Notice),
            ("dev", CommentTag::Dev),
            ("param", CommentTag::Param),
            ("return", CommentTag::Return),
            ("inheritdoc", CommentTag::Inheritdoc),
            ("custom:", CommentTag::Custom(String::new())),
            ("custom:some", CommentTag::Custom("some".to_owned())),
            (" custom: some ", CommentTag::Custom("some".to_owned())),
        ];
        for (input, expected) in valid {
            assert_eq!(CommentTag::from_str(input), Some(expected));
        }

        // Everything else is rejected.
        for input in ["", "custom", "sometag"] {
            assert_eq!(CommentTag::from_str(input), None);
        }
    }

    #[test]
    fn test_is_custom() {
        // A `custom:` tag reports `is_custom() == true`.
        let custom_comment = Comment::new(
            CommentTag::from_str("custom:test").unwrap(),
            "dummy custom tag".to_owned(),
        );
        assert!(custom_comment.is_custom(), "Custom tag should return true for is_custom");

        // Every standard tag reports `false`.
        let non_custom_tags = [
            CommentTag::Title,
            CommentTag::Author,
            CommentTag::Notice,
            CommentTag::Dev,
            CommentTag::Param,
            CommentTag::Return,
            CommentTag::Inheritdoc,
        ];
        for tag in non_custom_tags {
            let comment = Comment::new(tag.clone(), "Non-custom comment".to_string());
            assert!(
                !comment.is_custom(),
                "Non-custom tag {tag:?} should return false for is_custom"
            );
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/config.rs | crates/anvil/src/config.rs | use crate::{
EthereumHardfork, FeeManager, PrecompileFactory,
eth::{
backend::{
db::{Db, SerializableState},
env::Env,
fork::{ClientFork, ClientForkConfig},
genesis::GenesisConfig,
mem::fork_db::ForkedDatabase,
time::duration_since_unix_epoch,
},
fees::{INITIAL_BASE_FEE, INITIAL_GAS_PRICE},
pool::transactions::{PoolTransaction, TransactionOrder},
},
mem::{self, in_memory_db::MemDb},
};
use alloy_chains::Chain;
use alloy_consensus::BlockHeader;
use alloy_eips::{eip1559::BaseFeeParams, eip7840::BlobParams};
use alloy_evm::EvmEnv;
use alloy_genesis::Genesis;
use alloy_network::{AnyNetwork, TransactionResponse};
use alloy_primitives::{BlockNumber, TxHash, U256, hex, map::HashMap, utils::Unit};
use alloy_provider::Provider;
use alloy_rpc_types::{Block, BlockNumberOrTag};
use alloy_signer::Signer;
use alloy_signer_local::{
MnemonicBuilder, PrivateKeySigner,
coins_bip39::{English, Mnemonic},
};
use alloy_transport::TransportError;
use anvil_server::ServerConfig;
use eyre::{Context, Result};
use foundry_common::{
ALCHEMY_FREE_TIER_CUPS, NON_ARCHIVE_NODE_WARNING, REQUEST_TIMEOUT,
provider::{ProviderBuilder, RetryProvider},
};
use foundry_config::Config;
use foundry_evm::{
backend::{BlockchainDb, BlockchainDbMeta, SharedBackend},
constants::DEFAULT_CREATE2_DEPLOYER,
core::AsEnvMut,
hardfork::{
FoundryHardfork, OpHardfork, ethereum_hardfork_from_block_tag,
spec_id_from_ethereum_hardfork,
},
utils::{apply_chain_and_block_specific_env_changes, get_blob_base_fee_update_fraction},
};
use itertools::Itertools;
use op_revm::OpTransaction;
use parking_lot::RwLock;
use rand_08::thread_rng;
use revm::{
context::{BlockEnv, CfgEnv, TxEnv},
context_interface::block::BlobExcessGasAndPrice,
primitives::hardfork::SpecId,
};
use serde_json::{Value, json};
use std::{
fmt::Write as FmtWrite,
fs::File,
io,
net::{IpAddr, Ipv4Addr},
path::PathBuf,
sync::Arc,
time::Duration,
};
use tokio::sync::RwLock as TokioRwLock;
use yansi::Paint;
pub use foundry_common::version::SHORT_VERSION as VERSION_MESSAGE;
use foundry_evm::{
traces::{CallTraceDecoderBuilder, identifier::SignaturesIdentifier},
utils::get_blob_params,
};
use foundry_evm_networks::NetworkConfigs;
/// Default port the rpc will open
pub const NODE_PORT: u16 = 8545;

/// Default chain id of the node
pub const CHAIN_ID: u64 = 31337;

/// The default gas limit for all transactions
pub const DEFAULT_GAS_LIMIT: u64 = 30_000_000;

/// Default mnemonic for dev accounts
pub const DEFAULT_MNEMONIC: &str = "test test test test test test test test test test test junk";

/// The default IPC endpoint
// A filesystem socket path on Unix, a named pipe on Windows.
pub const DEFAULT_IPC_ENDPOINT: &str =
    if cfg!(unix) { "/tmp/anvil.ipc" } else { r"\\.\pipe\anvil.ipc" };

// ASCII-art banner printed by `NodeConfig::as_string` on startup.
const BANNER: &str = r"
_ _
(_) | |
__ _ _ __ __ __ _ | |
/ _` | | '_ \ \ \ / / | | | |
| (_| | | | | | \ V / | | | |
\__,_| |_| |_| \_/ |_| |_|
";
/// Configurations of the EVM node
#[derive(Clone, Debug)]
pub struct NodeConfig {
    /// Chain ID of the EVM chain
    pub chain_id: Option<u64>,
    /// Default gas limit for all txs
    pub gas_limit: Option<u64>,
    /// If set to `true`, disables the block gas limit
    pub disable_block_gas_limit: bool,
    /// If set to `true`, enables the tx gas limit as imposed by Osaka (EIP-7825)
    pub enable_tx_gas_limit: bool,
    /// Default gas price for all txs
    pub gas_price: Option<u128>,
    /// Default base fee
    pub base_fee: Option<u64>,
    /// If set to `true`, disables the enforcement of a minimum suggested priority fee
    pub disable_min_priority_fee: bool,
    /// Default blob excess gas and price
    pub blob_excess_gas_and_price: Option<BlobExcessGasAndPrice>,
    /// The hardfork to use
    pub hardfork: Option<FoundryHardfork>,
    /// Signer accounts that will be initialised with `genesis_balance` in the genesis block
    pub genesis_accounts: Vec<PrivateKeySigner>,
    /// Native token balance of every genesis account in the genesis block
    pub genesis_balance: U256,
    /// Genesis block timestamp
    pub genesis_timestamp: Option<u64>,
    /// Genesis block number
    pub genesis_block_number: Option<u64>,
    /// Signer accounts that can sign messages/transactions from the EVM node
    pub signer_accounts: Vec<PrivateKeySigner>,
    /// Configured block time for the EVM chain. Use `None` to mine a new block for every tx
    pub block_time: Option<Duration>,
    /// Disable auto, interval mining mode and use `MiningMode::None` instead
    pub no_mining: bool,
    /// Enables auto and interval mining mode
    pub mixed_mining: bool,
    /// port to use for the server
    pub port: u16,
    /// maximum number of transactions in a block
    pub max_transactions: usize,
    /// url of the rpc server that should be used for any rpc calls
    pub eth_rpc_url: Option<String>,
    /// pins the block number or transaction hash for the state fork
    pub fork_choice: Option<ForkChoice>,
    /// headers to use with `eth_rpc_url`
    pub fork_headers: Vec<String>,
    /// specifies chain id for cache to skip fetching from remote in offline-start mode
    pub fork_chain_id: Option<U256>,
    /// The generator used to generate the dev accounts
    pub account_generator: Option<AccountGenerator>,
    /// whether to enable tracing
    pub enable_tracing: bool,
    /// Explicitly disables the use of RPC caching.
    pub no_storage_caching: bool,
    /// How to configure the server
    pub server_config: ServerConfig,
    /// The host the server will listen on
    pub host: Vec<IpAddr>,
    /// How transactions are sorted in the mempool
    pub transaction_order: TransactionOrder,
    /// Filename to write anvil output as json
    pub config_out: Option<PathBuf>,
    /// The genesis to use to initialize the node
    pub genesis: Option<Genesis>,
    /// Timeout in for requests sent to remote JSON-RPC server in forking mode
    pub fork_request_timeout: Duration,
    /// Number of request retries for spurious networks
    pub fork_request_retries: u32,
    /// The initial retry backoff
    pub fork_retry_backoff: Duration,
    /// available CUPS
    pub compute_units_per_second: u64,
    /// The ipc path
    // NOTE(review): nested Option — presumably the outer one toggles IPC and
    // the inner one overrides `DEFAULT_IPC_ENDPOINT`; confirm with CLI wiring.
    pub ipc_path: Option<Option<String>>,
    /// Enable transaction/call steps tracing for debug calls returning geth-style traces
    pub enable_steps_tracing: bool,
    /// Enable printing of `console.log` invocations.
    pub print_logs: bool,
    /// Enable printing of traces.
    pub print_traces: bool,
    /// Enable auto impersonation of accounts on startup
    pub enable_auto_impersonate: bool,
    /// Configure the code size limit
    pub code_size_limit: Option<usize>,
    /// Configures how to remove historic state.
    ///
    /// If set to `Some(num)` keep latest num state in memory only.
    pub prune_history: PruneStateHistoryConfig,
    /// Max number of states cached on disk.
    pub max_persisted_states: Option<usize>,
    /// The file where to load the state from
    pub init_state: Option<SerializableState>,
    /// max number of blocks with transactions in memory
    pub transaction_block_keeper: Option<usize>,
    /// Disable the default CREATE2 deployer
    pub disable_default_create2_deployer: bool,
    /// Disable pool balance checks
    pub disable_pool_balance_checks: bool,
    /// Slots in an epoch
    pub slots_in_an_epoch: u64,
    /// The memory limit per EVM execution in bytes.
    pub memory_limit: Option<u64>,
    /// Factory used by `anvil` to extend the EVM's precompiles.
    pub precompile_factory: Option<Arc<dyn PrecompileFactory>>,
    /// Networks to enable features for.
    pub networks: NetworkConfigs,
    /// Do not print log messages.
    pub silent: bool,
    /// The path where states are cached.
    pub cache_path: Option<PathBuf>,
}
impl NodeConfig {
    /// Renders the human-readable startup report: banner, account and private
    /// key listing, wallet mnemonic, fork or chain-id info, fee settings and
    /// genesis data.
    fn as_string(&self, fork: Option<&ClientFork>) -> String {
        let mut s: String = String::new();
        let _ = write!(s, "\n{}", BANNER.green());
        let _ = write!(s, "\n {VERSION_MESSAGE}");
        let _ = write!(s, "\n {}", "https://github.com/foundry-rs/foundry".green());
        let _ = write!(
            s,
            r#"
Available Accounts
==================
"#
        );
        let balance = alloy_primitives::utils::format_ether(self.genesis_balance);
        for (idx, wallet) in self.genesis_accounts.iter().enumerate() {
            // NOTE(review): the only write in this function that `unwrap`s
            // instead of `let _ =`. Formatting into a `String` cannot fail,
            // so this is harmless, but inconsistent with the rest.
            write!(s, "\n({idx}) {} ({balance} ETH)", wallet.address()).unwrap();
        }
        let _ = write!(
            s,
            r#"
Private Keys
==================
"#
        );
        for (idx, wallet) in self.genesis_accounts.iter().enumerate() {
            let hex = hex::encode(wallet.credential().to_bytes());
            let _ = write!(s, "\n({idx}) 0x{hex}");
        }
        // Only shown when accounts were derived from a mnemonic.
        if let Some(generator) = &self.account_generator {
            let _ = write!(
                s,
                r#"
Wallet
==================
Mnemonic: {}
Derivation path: {}
"#,
                generator.phrase,
                generator.get_derivation_path()
            );
        }
        // Forked nodes report the upstream endpoint; local nodes just the chain id.
        if let Some(fork) = fork {
            let _ = write!(
                s,
                r#"
Fork
==================
Endpoint: {}
Block number: {}
Block hash: {:?}
Chain ID: {}
"#,
                fork.eth_rpc_url(),
                fork.block_number(),
                fork.block_hash(),
                fork.chain_id()
            );
            if let Some(tx_hash) = fork.transaction_hash() {
                let _ = writeln!(s, "Transaction hash: {tx_hash}");
            }
        } else {
            let _ = write!(
                s,
                r#"
Chain ID
==================
{}
"#,
                self.get_chain_id().green()
            );
        }
        // Pre-London hardforks use a fixed gas price; London and later report
        // the EIP-1559 base fee instead.
        if (SpecId::from(self.get_hardfork()) as u8) < (SpecId::LONDON as u8) {
            let _ = write!(
                s,
                r#"
Gas Price
==================
{}
"#,
                self.get_gas_price().green()
            );
        } else {
            let _ = write!(
                s,
                r#"
Base Fee
==================
{}
"#,
                self.get_base_fee().green()
            );
        }
        let _ = write!(
            s,
            r#"
Gas Limit
==================
{}
"#,
            {
                // Priority: disabled flag > explicit limit > "Forked" when
                // forking (the fork's limit applies) > the default constant.
                if self.disable_block_gas_limit {
                    "Disabled".to_string()
                } else {
                    self.gas_limit.map(|l| l.to_string()).unwrap_or_else(|| {
                        if self.fork_choice.is_some() {
                            "Forked".to_string()
                        } else {
                            DEFAULT_GAS_LIMIT.to_string()
                        }
                    })
                }
            }
            .green()
        );
        let _ = write!(
            s,
            r#"
Genesis Timestamp
==================
{}
"#,
            self.get_genesis_timestamp().green()
        );
        let _ = write!(
            s,
            r#"
Genesis Number
==================
{}
"#,
            self.get_genesis_number().green()
        );
        s
    }

    /// Returns the startup report as JSON (written when `config_out` is set).
    ///
    /// The forked variant includes the fork endpoint/block data; the local
    /// variant includes `genesis_timestamp` instead.
    fn as_json(&self, fork: Option<&ClientFork>) -> Value {
        let mut wallet_description = HashMap::new();
        let mut available_accounts = Vec::with_capacity(self.genesis_accounts.len());
        let mut private_keys = Vec::with_capacity(self.genesis_accounts.len());

        for wallet in &self.genesis_accounts {
            available_accounts.push(format!("{:?}", wallet.address()));
            private_keys.push(format!("0x{}", hex::encode(wallet.credential().to_bytes())));
        }

        if let Some(generator) = &self.account_generator {
            let phrase = generator.get_phrase().to_string();
            let derivation_path = generator.get_derivation_path().to_string();

            wallet_description.insert("derivation_path".to_string(), derivation_path);
            wallet_description.insert("mnemonic".to_string(), phrase);
        };

        let gas_limit = match self.gas_limit {
            // if we have a disabled flag we should max out the limit
            Some(_) | None if self.disable_block_gas_limit => Some(u64::MAX.to_string()),
            Some(limit) => Some(limit.to_string()),
            _ => None,
        };

        if let Some(fork) = fork {
            json!({
                "available_accounts": available_accounts,
                "private_keys": private_keys,
                "endpoint": fork.eth_rpc_url(),
                "block_number": fork.block_number(),
                "block_hash": fork.block_hash(),
                "chain_id": fork.chain_id(),
                "wallet": wallet_description,
                "base_fee": format!("{}", self.get_base_fee()),
                "gas_price": format!("{}", self.get_gas_price()),
                "gas_limit": gas_limit,
            })
        } else {
            json!({
                "available_accounts": available_accounts,
                "private_keys": private_keys,
                "wallet": wallet_description,
                "base_fee": format!("{}", self.get_base_fee()),
                "gas_price": format!("{}", self.get_gas_price()),
                "gas_limit": gas_limit,
                "genesis_timestamp": format!("{}", self.get_genesis_timestamp()),
            })
        }
    }
}
impl NodeConfig {
    /// Returns a new config intended to be used in tests, which does not print and binds to a
    /// random, free port by setting it to `0`
    #[doc(hidden)]
    pub fn test() -> Self {
        let mut config = Self::default();
        config.enable_tracing = true;
        config.port = 0;
        config.silent = true;
        config
    }

    /// Returns a new config which does not initialize any accounts on node startup.
    pub fn empty_state() -> Self {
        let mut config = Self::default();
        config.genesis_accounts = Vec::new();
        config.signer_accounts = Vec::new();
        config.disable_default_create2_deployer = true;
        config
    }
}
impl Default for NodeConfig {
    fn default() -> Self {
        // generate some random wallets
        // Deterministic in practice: derived from the well-known test mnemonic.
        let genesis_accounts = AccountGenerator::new(10)
            .phrase(DEFAULT_MNEMONIC)
            .generate()
            .expect("Invalid mnemonic.");
        Self {
            chain_id: None,
            gas_limit: None,
            disable_block_gas_limit: false,
            enable_tx_gas_limit: false,
            gas_price: None,
            hardfork: None,
            // The same dev accounts both receive the genesis balance and are
            // available as signers.
            signer_accounts: genesis_accounts.clone(),
            genesis_timestamp: None,
            genesis_block_number: None,
            genesis_accounts,
            // 100ETH default balance
            genesis_balance: Unit::ETHER.wei().saturating_mul(U256::from(100u64)),
            block_time: None,
            no_mining: false,
            mixed_mining: false,
            port: NODE_PORT,
            max_transactions: 1_000,
            eth_rpc_url: None,
            fork_choice: None,
            account_generator: None,
            base_fee: None,
            disable_min_priority_fee: false,
            blob_excess_gas_and_price: None,
            enable_tracing: true,
            enable_steps_tracing: false,
            print_logs: true,
            print_traces: false,
            enable_auto_impersonate: false,
            no_storage_caching: false,
            server_config: Default::default(),
            host: vec![IpAddr::V4(Ipv4Addr::LOCALHOST)],
            transaction_order: Default::default(),
            config_out: None,
            genesis: None,
            fork_request_timeout: REQUEST_TIMEOUT,
            fork_headers: vec![],
            fork_request_retries: 5,
            fork_retry_backoff: Duration::from_millis(1_000),
            fork_chain_id: None,
            // alchemy max cpus <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
            compute_units_per_second: ALCHEMY_FREE_TIER_CUPS,
            ipc_path: None,
            code_size_limit: None,
            prune_history: Default::default(),
            max_persisted_states: None,
            init_state: None,
            transaction_block_keeper: None,
            disable_default_create2_deployer: false,
            disable_pool_balance_checks: false,
            slots_in_an_epoch: 32,
            memory_limit: None,
            precompile_factory: None,
            networks: Default::default(),
            silent: false,
            cache_path: None,
        }
    }
}
impl NodeConfig {
    /// Sets the memory limit of the node
    #[must_use]
    pub fn with_memory_limit(mut self, mems_value: Option<u64>) -> Self {
        self.memory_limit = mems_value;
        self
    }

    /// Returns the base fee to use
    // Priority: explicit config > genesis file > `INITIAL_BASE_FEE`.
    pub fn get_base_fee(&self) -> u64 {
        self.base_fee
            .or_else(|| self.genesis.as_ref().and_then(|g| g.base_fee_per_gas.map(|g| g as u64)))
            .unwrap_or(INITIAL_BASE_FEE)
    }

    /// Returns the gas price to use
    pub fn get_gas_price(&self) -> u128 {
        self.gas_price.unwrap_or(INITIAL_GAS_PRICE)
    }

    /// Returns the configured blob excess gas and price, or derives it from the
    /// genesis `excess_blob_gas` (defaulting to 0) when unset.
    pub fn get_blob_excess_gas_and_price(&self) -> BlobExcessGasAndPrice {
        if let Some(value) = self.blob_excess_gas_and_price {
            value
        } else {
            let excess_blob_gas =
                self.genesis.as_ref().and_then(|g| g.excess_blob_gas).unwrap_or(0);
            BlobExcessGasAndPrice::new(
                excess_blob_gas,
                get_blob_base_fee_update_fraction(
                    self.chain_id.unwrap_or(Chain::mainnet().id()),
                    self.get_genesis_timestamp(),
                ),
            )
        }
    }

    /// Returns the [`BlobParams`] that should be used.
    pub fn get_blob_params(&self) -> BlobParams {
        get_blob_params(
            self.chain_id.unwrap_or(Chain::mainnet().id()),
            self.get_genesis_timestamp(),
        )
    }

    /// Returns the hardfork to use
    // An explicitly set hardfork wins; otherwise the network kind (optimism
    // vs. ethereum) selects its default.
    pub fn get_hardfork(&self) -> FoundryHardfork {
        if let Some(hardfork) = self.hardfork {
            return hardfork;
        }
        if self.networks.is_optimism() {
            return OpHardfork::default().into();
        }
        EthereumHardfork::default().into()
    }
    /// Sets a custom code size limit
    #[must_use]
    pub fn with_code_size_limit(mut self, code_size_limit: Option<usize>) -> Self {
        self.code_size_limit = code_size_limit;
        self
    }

    /// Disables code size limit
    // Implemented by setting the limit to `usize::MAX` (rather than `None`),
    // which overrides any previously configured limit; a `false` flag is a
    // no-op.
    #[must_use]
    pub fn disable_code_size_limit(mut self, disable_code_size_limit: bool) -> Self {
        if disable_code_size_limit {
            self.code_size_limit = Some(usize::MAX);
        }
        self
    }

    /// Sets the init state if any
    #[must_use]
    pub fn with_init_state(mut self, init_state: Option<SerializableState>) -> Self {
        self.init_state = init_state;
        self
    }

    /// Loads the init state from a file if it exists
    // Read/parse errors are discarded via `.ok()`, leaving `init_state` unset.
    #[must_use]
    #[cfg(feature = "cmd")]
    pub fn with_init_state_path(mut self, path: impl AsRef<std::path::Path>) -> Self {
        self.init_state = crate::cmd::StateFile::parse_path(path).ok().and_then(|file| file.state);
        self
    }
/// Sets the chain ID
#[must_use]
pub fn with_chain_id<U: Into<u64>>(mut self, chain_id: Option<U>) -> Self {
self.set_chain_id(chain_id);
self
}
/// Returns the chain ID to use
pub fn get_chain_id(&self) -> u64 {
self.chain_id
.or_else(|| self.genesis.as_ref().map(|g| g.config.chain_id))
.unwrap_or(CHAIN_ID)
}
/// Sets the chain id and updates all wallets
pub fn set_chain_id(&mut self, chain_id: Option<impl Into<u64>>) {
self.chain_id = chain_id.map(Into::into);
let chain_id = self.get_chain_id();
self.networks.with_chain_id(chain_id);
self.genesis_accounts.iter_mut().for_each(|wallet| {
*wallet = wallet.clone().with_chain_id(Some(chain_id));
});
self.signer_accounts.iter_mut().for_each(|wallet| {
*wallet = wallet.clone().with_chain_id(Some(chain_id));
})
}
/// Sets the gas limit
#[must_use]
pub fn with_gas_limit(mut self, gas_limit: Option<u64>) -> Self {
self.gas_limit = gas_limit;
self
}
/// Disable block gas limit check
///
/// If set to `true` block gas limit will not be enforced
#[must_use]
pub fn disable_block_gas_limit(mut self, disable_block_gas_limit: bool) -> Self {
self.disable_block_gas_limit = disable_block_gas_limit;
self
}
/// Enable tx gas limit check
///
/// If set to `true`, enables the tx gas limit as imposed by Osaka (EIP-7825)
#[must_use]
pub fn enable_tx_gas_limit(mut self, enable_tx_gas_limit: bool) -> Self {
self.enable_tx_gas_limit = enable_tx_gas_limit;
self
}
/// Sets the gas price
#[must_use]
pub fn with_gas_price(mut self, gas_price: Option<u128>) -> Self {
self.gas_price = gas_price;
self
}
/// Sets prune history status.
#[must_use]
pub fn set_pruned_history(mut self, prune_history: Option<Option<usize>>) -> Self {
self.prune_history = PruneStateHistoryConfig::from_args(prune_history);
self
}
/// Sets max number of states to cache on disk.
#[must_use]
pub fn with_max_persisted_states<U: Into<usize>>(
mut self,
max_persisted_states: Option<U>,
) -> Self {
self.max_persisted_states = max_persisted_states.map(Into::into);
self
}
/// Sets max number of blocks with transactions to keep in memory
#[must_use]
pub fn with_transaction_block_keeper<U: Into<usize>>(
mut self,
transaction_block_keeper: Option<U>,
) -> Self {
self.transaction_block_keeper = transaction_block_keeper.map(Into::into);
self
}
/// Sets the base fee
#[must_use]
pub fn with_base_fee(mut self, base_fee: Option<u64>) -> Self {
self.base_fee = base_fee;
self
}
/// Disable the enforcement of a minimum suggested priority fee
#[must_use]
pub fn disable_min_priority_fee(mut self, disable_min_priority_fee: bool) -> Self {
self.disable_min_priority_fee = disable_min_priority_fee;
self
}
/// Sets the init genesis (genesis.json)
#[must_use]
pub fn with_genesis(mut self, genesis: Option<Genesis>) -> Self {
self.genesis = genesis;
self
}
/// Returns the genesis timestamp to use
pub fn get_genesis_timestamp(&self) -> u64 {
self.genesis_timestamp
.or_else(|| self.genesis.as_ref().map(|g| g.timestamp))
.unwrap_or_else(|| duration_since_unix_epoch().as_secs())
}
/// Sets the genesis timestamp
#[must_use]
pub fn with_genesis_timestamp<U: Into<u64>>(mut self, timestamp: Option<U>) -> Self {
if let Some(timestamp) = timestamp {
self.genesis_timestamp = Some(timestamp.into());
}
self
}
/// Sets the genesis number
#[must_use]
pub fn with_genesis_block_number<U: Into<u64>>(mut self, number: Option<U>) -> Self {
if let Some(number) = number {
self.genesis_block_number = Some(number.into());
}
self
}
/// Returns the genesis number
pub fn get_genesis_number(&self) -> u64 {
self.genesis_block_number
.or_else(|| self.genesis.as_ref().and_then(|g| g.number))
.unwrap_or(0)
}
/// Sets the hardfork
#[must_use]
pub fn with_hardfork(mut self, hardfork: Option<FoundryHardfork>) -> Self {
self.hardfork = hardfork;
self
}
/// Sets the genesis accounts
#[must_use]
pub fn with_genesis_accounts(mut self, accounts: Vec<PrivateKeySigner>) -> Self {
self.genesis_accounts = accounts;
self
}
/// Sets the signer accounts
#[must_use]
pub fn with_signer_accounts(mut self, accounts: Vec<PrivateKeySigner>) -> Self {
self.signer_accounts = accounts;
self
}
/// Sets both the genesis accounts and the signer accounts
/// so that `genesis_accounts == accounts`
pub fn with_account_generator(mut self, generator: AccountGenerator) -> eyre::Result<Self> {
let accounts = generator.generate()?;
self.account_generator = Some(generator);
Ok(self.with_signer_accounts(accounts.clone()).with_genesis_accounts(accounts))
}
/// Sets the balance of the genesis accounts in the genesis block
#[must_use]
pub fn with_genesis_balance<U: Into<U256>>(mut self, balance: U) -> Self {
self.genesis_balance = balance.into();
self
}
/// Sets the block time to automine blocks
#[must_use]
pub fn with_blocktime<D: Into<Duration>>(mut self, block_time: Option<D>) -> Self {
self.block_time = block_time.map(Into::into);
self
}
#[must_use]
pub fn with_mixed_mining<D: Into<Duration>>(
mut self,
mixed_mining: bool,
block_time: Option<D>,
) -> Self {
self.block_time = block_time.map(Into::into);
self.mixed_mining = mixed_mining;
self
}
/// If set to `true` auto mining will be disabled
#[must_use]
pub fn with_no_mining(mut self, no_mining: bool) -> Self {
self.no_mining = no_mining;
self
}
/// Sets the slots in an epoch
#[must_use]
pub fn with_slots_in_an_epoch(mut self, slots_in_an_epoch: u64) -> Self {
self.slots_in_an_epoch = slots_in_an_epoch;
self
}
/// Sets the port to use
#[must_use]
pub fn with_port(mut self, port: u16) -> Self {
self.port = port;
self
}
/// Sets the ipc path to use
///
/// Note: this is a double Option for
/// - `None` -> no ipc
/// - `Some(None)` -> use default path
/// - `Some(Some(path))` -> use custom path
#[must_use]
pub fn with_ipc(mut self, ipc_path: Option<Option<String>>) -> Self {
self.ipc_path = ipc_path;
self
}
/// Sets the file path to write the Anvil node's config info to.
#[must_use]
pub fn set_config_out(mut self, config_out: Option<PathBuf>) -> Self {
self.config_out = config_out;
self
}
/// Disables storage caching
#[must_use]
pub fn no_storage_caching(self) -> Self {
self.with_storage_caching(true)
}
#[must_use]
pub fn with_storage_caching(mut self, storage_caching: bool) -> Self {
self.no_storage_caching = storage_caching;
self
}
/// Sets the `eth_rpc_url` to use when forking
#[must_use]
pub fn with_eth_rpc_url<U: Into<String>>(mut self, eth_rpc_url: Option<U>) -> Self {
self.eth_rpc_url = eth_rpc_url.map(Into::into);
self
}
/// Sets the `fork_choice` to use to fork off from based on a block number
#[must_use]
pub fn with_fork_block_number<U: Into<u64>>(self, fork_block_number: Option<U>) -> Self {
self.with_fork_choice(fork_block_number.map(Into::into))
}
/// Sets the `fork_choice` to use to fork off from based on a transaction hash
#[must_use]
pub fn with_fork_transaction_hash<U: Into<TxHash>>(
self,
fork_transaction_hash: Option<U>,
) -> Self {
self.with_fork_choice(fork_transaction_hash.map(Into::into))
}
/// Sets the `fork_choice` to use to fork off from
#[must_use]
pub fn with_fork_choice<U: Into<ForkChoice>>(mut self, fork_choice: Option<U>) -> Self {
self.fork_choice = fork_choice.map(Into::into);
self
}
/// Sets the `fork_chain_id` to use to fork off local cache from
#[must_use]
pub fn with_fork_chain_id(mut self, fork_chain_id: Option<U256>) -> Self {
self.fork_chain_id = fork_chain_id;
self
}
/// Sets the `fork_headers` to use with `eth_rpc_url`
#[must_use]
pub fn with_fork_headers(mut self, headers: Vec<String>) -> Self {
self.fork_headers = headers;
self
}
/// Sets the `fork_request_timeout` to use for requests
#[must_use]
pub fn fork_request_timeout(mut self, fork_request_timeout: Option<Duration>) -> Self {
if let Some(fork_request_timeout) = fork_request_timeout {
self.fork_request_timeout = fork_request_timeout;
}
self
}
/// Sets the `fork_request_retries` to use for spurious networks
#[must_use]
pub fn fork_request_retries(mut self, fork_request_retries: Option<u32>) -> Self {
if let Some(fork_request_retries) = fork_request_retries {
self.fork_request_retries = fork_request_retries;
}
self
}
/// Sets the initial `fork_retry_backoff` for rate limits
#[must_use]
pub fn fork_retry_backoff(mut self, fork_retry_backoff: Option<Duration>) -> Self {
if let Some(fork_retry_backoff) = fork_retry_backoff {
self.fork_retry_backoff = fork_retry_backoff;
}
self
}
/// Sets the number of assumed available compute units per second
///
/// See also, <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
#[must_use]
pub fn fork_compute_units_per_second(mut self, compute_units_per_second: Option<u64>) -> Self {
if let Some(compute_units_per_second) = compute_units_per_second {
self.compute_units_per_second = compute_units_per_second;
}
self
}
/// Sets whether to enable tracing
#[must_use]
pub fn with_tracing(mut self, enable_tracing: bool) -> Self {
self.enable_tracing = enable_tracing;
self
}
/// Sets whether to enable steps tracing
#[must_use]
pub fn with_steps_tracing(mut self, enable_steps_tracing: bool) -> Self {
self.enable_steps_tracing = enable_steps_tracing;
self
}
/// Sets whether to print `console.log` invocations to stdout.
#[must_use]
pub fn with_print_logs(mut self, print_logs: bool) -> Self {
self.print_logs = print_logs;
self
}
/// Sets whether to print traces to stdout.
#[must_use]
pub fn with_print_traces(mut self, print_traces: bool) -> Self {
self.print_traces = print_traces;
self
}
/// Sets whether to enable autoImpersonate
#[must_use]
pub fn with_auto_impersonate(mut self, enable_auto_impersonate: bool) -> Self {
self.enable_auto_impersonate = enable_auto_impersonate;
self
}
#[must_use]
pub fn with_server_config(mut self, config: ServerConfig) -> Self {
self.server_config = config;
self
}
/// Sets the host the server will listen on
#[must_use]
pub fn with_host(mut self, host: Vec<IpAddr>) -> Self {
self.host = if host.is_empty() { vec![IpAddr::V4(Ipv4Addr::LOCALHOST)] } else { host };
self
}
#[must_use]
pub fn with_transaction_order(mut self, transaction_order: TransactionOrder) -> Self {
self.transaction_order = transaction_order;
self
}
/// Returns the ipc path for the ipc endpoint if any
pub fn get_ipc_path(&self) -> Option<String> {
match &self.ipc_path {
Some(path) => path.clone().or_else(|| Some(DEFAULT_IPC_ENDPOINT.to_string())),
None => None,
}
}
/// Prints the config info
pub fn print(&self, fork: Option<&ClientFork>) -> Result<()> {
if let Some(path) = &self.config_out {
let file = io::BufWriter::new(
File::create(path).wrap_err("unable to create anvil config description file")?,
);
let value = self.as_json(fork);
serde_json::to_writer(file, &value).wrap_err("failed writing JSON")?;
}
if !self.silent {
sh_println!("{}", self.as_string(fork))?;
}
Ok(())
}
/// Returns the path where the cache file should be stored
///
/// See also [ Config::foundry_block_cache_file()]
pub fn block_cache_path(&self, block: u64) -> Option<PathBuf> {
if self.no_storage_caching || self.eth_rpc_url.is_none() {
return None;
}
let chain_id = self.get_chain_id();
Config::foundry_block_cache_file(chain_id, block)
}
/// Sets whether to disable the default create2 deployer
#[must_use]
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/evm.rs | crates/anvil/src/evm.rs | use alloy_evm::precompiles::DynPrecompile;
use alloy_primitives::Address;
use std::fmt::Debug;
/// Object-safe trait that enables injecting extra precompiles when using
/// `anvil` as a library.
pub trait PrecompileFactory: Send + Sync + Unpin + Debug {
/// Returns a set of precompiles to extend the EVM with.
fn precompiles(&self) -> Vec<(Address, DynPrecompile)>;
}
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use crate::PrecompileFactory;
use alloy_evm::{
EthEvm, Evm, EvmEnv,
eth::EthEvmContext,
precompiles::{DynPrecompile, PrecompilesMap},
};
use alloy_op_evm::OpEvm;
use alloy_primitives::{Address, Bytes, TxKind, U256, address};
use foundry_evm::core::either_evm::EitherEvm;
use foundry_evm_networks::NetworkConfigs;
use itertools::Itertools;
use op_revm::{L1BlockInfo, OpContext, OpSpecId, OpTransaction, precompiles::OpPrecompiles};
use revm::{
Journal,
context::{CfgEnv, Evm as RevmEvm, JournalTr, LocalContext, TxEnv},
database::{EmptyDB, EmptyDBTyped},
handler::{EthPrecompiles, instructions::EthInstructions},
inspector::NoOpInspector,
interpreter::interpreter::EthInterpreter,
precompile::{PrecompileOutput, PrecompileSpecId, Precompiles},
primitives::hardfork::SpecId,
};
// A precompile activated in the `Prague` spec.
const ETH_PRAGUE_PRECOMPILE: Address = address!("0x0000000000000000000000000000000000000011");
// A precompile activated in the `Isthmus` spec.
const OP_ISTHMUS_PRECOMPILE: Address = address!("0x0000000000000000000000000000000000000100");
// A custom precompile address and payload for testing.
const PRECOMPILE_ADDR: Address = address!("0x0000000000000000000000000000000000000071");
const PAYLOAD: &[u8] = &[0xde, 0xad, 0xbe, 0xef];
#[derive(Debug)]
struct CustomPrecompileFactory;
impl PrecompileFactory for CustomPrecompileFactory {
fn precompiles(&self) -> Vec<(Address, DynPrecompile)> {
use alloy_evm::precompiles::PrecompileInput;
vec![(
PRECOMPILE_ADDR,
DynPrecompile::from(|input: PrecompileInput<'_>| {
Ok(PrecompileOutput {
bytes: Bytes::copy_from_slice(input.data),
gas_used: 0,
gas_refunded: 0,
reverted: false,
})
}),
)]
}
}
/// Creates a new EVM instance with the custom precompile factory.
fn create_eth_evm(
spec: SpecId,
) -> (foundry_evm::Env, EitherEvm<EmptyDBTyped<Infallible>, NoOpInspector, PrecompilesMap>)
{
let eth_env = foundry_evm::Env {
evm_env: EvmEnv { block_env: Default::default(), cfg_env: CfgEnv::new_with_spec(spec) },
tx: TxEnv {
kind: TxKind::Call(PRECOMPILE_ADDR),
data: PAYLOAD.into(),
..Default::default()
},
};
let eth_evm_context = EthEvmContext {
journaled_state: Journal::new(EmptyDB::default()),
block: eth_env.evm_env.block_env.clone(),
cfg: eth_env.evm_env.cfg_env.clone(),
tx: eth_env.tx.clone(),
chain: (),
local: LocalContext::default(),
error: Ok(()),
};
let eth_precompiles = EthPrecompiles {
precompiles: Precompiles::new(PrecompileSpecId::from_spec_id(spec)),
spec,
}
.precompiles;
let eth_evm = EitherEvm::Eth(EthEvm::new(
RevmEvm::new_with_inspector(
eth_evm_context,
NoOpInspector,
EthInstructions::<EthInterpreter, EthEvmContext<EmptyDB>>::default(),
PrecompilesMap::from_static(eth_precompiles),
),
true,
));
(eth_env, eth_evm)
}
/// Creates a new OP EVM instance with the custom precompile factory.
fn create_op_evm(
spec: SpecId,
op_spec: OpSpecId,
) -> (
crate::eth::backend::env::Env,
EitherEvm<EmptyDBTyped<Infallible>, NoOpInspector, PrecompilesMap>,
) {
let op_env = crate::eth::backend::env::Env {
evm_env: EvmEnv { block_env: Default::default(), cfg_env: CfgEnv::new_with_spec(spec) },
tx: OpTransaction::<TxEnv> {
base: TxEnv {
kind: TxKind::Call(PRECOMPILE_ADDR),
data: PAYLOAD.into(),
..Default::default()
},
..Default::default()
},
networks: NetworkConfigs::with_optimism(),
};
let mut chain = L1BlockInfo::default();
if op_spec == OpSpecId::ISTHMUS {
chain.operator_fee_constant = Some(U256::from(0));
chain.operator_fee_scalar = Some(U256::from(0));
}
let op_cfg = op_env.evm_env.cfg_env.clone().with_spec(op_spec);
let op_evm_context = OpContext {
journaled_state: {
let mut journal = Journal::new(EmptyDB::default());
// Converting SpecId into OpSpecId
journal.set_spec_id(op_env.evm_env.cfg_env.spec);
journal
},
block: op_env.evm_env.block_env.clone(),
cfg: op_cfg.clone(),
tx: op_env.tx.clone(),
chain,
local: LocalContext::default(),
error: Ok(()),
};
let op_precompiles = OpPrecompiles::new_with_spec(op_cfg.spec).precompiles();
let op_evm = EitherEvm::Op(OpEvm::new(
op_revm::OpEvm(RevmEvm::new_with_inspector(
op_evm_context,
NoOpInspector,
EthInstructions::<EthInterpreter, OpContext<EmptyDB>>::default(),
PrecompilesMap::from_static(op_precompiles),
)),
true,
));
(op_env, op_evm)
}
#[test]
fn build_eth_evm_with_extra_precompiles_default_spec() {
let (env, mut evm) = create_eth_evm(SpecId::default());
// Check that the Prague precompile IS present when using the default spec.
assert!(evm.precompiles().addresses().contains(Ð_PRAGUE_PRECOMPILE));
assert!(!evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
evm.precompiles_mut().extend_precompiles(CustomPrecompileFactory.precompiles());
assert!(evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
let result = match &mut evm {
EitherEvm::Eth(eth_evm) => eth_evm.transact(env.tx).unwrap(),
_ => unreachable!(),
};
assert!(result.result.is_success());
assert_eq!(result.result.output(), Some(&PAYLOAD.into()));
}
#[test]
fn build_eth_evm_with_extra_precompiles_london_spec() {
let (env, mut evm) = create_eth_evm(SpecId::LONDON);
// Check that the Prague precompile IS NOT present when using the London spec.
assert!(!evm.precompiles().addresses().contains(Ð_PRAGUE_PRECOMPILE));
assert!(!evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
evm.precompiles_mut().extend_precompiles(CustomPrecompileFactory.precompiles());
assert!(evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
let result = match &mut evm {
EitherEvm::Eth(eth_evm) => eth_evm.transact(env.tx).unwrap(),
_ => unreachable!(),
};
assert!(result.result.is_success());
assert_eq!(result.result.output(), Some(&PAYLOAD.into()));
}
#[test]
fn build_op_evm_with_extra_precompiles_default_spec() {
let (env, mut evm) = create_op_evm(SpecId::default(), OpSpecId::default());
// Check that the Isthmus precompile IS present when using the default spec.
assert!(evm.precompiles().addresses().contains(&OP_ISTHMUS_PRECOMPILE));
// Check that the Prague precompile IS present when using the default spec.
assert!(evm.precompiles().addresses().contains(Ð_PRAGUE_PRECOMPILE));
assert!(!evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
evm.precompiles_mut().extend_precompiles(CustomPrecompileFactory.precompiles());
assert!(evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
let result = match &mut evm {
EitherEvm::Op(op_evm) => op_evm.transact(env.tx).unwrap(),
_ => unreachable!(),
};
assert!(result.result.is_success());
assert_eq!(result.result.output(), Some(&PAYLOAD.into()));
}
#[test]
fn build_op_evm_with_extra_precompiles_bedrock_spec() {
let (env, mut evm) = create_op_evm(SpecId::default(), OpSpecId::BEDROCK);
// Check that the Isthmus precompile IS NOT present when using the `OpSpecId::BEDROCK` spec.
assert!(!evm.precompiles().addresses().contains(&OP_ISTHMUS_PRECOMPILE));
// Check that the Prague precompile IS NOT present when using the `OpSpecId::BEDROCK` spec.
assert!(!evm.precompiles().addresses().contains(Ð_PRAGUE_PRECOMPILE));
assert!(!evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
evm.precompiles_mut().extend_precompiles(CustomPrecompileFactory.precompiles());
assert!(evm.precompiles().addresses().contains(&PRECOMPILE_ADDR));
let result = match &mut evm {
EitherEvm::Op(op_evm) => op_evm.transact(env.tx).unwrap(),
_ => unreachable!(),
};
assert!(result.result.is_success());
assert_eq!(result.result.output(), Some(&PAYLOAD.into()));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/lib.rs | crates/anvil/src/lib.rs | //! Anvil is a fast local Ethereum development node.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::{
error::{NodeError, NodeResult},
eth::{
EthApi,
backend::{info::StorageInfo, mem},
fees::{FeeHistoryService, FeeManager},
miner::{Miner, MiningMode},
pool::Pool,
sign::{DevSigner, Signer as EthSigner},
},
filter::Filters,
logging::{LoggingManager, NodeLogLayer},
service::NodeService,
shutdown::Signal,
tasks::TaskManager,
};
use alloy_eips::eip7840::BlobParams;
use alloy_primitives::{Address, U256};
use alloy_signer_local::PrivateKeySigner;
use eth::backend::fork::ClientFork;
use eyre::Result;
use foundry_common::provider::{ProviderBuilder, RetryProvider};
pub use foundry_evm::hardfork::EthereumHardfork;
use futures::{FutureExt, TryFutureExt};
use parking_lot::Mutex;
use revm::primitives::hardfork::SpecId;
use server::try_spawn_ipc;
use std::{
net::SocketAddr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::{
runtime::Handle,
task::{JoinError, JoinHandle},
};
use tracing_subscriber::EnvFilter;
/// contains the background service that drives the node
mod service;
mod config;
pub use config::{
AccountGenerator, CHAIN_ID, DEFAULT_GAS_LIMIT, ForkChoice, NodeConfig, VERSION_MESSAGE,
};
mod error;
/// ethereum related implementations
pub mod eth;
/// Evm related abstractions
mod evm;
pub use evm::PrecompileFactory;
/// support for polling filters
pub mod filter;
/// commandline output
pub mod logging;
/// types for subscriptions
pub mod pubsub;
/// axum RPC server implementations
pub mod server;
/// Futures for shutdown signal
mod shutdown;
/// additional task management
mod tasks;
/// contains cli command
#[cfg(feature = "cmd")]
pub mod cmd;
#[cfg(feature = "cmd")]
pub mod args;
#[cfg(feature = "cmd")]
pub mod opts;
#[macro_use]
extern crate foundry_common;
#[macro_use]
extern crate tracing;
/// Creates the node and runs the server.
///
/// Returns the [EthApi] that can be used to interact with the node and the [JoinHandle] of the
/// task.
///
/// # Panics
///
/// Panics if any error occurs. For a non-panicking version, use [`try_spawn`].
///
///
/// # Examples
///
/// ```no_run
/// # use anvil::NodeConfig;
/// # async fn spawn() -> eyre::Result<()> {
/// let config = NodeConfig::default();
/// let (api, handle) = anvil::spawn(config).await;
///
/// // use api
///
/// // wait forever
/// handle.await.unwrap().unwrap();
/// # Ok(())
/// # }
/// ```
pub async fn spawn(config: NodeConfig) -> (EthApi, NodeHandle) {
try_spawn(config).await.expect("failed to spawn node")
}
/// Creates the node and runs the server
///
/// Returns the [EthApi] that can be used to interact with the node and the [JoinHandle] of the
/// task.
///
/// # Examples
///
/// ```no_run
/// # use anvil::NodeConfig;
/// # async fn spawn() -> eyre::Result<()> {
/// let config = NodeConfig::default();
/// let (api, handle) = anvil::try_spawn(config).await?;
///
/// // use api
///
/// // wait forever
/// handle.await??;
/// # Ok(())
/// # }
/// ```
pub async fn try_spawn(mut config: NodeConfig) -> Result<(EthApi, NodeHandle)> {
let logger = if config.enable_tracing { init_tracing() } else { Default::default() };
logger.set_enabled(!config.silent);
let backend = Arc::new(config.setup().await?);
if config.enable_auto_impersonate {
backend.auto_impersonate_account(true);
}
let fork = backend.get_fork();
let NodeConfig {
signer_accounts,
block_time,
port,
max_transactions,
server_config,
no_mining,
transaction_order,
genesis,
mixed_mining,
..
} = config.clone();
let pool = Arc::new(Pool::default());
let mode = if let Some(block_time) = block_time {
if mixed_mining {
let listener = pool.add_ready_listener();
MiningMode::mixed(max_transactions, listener, block_time)
} else {
MiningMode::interval(block_time)
}
} else if no_mining {
MiningMode::None
} else {
// get a listener for ready transactions
let listener = pool.add_ready_listener();
MiningMode::instant(max_transactions, listener)
};
let miner = match &fork {
Some(fork) => {
Miner::new(mode).with_forced_transactions(fork.config.read().force_transactions.clone())
}
_ => Miner::new(mode),
};
let dev_signer: Box<dyn EthSigner> = Box::new(DevSigner::new(signer_accounts));
let mut signers = vec![dev_signer];
if let Some(genesis) = genesis {
let genesis_signers = genesis
.alloc
.values()
.filter_map(|acc| acc.private_key)
.flat_map(|k| PrivateKeySigner::from_bytes(&k))
.collect::<Vec<_>>();
if !genesis_signers.is_empty() {
signers.push(Box::new(DevSigner::new(genesis_signers)));
}
}
let fee_history_cache = Arc::new(Mutex::new(Default::default()));
let fee_history_service = FeeHistoryService::new(
match backend.spec_id() {
SpecId::OSAKA => BlobParams::osaka(),
SpecId::PRAGUE => BlobParams::prague(),
_ => BlobParams::cancun(),
},
backend.new_block_notifications(),
Arc::clone(&fee_history_cache),
StorageInfo::new(Arc::clone(&backend)),
);
// create an entry for the best block
if let Some(header) = backend.get_block(backend.best_number()).map(|block| block.header) {
fee_history_service.insert_cache_entry_for_block(header.hash_slow(), &header);
}
let filters = Filters::default();
// create the cloneable api wrapper
let api = EthApi::new(
Arc::clone(&pool),
Arc::clone(&backend),
Arc::new(signers),
fee_history_cache,
fee_history_service.fee_history_limit(),
miner.clone(),
logger,
filters.clone(),
transaction_order,
);
// spawn the node service
let node_service =
tokio::task::spawn(NodeService::new(pool, backend, miner, fee_history_service, filters));
let mut servers = Vec::with_capacity(config.host.len());
let mut addresses = Vec::with_capacity(config.host.len());
for addr in &config.host {
let sock_addr = SocketAddr::new(*addr, port);
// Create a TCP listener.
let tcp_listener = tokio::net::TcpListener::bind(sock_addr).await?;
addresses.push(tcp_listener.local_addr()?);
// Spawn the server future on a new task.
let srv = server::serve_on(tcp_listener, api.clone(), server_config.clone());
servers.push(tokio::task::spawn(srv.map_err(Into::into)));
}
let tokio_handle = Handle::current();
let (signal, on_shutdown) = shutdown::signal();
let task_manager = TaskManager::new(tokio_handle, on_shutdown);
let ipc_task =
config.get_ipc_path().map(|path| try_spawn_ipc(api.clone(), path)).transpose()?;
let handle = NodeHandle {
config,
node_service,
servers,
ipc_task,
addresses,
_signal: Some(signal),
task_manager,
};
handle.print(fork.as_ref())?;
Ok((api, handle))
}
type IpcTask = JoinHandle<()>;
/// A handle to the spawned node and server tasks.
///
/// This future will resolve if either the node or server task resolve/fail.
pub struct NodeHandle {
config: NodeConfig,
/// The address of the running rpc server.
addresses: Vec<SocketAddr>,
/// Join handle for the Node Service.
pub node_service: JoinHandle<Result<(), NodeError>>,
/// Join handles (one per socket) for the Anvil server.
pub servers: Vec<JoinHandle<Result<(), NodeError>>>,
/// The future that joins the ipc server, if any.
ipc_task: Option<IpcTask>,
/// A signal that fires the shutdown, fired on drop.
_signal: Option<Signal>,
/// A task manager that can be used to spawn additional tasks.
task_manager: TaskManager,
}
impl Drop for NodeHandle {
fn drop(&mut self) {
// Fire shutdown signal to make sure anvil instance is terminated.
if let Some(signal) = self._signal.take() {
let _ = signal.fire();
}
}
}
impl NodeHandle {
/// The [NodeConfig] the node was launched with.
pub fn config(&self) -> &NodeConfig {
&self.config
}
/// Prints the launch info.
pub(crate) fn print(&self, fork: Option<&ClientFork>) -> Result<()> {
self.config.print(fork)?;
if !self.config.silent {
if let Some(ipc_path) = self.ipc_path() {
sh_println!("IPC path: {ipc_path}")?;
}
sh_println!(
"Listening on {}",
self.addresses
.iter()
.map(|addr| { addr.to_string() })
.collect::<Vec<String>>()
.join(", ")
)?;
}
Ok(())
}
/// The address of the launched server.
///
/// **N.B.** this may not necessarily be the same `host + port` as configured in the
/// `NodeConfig`, if port was set to 0, then the OS auto picks an available port.
pub fn socket_address(&self) -> &SocketAddr {
&self.addresses[0]
}
/// Returns the http endpoint.
pub fn http_endpoint(&self) -> String {
format!("http://{}", self.socket_address())
}
/// Returns the websocket endpoint.
pub fn ws_endpoint(&self) -> String {
format!("ws://{}", self.socket_address())
}
/// Returns the path of the launched ipc server, if any.
pub fn ipc_path(&self) -> Option<String> {
self.config.get_ipc_path()
}
/// Constructs a [`RetryProvider`] for this handle's HTTP endpoint.
pub fn http_provider(&self) -> RetryProvider {
ProviderBuilder::new(&self.http_endpoint()).build().expect("failed to build HTTP provider")
}
/// Constructs a [`RetryProvider`] for this handle's WS endpoint.
pub fn ws_provider(&self) -> RetryProvider {
ProviderBuilder::new(&self.ws_endpoint()).build().expect("failed to build WS provider")
}
/// Constructs a [`RetryProvider`] for this handle's IPC endpoint, if any.
pub fn ipc_provider(&self) -> Option<RetryProvider> {
ProviderBuilder::new(&self.config.get_ipc_path()?).build().ok()
}
/// Signer accounts that can sign messages/transactions from the EVM node.
pub fn dev_accounts(&self) -> impl Iterator<Item = Address> + '_ {
self.config.signer_accounts.iter().map(|wallet| wallet.address())
}
/// Signer accounts that can sign messages/transactions from the EVM node.
pub fn dev_wallets(&self) -> impl Iterator<Item = PrivateKeySigner> + '_ {
self.config.signer_accounts.iter().cloned()
}
/// Accounts that will be initialised with `genesis_balance` in the genesis block.
pub fn genesis_accounts(&self) -> impl Iterator<Item = Address> + '_ {
self.config.genesis_accounts.iter().map(|w| w.address())
}
/// Native token balance of every genesis account in the genesis block.
pub fn genesis_balance(&self) -> U256 {
self.config.genesis_balance
}
/// Default gas price for all txs.
pub fn gas_price(&self) -> u128 {
self.config.get_gas_price()
}
/// Returns the shutdown signal.
pub fn shutdown_signal(&self) -> &Option<Signal> {
&self._signal
}
/// Returns mutable access to the shutdown signal.
///
/// This can be used to extract the Signal.
pub fn shutdown_signal_mut(&mut self) -> &mut Option<Signal> {
&mut self._signal
}
/// Returns the task manager that can be used to spawn new tasks.
///
/// ```
/// use anvil::NodeHandle;
/// # fn t(handle: NodeHandle) {
/// let task_manager = handle.task_manager();
/// let on_shutdown = task_manager.on_shutdown();
///
/// task_manager.spawn(async move {
/// on_shutdown.await;
/// // do something
/// });
///
/// # }
/// ```
pub fn task_manager(&self) -> &TaskManager {
&self.task_manager
}
}
impl Future for NodeHandle {
type Output = Result<NodeResult<()>, JoinError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let pin = self.get_mut();
// poll the ipc task
if let Some(mut ipc) = pin.ipc_task.take() {
if let Poll::Ready(res) = ipc.poll_unpin(cx) {
return Poll::Ready(res.map(|()| Ok(())));
} else {
pin.ipc_task = Some(ipc);
}
}
// poll the node service task
if let Poll::Ready(res) = pin.node_service.poll_unpin(cx) {
return Poll::Ready(res);
}
// poll the axum server handles
for server in &mut pin.servers {
if let Poll::Ready(res) = server.poll_unpin(cx) {
return Poll::Ready(res);
}
}
Poll::Pending
}
}
#[doc(hidden)]
pub fn init_tracing() -> LoggingManager {
use tracing_subscriber::prelude::*;
let manager = LoggingManager::default();
let _ = if let Ok(rust_log_val) = std::env::var("RUST_LOG")
&& !rust_log_val.contains("=")
{
// Mutate the given filter to include `node` logs if it is not already present.
// This prevents the unexpected behaviour of not seeing any node logs if a RUST_LOG
// is already present that doesn't set it.
let rust_log_val = if !rust_log_val.contains("node") {
format!("{rust_log_val},node=info")
} else {
rust_log_val
};
let env_filter: EnvFilter =
rust_log_val.parse().expect("failed to parse modified RUST_LOG");
tracing_subscriber::registry()
.with(env_filter)
.with(tracing_subscriber::fmt::layer())
.try_init()
} else {
tracing_subscriber::Registry::default()
.with(NodeLogLayer::new(manager.clone()))
.with(
tracing_subscriber::fmt::layer()
.without_time()
.with_target(false)
.with_level(false),
)
.try_init()
};
manager
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/shutdown.rs | crates/anvil/src/shutdown.rs | //! Helper for shutdown signals
use futures::{
FutureExt,
channel::oneshot,
future::{FusedFuture, Shared},
};
use std::{
pin::Pin,
task::{Context, Poll},
};
/// Future that resolves when the shutdown event has fired
#[derive(Clone)]
pub struct Shutdown(Shared<oneshot::Receiver<()>>);
impl Future for Shutdown {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let pin = self.get_mut();
if pin.0.is_terminated() || pin.0.poll_unpin(cx).is_ready() {
Poll::Ready(())
} else {
Poll::Pending
}
}
}
/// Shutdown signal that fires either manually or on drop by closing the channel
pub struct Signal(oneshot::Sender<()>);
impl Signal {
/// Fire the signal manually.
pub fn fire(self) -> Result<(), ()> {
self.0.send(())
}
}
/// Create a channel pair that's used to propagate shutdown event
pub fn signal() -> (Signal, Shutdown) {
let (sender, receiver) = oneshot::channel();
(Signal(sender), Shutdown(receiver.shared()))
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/filter.rs | crates/anvil/src/filter.rs | //! Support for polling based filters
use crate::{
StorageInfo,
eth::{backend::notifications::NewBlockNotifications, error::ToRpcResponseResult},
pubsub::filter_logs,
};
use alloy_primitives::{TxHash, map::HashMap};
use alloy_rpc_types::{Filter, FilteredParams, Log};
use anvil_core::eth::subscription::SubscriptionId;
use anvil_rpc::response::ResponseResult;
use futures::{Stream, StreamExt, channel::mpsc::Receiver};
use std::{
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::{Duration, Instant},
};
use tokio::sync::Mutex;
/// Type alias for filters identified by their id and their expiration timestamp
type FilterMap = Arc<Mutex<HashMap<String, (EthFilter, Instant)>>>;
/// timeout after which to remove an active filter if it wasn't polled since then
pub const ACTIVE_FILTER_TIMEOUT_SECS: u64 = 60 * 5;
/// Contains all registered filters
#[derive(Clone, Debug)]
pub struct Filters {
/// all currently active filters
active_filters: FilterMap,
/// How long we keep a live the filter after the last poll
keepalive: Duration,
}
impl Filters {
/// Adds a new `EthFilter` to the set
pub async fn add_filter(&self, filter: EthFilter) -> String {
let id = new_id();
trace!(target: "node::filter", "Adding new filter id {}", id);
let mut filters = self.active_filters.lock().await;
filters.insert(id.clone(), (filter, self.next_deadline()));
id
}
pub async fn get_filter_changes(&self, id: &str) -> ResponseResult {
{
let mut filters = self.active_filters.lock().await;
if let Some((filter, deadline)) = filters.get_mut(id) {
let resp = filter
.next()
.await
.unwrap_or_else(|| ResponseResult::success(Vec::<()>::new()));
*deadline = self.next_deadline();
return resp;
}
}
warn!(target: "node::filter", "No filter found for {}", id);
ResponseResult::success(Vec::<()>::new())
}
/// Returns the original `Filter` of an `eth_newFilter`
pub async fn get_log_filter(&self, id: &str) -> Option<Filter> {
let filters = self.active_filters.lock().await;
if let Some((EthFilter::Logs(log), _)) = filters.get(id) {
return log.filter.filter.clone();
}
None
}
/// Removes the filter identified with the `id`
pub async fn uninstall_filter(&self, id: &str) -> Option<EthFilter> {
trace!(target: "node::filter", "Uninstalling filter id {}", id);
self.active_filters.lock().await.remove(id).map(|(f, _)| f)
}
/// The duration how long to keep alive stale filters
pub fn keep_alive(&self) -> Duration {
self.keepalive
}
/// Returns the timestamp after which a filter should expire
fn next_deadline(&self) -> Instant {
Instant::now() + self.keep_alive()
}
/// Evict all filters that weren't updated and reached there deadline
pub async fn evict(&self) {
trace!(target: "node::filter", "Evicting stale filters");
let now = Instant::now();
let mut active_filters = self.active_filters.lock().await;
active_filters.retain(|id, (_, deadline)| {
if now > *deadline {
trace!(target: "node::filter",?id, "Evicting stale filter");
return false;
}
true
});
}
}
impl Default for Filters {
fn default() -> Self {
Self {
active_filters: Arc::new(Default::default()),
keepalive: Duration::from_secs(ACTIVE_FILTER_TIMEOUT_SECS),
}
}
}
/// returns a new random hex id
fn new_id() -> String {
SubscriptionId::random_hex().to_string()
}
/// Represents a poll based filter
#[derive(Debug)]
pub enum EthFilter {
Logs(Box<LogsFilter>),
Blocks(NewBlockNotifications),
PendingTransactions(Receiver<TxHash>),
}
impl Stream for EthFilter {
type Item = ResponseResult;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let pin = self.get_mut();
match pin {
Self::Logs(logs) => Poll::Ready(Some(Ok(logs.poll(cx)).to_rpc_result())),
Self::Blocks(blocks) => {
let mut new_blocks = Vec::new();
while let Poll::Ready(Some(block)) = blocks.poll_next_unpin(cx) {
new_blocks.push(block.hash);
}
Poll::Ready(Some(Ok(new_blocks).to_rpc_result()))
}
Self::PendingTransactions(tx) => {
let mut new_txs = Vec::new();
while let Poll::Ready(Some(tx_hash)) = tx.poll_next_unpin(cx) {
new_txs.push(tx_hash);
}
Poll::Ready(Some(Ok(new_txs).to_rpc_result()))
}
}
}
}
/// Listens for new blocks and matching logs emitted in that block
#[derive(Debug)]
pub struct LogsFilter {
/// listener for new blocks
pub blocks: NewBlockNotifications,
/// accessor for block storage
pub storage: StorageInfo,
/// matcher with all provided filter params
pub filter: FilteredParams,
/// existing logs that matched the filter when the listener was installed
///
/// They'll be returned on the first poll
pub historic: Option<Vec<Log>>,
}
impl LogsFilter {
/// Returns all the logs since the last time this filter was polled
pub fn poll(&mut self, cx: &mut Context<'_>) -> Vec<Log> {
let mut logs = self.historic.take().unwrap_or_default();
while let Poll::Ready(Some(block)) = self.blocks.poll_next_unpin(cx) {
let b = self.storage.block(block.hash);
let receipts = self.storage.receipts(block.hash);
if let (Some(receipts), Some(block)) = (receipts, b) {
logs.extend(filter_logs(block, receipts, &self.filter))
}
}
logs
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/args.rs | crates/anvil/src/args.rs | use crate::opts::{Anvil, AnvilSubcommand};
use clap::{CommandFactory, Parser};
use eyre::Result;
use foundry_cli::utils;
/// Run the `anvil` command line interface.
pub fn run() -> Result<()> {
setup()?;
let mut args = Anvil::parse();
args.global.init()?;
args.node.evm.resolve_rpc_alias();
run_command(args)
}
/// Setup the exception handler and other utilities.
pub fn setup() -> Result<()> {
utils::common_setup();
Ok(())
}
/// Run the subcommand.
pub fn run_command(args: Anvil) -> Result<()> {
if let Some(cmd) = &args.cmd {
match cmd {
AnvilSubcommand::Completions { shell } => {
clap_complete::generate(
*shell,
&mut Anvil::command(),
"anvil",
&mut std::io::stdout(),
);
}
}
return Ok(());
}
let _ = fdlimit::raise_fd_limit();
args.global.tokio_runtime().block_on(args.node.run())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn verify_cli() {
Anvil::command().debug_assert();
}
#[test]
fn can_parse_help() {
let _: Anvil = Anvil::parse_from(["anvil", "--help"]);
}
#[test]
fn can_parse_short_version() {
let _: Anvil = Anvil::parse_from(["anvil", "-V"]);
}
#[test]
fn can_parse_long_version() {
let _: Anvil = Anvil::parse_from(["anvil", "--version"]);
}
#[test]
fn can_parse_completions() {
let args: Anvil = Anvil::parse_from(["anvil", "completions", "bash"]);
assert!(matches!(
args.cmd,
Some(AnvilSubcommand::Completions {
shell: foundry_cli::clap::Shell::ClapCompleteShell(clap_complete::Shell::Bash)
})
));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/pubsub.rs | crates/anvil/src/pubsub.rs | use crate::{
StorageInfo,
eth::{backend::notifications::NewBlockNotifications, error::to_rpc_result},
};
use alloy_network::AnyRpcTransaction;
use alloy_primitives::{B256, TxHash};
use alloy_rpc_types::{FilteredParams, Log, Transaction, pubsub::SubscriptionResult};
use anvil_core::eth::{block::Block, subscription::SubscriptionId};
use anvil_rpc::{request::Version, response::ResponseResult};
use foundry_primitives::FoundryReceiptEnvelope;
use futures::{Stream, StreamExt, channel::mpsc::Receiver, ready};
use serde::Serialize;
use std::{
collections::VecDeque,
pin::Pin,
task::{Context, Poll},
};
use tokio::sync::mpsc::UnboundedReceiver;
/// Listens for new blocks and matching logs emitted in that block
#[derive(Debug)]
pub struct LogsSubscription {
pub blocks: NewBlockNotifications,
pub storage: StorageInfo,
pub filter: FilteredParams,
pub queued: VecDeque<Log>,
pub id: SubscriptionId,
}
impl LogsSubscription {
fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<EthSubscriptionResponse>> {
loop {
if let Some(log) = self.queued.pop_front() {
let params = EthSubscriptionParams {
subscription: self.id.clone(),
result: to_rpc_result(log),
};
return Poll::Ready(Some(EthSubscriptionResponse::new(params)));
}
if let Some(block) = ready!(self.blocks.poll_next_unpin(cx)) {
let b = self.storage.block(block.hash);
let receipts = self.storage.receipts(block.hash);
if let (Some(receipts), Some(block)) = (receipts, b) {
let logs = filter_logs(block, receipts, &self.filter);
if logs.is_empty() {
// this ensures we poll the receiver until it is pending, in which case the
// underlying `UnboundedReceiver` will register the new waker, see
// [`futures::channel::mpsc::UnboundedReceiver::poll_next()`]
continue;
}
self.queued.extend(logs)
}
} else {
return Poll::Ready(None);
}
if self.queued.is_empty() {
return Poll::Pending;
}
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct EthSubscriptionResponse {
jsonrpc: Version,
method: &'static str,
params: EthSubscriptionParams,
}
impl EthSubscriptionResponse {
pub fn new(params: EthSubscriptionParams) -> Self {
Self { jsonrpc: Version::V2, method: "eth_subscription", params }
}
}
/// Represents the `params` field of an `eth_subscription` event
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct EthSubscriptionParams {
subscription: SubscriptionId,
#[serde(flatten)]
result: ResponseResult,
}
/// Represents an ethereum Websocket subscription
#[derive(Debug)]
pub enum EthSubscription {
Logs(Box<LogsSubscription>),
Header(NewBlockNotifications, StorageInfo, SubscriptionId),
PendingTransactions(Receiver<TxHash>, SubscriptionId),
FullPendingTransactions(UnboundedReceiver<AnyRpcTransaction>, SubscriptionId),
}
impl EthSubscription {
fn poll_response(&mut self, cx: &mut Context<'_>) -> Poll<Option<EthSubscriptionResponse>> {
match self {
Self::Logs(listener) => listener.poll(cx),
Self::Header(blocks, storage, id) => {
// this loop ensures we poll the receiver until it is pending, in which case the
// underlying `UnboundedReceiver` will register the new waker, see
// [`futures::channel::mpsc::UnboundedReceiver::poll_next()`]
loop {
if let Some(block) = ready!(blocks.poll_next_unpin(cx)) {
if let Some(block) = storage.eth_block(block.hash) {
let params = EthSubscriptionParams {
subscription: id.clone(),
result: to_rpc_result(block),
};
return Poll::Ready(Some(EthSubscriptionResponse::new(params)));
}
} else {
return Poll::Ready(None);
}
}
}
Self::PendingTransactions(tx, id) => {
let res = ready!(tx.poll_next_unpin(cx))
.map(SubscriptionResult::<Transaction>::TransactionHash)
.map(to_rpc_result)
.map(|result| {
let params = EthSubscriptionParams { subscription: id.clone(), result };
EthSubscriptionResponse::new(params)
});
Poll::Ready(res)
}
Self::FullPendingTransactions(tx, id) => {
let res = ready!(tx.poll_recv(cx)).map(to_rpc_result).map(|result| {
let params = EthSubscriptionParams { subscription: id.clone(), result };
EthSubscriptionResponse::new(params)
});
Poll::Ready(res)
}
}
}
}
impl Stream for EthSubscription {
type Item = serde_json::Value;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let pin = self.get_mut();
match ready!(pin.poll_response(cx)) {
None => Poll::Ready(None),
Some(res) => Poll::Ready(Some(serde_json::to_value(res).expect("can't fail;"))),
}
}
}
/// Returns all the logs that match the given filter
pub fn filter_logs(
block: Block,
receipts: Vec<FoundryReceiptEnvelope>,
filter: &FilteredParams,
) -> Vec<Log> {
/// Determines whether to add this log
fn add_log(
block_hash: B256,
l: &alloy_primitives::Log,
block: &Block,
params: &FilteredParams,
) -> bool {
if params.filter.is_some() {
let block_number = block.header.number;
if !params.filter_block_range(block_number)
|| !params.filter_block_hash(block_hash)
|| !params.filter_address(&l.address)
|| !params.filter_topics(l.topics())
{
return false;
}
}
true
}
let block_hash = block.header.hash_slow();
let mut logs = vec![];
let mut log_index: u32 = 0;
for (receipt_index, receipt) in receipts.into_iter().enumerate() {
let transaction_hash = block.body.transactions[receipt_index].hash();
for log in receipt.logs() {
if add_log(block_hash, log, &block, filter) {
logs.push(Log {
inner: log.clone(),
block_hash: Some(block_hash),
block_number: Some(block.header.number),
transaction_hash: Some(transaction_hash),
transaction_index: Some(receipt_index as u64),
log_index: Some(log_index as u64),
removed: false,
block_timestamp: Some(block.header.timestamp),
});
}
log_index += 1;
}
}
logs
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/error.rs | crates/anvil/src/error.rs | /// Result alias
pub type NodeResult<T> = Result<T, NodeError>;
/// An error that can occur when launching a anvil instance
#[derive(Debug, thiserror::Error)]
pub enum NodeError {
#[error(transparent)]
Hyper(#[from] hyper::Error),
#[error(transparent)]
Io(#[from] std::io::Error),
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/service.rs | crates/anvil/src/service.rs | //! background service
use crate::{
NodeResult,
eth::{
fees::FeeHistoryService,
miner::Miner,
pool::{Pool, transactions::PoolTransaction},
},
filter::Filters,
mem::{Backend, storage::MinedBlockOutcome},
};
use futures::{FutureExt, Stream, StreamExt};
use std::{
collections::VecDeque,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::{task::JoinHandle, time::Interval};
/// The type that drives the blockchain's state
///
/// This service is basically an endless future that continuously polls the miner which returns
/// transactions for the next block, then those transactions are handed off to the backend to
/// construct a new block, if all transactions were successfully included in a new block they get
/// purged from the `Pool`.
pub struct NodeService {
/// The pool that holds all transactions.
pool: Arc<Pool>,
/// Creates new blocks.
block_producer: BlockProducer,
/// The miner responsible to select transactions from the `pool`.
miner: Miner,
/// Maintenance task for fee history related tasks.
fee_history: FeeHistoryService,
/// Tracks all active filters
filters: Filters,
/// The interval at which to check for filters that need to be evicted
filter_eviction_interval: Interval,
}
impl NodeService {
pub fn new(
pool: Arc<Pool>,
backend: Arc<Backend>,
miner: Miner,
fee_history: FeeHistoryService,
filters: Filters,
) -> Self {
let start = tokio::time::Instant::now() + filters.keep_alive();
let filter_eviction_interval = tokio::time::interval_at(start, filters.keep_alive());
Self {
pool,
block_producer: BlockProducer::new(backend),
miner,
fee_history,
filter_eviction_interval,
filters,
}
}
}
impl Future for NodeService {
type Output = NodeResult<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let pin = self.get_mut();
// this drives block production and feeds new sets of ready transactions to the block
// producer
loop {
// advance block production until pending
while let Poll::Ready(Some(outcome)) = pin.block_producer.poll_next_unpin(cx) {
trace!(target: "node", "mined block {}", outcome.block_number);
// prune the transactions from the pool
pin.pool.on_mined_block(outcome);
}
if let Poll::Ready(transactions) = pin.miner.poll(&pin.pool, cx) {
// miner returned a set of transaction that we feed to the producer
pin.block_producer.queued.push_back(transactions);
} else {
// no progress made
break;
}
}
// poll the fee history task
let _ = pin.fee_history.poll_unpin(cx);
if pin.filter_eviction_interval.poll_tick(cx).is_ready() {
let filters = pin.filters.clone();
// evict filters that timed out
tokio::task::spawn(async move { filters.evict().await });
}
Poll::Pending
}
}
/// A type that exclusively mines one block at a time
#[must_use = "streams do nothing unless polled"]
struct BlockProducer {
/// Holds the backend if no block is being mined
idle_backend: Option<Arc<Backend>>,
/// Single active future that mines a new block
block_mining: Option<JoinHandle<(MinedBlockOutcome, Arc<Backend>)>>,
/// backlog of sets of transactions ready to be mined
queued: VecDeque<Vec<Arc<PoolTransaction>>>,
}
impl BlockProducer {
fn new(backend: Arc<Backend>) -> Self {
Self { idle_backend: Some(backend), block_mining: None, queued: Default::default() }
}
}
impl Stream for BlockProducer {
type Item = MinedBlockOutcome;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let pin = self.get_mut();
if !pin.queued.is_empty() {
// only spawn a building task if there's none in progress already
if let Some(backend) = pin.idle_backend.take() {
let transactions = pin.queued.pop_front().expect("not empty; qed");
// we spawn this on as blocking task because this can be blocking for a while in
// forking mode, because of all the rpc calls to fetch the required state
let handle = tokio::runtime::Handle::current();
let mining = tokio::task::spawn_blocking(move || {
handle.block_on(async move {
trace!(target: "miner", "creating new block");
let block = backend.mine_block(transactions).await;
trace!(target: "miner", "created new block: {}", block.block_number);
(block, backend)
})
});
pin.block_mining = Some(mining);
}
}
if let Some(mut mining) = pin.block_mining.take() {
if let Poll::Ready(res) = mining.poll_unpin(cx) {
return match res {
Ok((outcome, backend)) => {
pin.idle_backend = Some(backend);
Poll::Ready(Some(outcome))
}
Err(err) => {
panic!("miner task failed: {err}");
}
};
} else {
pin.block_mining = Some(mining)
}
}
Poll::Pending
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/logging.rs | crates/anvil/src/logging.rs | //! User facing Logger
use parking_lot::RwLock;
use std::sync::Arc;
use tracing::{Metadata, subscriber::Interest};
use tracing_subscriber::{Layer, layer::Context};
/// The target that identifies the events intended to be logged to stdout
pub(crate) const NODE_USER_LOG_TARGET: &str = "node::user";
/// The target that identifies the events coming from the `console.log` invocations.
pub(crate) const EVM_CONSOLE_LOG_TARGET: &str = "node::console";
/// A logger that listens for node related events and displays them.
///
/// This layer is intended to be used as filter for `NODE_USER_LOG_TARGET` events that will
/// eventually be logged to stdout
#[derive(Clone, Debug, Default)]
pub struct NodeLogLayer {
state: LoggingManager,
}
impl NodeLogLayer {
/// Returns a new instance of this layer
pub fn new(state: LoggingManager) -> Self {
Self { state }
}
}
// use `Layer`'s filter function to globally enable/disable `NODE_USER_LOG_TARGET` events
impl<S> Layer<S> for NodeLogLayer
where
S: tracing::Subscriber,
{
fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest {
if metadata.target() == NODE_USER_LOG_TARGET || metadata.target() == EVM_CONSOLE_LOG_TARGET
{
Interest::sometimes()
} else {
Interest::never()
}
}
fn enabled(&self, metadata: &Metadata<'_>, _ctx: Context<'_, S>) -> bool {
self.state.is_enabled()
&& (metadata.target() == NODE_USER_LOG_TARGET
|| metadata.target() == EVM_CONSOLE_LOG_TARGET)
}
}
/// Contains the configuration of the logger
#[derive(Clone, Debug)]
pub struct LoggingManager {
/// Whether the logger is currently enabled
pub enabled: Arc<RwLock<bool>>,
}
impl LoggingManager {
/// Returns true if logging is currently enabled
pub fn is_enabled(&self) -> bool {
*self.enabled.read()
}
/// Updates the `enabled` state
pub fn set_enabled(&self, enabled: bool) {
let mut current = self.enabled.write();
*current = enabled;
}
}
impl Default for LoggingManager {
fn default() -> Self {
Self { enabled: Arc::new(RwLock::new(true)) }
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/opts.rs | crates/anvil/src/opts.rs | use crate::cmd::NodeArgs;
use clap::{Parser, Subcommand};
use foundry_cli::opts::GlobalArgs;
use foundry_common::version::{LONG_VERSION, SHORT_VERSION};
/// A fast local Ethereum development node.
#[derive(Parser)]
#[command(name = "anvil", version = SHORT_VERSION, long_version = LONG_VERSION, next_display_order = None)]
pub struct Anvil {
/// Include the global arguments.
#[command(flatten)]
pub global: GlobalArgs,
#[command(flatten)]
pub node: NodeArgs,
#[command(subcommand)]
pub cmd: Option<AnvilSubcommand>,
}
#[derive(Subcommand)]
pub enum AnvilSubcommand {
/// Generate shell completions script.
#[command(visible_alias = "com")]
Completions {
#[arg(value_enum)]
shell: foundry_cli::clap::Shell,
},
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/cmd.rs | crates/anvil/src/cmd.rs | use crate::{
AccountGenerator, CHAIN_ID, NodeConfig,
config::{DEFAULT_MNEMONIC, ForkChoice},
eth::{EthApi, backend::db::SerializableState, pool::transactions::TransactionOrder},
};
use alloy_genesis::Genesis;
use alloy_primitives::{B256, U256, utils::Unit};
use alloy_signer_local::coins_bip39::{English, Mnemonic};
use anvil_server::ServerConfig;
use clap::Parser;
use core::fmt;
use foundry_common::shell;
use foundry_config::{Chain, Config, FigmentProviders};
use foundry_evm::hardfork::{EthereumHardfork, OpHardfork};
use foundry_evm_networks::NetworkConfigs;
use futures::FutureExt;
use rand_08::{SeedableRng, rngs::StdRng};
use std::{
net::IpAddr,
path::{Path, PathBuf},
pin::Pin,
str::FromStr,
sync::{
Arc,
atomic::{AtomicUsize, Ordering},
},
task::{Context, Poll},
time::Duration,
};
use tokio::time::{Instant, Interval};
#[derive(Clone, Debug, Parser)]
pub struct NodeArgs {
/// Port number to listen on.
#[arg(long, short, default_value = "8545", value_name = "NUM")]
pub port: u16,
/// Number of dev accounts to generate and configure.
#[arg(long, short, default_value = "10", value_name = "NUM")]
pub accounts: u64,
/// The balance of every dev account in Ether.
#[arg(long, default_value = "10000", value_name = "NUM")]
pub balance: u64,
/// The timestamp of the genesis block.
#[arg(long, value_name = "NUM")]
pub timestamp: Option<u64>,
/// The number of the genesis block.
#[arg(long, value_name = "NUM")]
pub number: Option<u64>,
/// BIP39 mnemonic phrase used for generating accounts.
/// Cannot be used if `mnemonic_random` or `mnemonic_seed` are used.
#[arg(long, short, conflicts_with_all = &["mnemonic_seed", "mnemonic_random"])]
pub mnemonic: Option<String>,
/// Automatically generates a BIP39 mnemonic phrase, and derives accounts from it.
/// Cannot be used with other `mnemonic` options.
/// You can specify the number of words you want in the mnemonic.
/// [default: 12]
#[arg(long, conflicts_with_all = &["mnemonic", "mnemonic_seed"], default_missing_value = "12", num_args(0..=1))]
pub mnemonic_random: Option<usize>,
/// Generates a BIP39 mnemonic phrase from a given seed
/// Cannot be used with other `mnemonic` options.
///
/// CAREFUL: This is NOT SAFE and should only be used for testing.
/// Never use the private keys generated in production.
#[arg(long = "mnemonic-seed-unsafe", conflicts_with_all = &["mnemonic", "mnemonic_random"])]
pub mnemonic_seed: Option<u64>,
/// Sets the derivation path of the child key to be derived.
///
/// [default: m/44'/60'/0'/0/]
#[arg(long)]
pub derivation_path: Option<String>,
/// The EVM hardfork to use.
///
/// Choose the hardfork by name, e.g. `prague`, `cancun`, `shanghai`, `paris`, `london`, etc...
/// [default: latest]
#[arg(long)]
pub hardfork: Option<String>,
/// Block time in seconds for interval mining.
#[arg(short, long, visible_alias = "blockTime", value_name = "SECONDS", value_parser = duration_from_secs_f64)]
pub block_time: Option<Duration>,
/// Slots in an epoch
#[arg(long, value_name = "SLOTS_IN_AN_EPOCH", default_value_t = 32)]
pub slots_in_an_epoch: u64,
/// Writes output of `anvil` as json to user-specified file.
#[arg(long, value_name = "FILE", value_hint = clap::ValueHint::FilePath)]
pub config_out: Option<PathBuf>,
/// Disable auto and interval mining, and mine on demand instead.
#[arg(long, visible_alias = "no-mine", conflicts_with = "block_time")]
pub no_mining: bool,
#[arg(long, requires = "block_time")]
pub mixed_mining: bool,
/// The hosts the server will listen on.
#[arg(
long,
value_name = "IP_ADDR",
env = "ANVIL_IP_ADDR",
default_value = "127.0.0.1",
help_heading = "Server options",
value_delimiter = ','
)]
pub host: Vec<IpAddr>,
/// How transactions are sorted in the mempool.
#[arg(long, default_value = "fees")]
pub order: TransactionOrder,
/// Initialize the genesis block with the given `genesis.json` file.
#[arg(long, value_name = "PATH", value_parser= read_genesis_file)]
pub init: Option<Genesis>,
/// This is an alias for both --load-state and --dump-state.
///
/// It initializes the chain with the state and block environment stored at the file, if it
/// exists, and dumps the chain's state on exit.
#[arg(
long,
value_name = "PATH",
value_parser = StateFile::parse,
conflicts_with_all = &[
"init",
"dump_state",
"load_state"
]
)]
pub state: Option<StateFile>,
/// Interval in seconds at which the state and block environment is to be dumped to disk.
///
/// See --state and --dump-state
#[arg(short, long, value_name = "SECONDS")]
pub state_interval: Option<u64>,
/// Dump the state and block environment of chain on exit to the given file.
///
/// If the value is a directory, the state will be written to `<VALUE>/state.json`.
#[arg(long, value_name = "PATH", conflicts_with = "init")]
pub dump_state: Option<PathBuf>,
/// Preserve historical state snapshots when dumping the state.
///
/// This will save the in-memory states of the chain at particular block hashes.
///
/// These historical states will be loaded into the memory when `--load-state` / `--state`, and
/// aids in RPC calls beyond the block at which state was dumped.
#[arg(long, conflicts_with = "init", default_value = "false")]
pub preserve_historical_states: bool,
/// Initialize the chain from a previously saved state snapshot.
#[arg(
long,
value_name = "PATH",
value_parser = SerializableState::parse,
conflicts_with = "init"
)]
pub load_state: Option<SerializableState>,
#[arg(long, help = IPC_HELP, value_name = "PATH", visible_alias = "ipcpath")]
pub ipc: Option<Option<String>>,
/// Don't keep full chain history.
/// If a number argument is specified, at most this number of states is kept in memory.
///
/// If enabled, no state will be persisted on disk, so `max_persisted_states` will be 0.
#[arg(long)]
pub prune_history: Option<Option<usize>>,
/// Max number of states to persist on disk.
///
/// Note that `prune_history` will overwrite `max_persisted_states` to 0.
#[arg(long, conflicts_with = "prune_history")]
pub max_persisted_states: Option<usize>,
/// Number of blocks with transactions to keep in memory.
#[arg(long)]
pub transaction_block_keeper: Option<usize>,
#[command(flatten)]
pub evm: AnvilEvmArgs,
#[command(flatten)]
pub server_config: ServerConfig,
/// Path to the cache directory where states are stored.
#[arg(long, value_name = "PATH")]
pub cache_path: Option<PathBuf>,
}
#[cfg(windows)]
const IPC_HELP: &str =
"Launch an ipc server at the given path or default path = `\\.\\pipe\\anvil.ipc`";
/// The default IPC endpoint
#[cfg(not(windows))]
const IPC_HELP: &str = "Launch an ipc server at the given path or default path = `/tmp/anvil.ipc`";
/// Default interval for periodically dumping the state.
const DEFAULT_DUMP_INTERVAL: Duration = Duration::from_secs(60);
impl NodeArgs {
pub fn into_node_config(self) -> eyre::Result<NodeConfig> {
let genesis_balance = Unit::ETHER.wei().saturating_mul(U256::from(self.balance));
let compute_units_per_second =
if self.evm.no_rate_limit { Some(u64::MAX) } else { self.evm.compute_units_per_second };
let hardfork = match &self.hardfork {
Some(hf) => {
if self.evm.networks.is_optimism() {
Some(OpHardfork::from_str(hf)?.into())
} else {
Some(EthereumHardfork::from_str(hf)?.into())
}
}
None => None,
};
Ok(NodeConfig::default()
.with_gas_limit(self.evm.gas_limit)
.disable_block_gas_limit(self.evm.disable_block_gas_limit)
.with_gas_price(self.evm.gas_price)
.with_hardfork(hardfork)
.with_blocktime(self.block_time)
.with_no_mining(self.no_mining)
.with_mixed_mining(self.mixed_mining, self.block_time)
.with_account_generator(self.account_generator())?
.with_genesis_balance(genesis_balance)
.with_genesis_timestamp(self.timestamp)
.with_genesis_block_number(self.number)
.with_port(self.port)
.with_fork_choice(match (self.evm.fork_block_number, self.evm.fork_transaction_hash) {
(Some(block), None) => Some(ForkChoice::Block(block)),
(None, Some(hash)) => Some(ForkChoice::Transaction(hash)),
_ => self
.evm
.fork_url
.as_ref()
.and_then(|f| f.block)
.map(|num| ForkChoice::Block(num as i128)),
})
.with_fork_headers(self.evm.fork_headers)
.with_fork_chain_id(self.evm.fork_chain_id.map(u64::from).map(U256::from))
.fork_request_timeout(self.evm.fork_request_timeout.map(Duration::from_millis))
.fork_request_retries(self.evm.fork_request_retries)
.fork_retry_backoff(self.evm.fork_retry_backoff.map(Duration::from_millis))
.fork_compute_units_per_second(compute_units_per_second)
.with_eth_rpc_url(self.evm.fork_url.map(|fork| fork.url))
.with_base_fee(self.evm.block_base_fee_per_gas)
.disable_min_priority_fee(self.evm.disable_min_priority_fee)
.with_storage_caching(self.evm.no_storage_caching)
.with_server_config(self.server_config)
.with_host(self.host)
.set_silent(shell::is_quiet())
.set_config_out(self.config_out)
.with_chain_id(self.evm.chain_id)
.with_transaction_order(self.order)
.with_genesis(self.init)
.with_steps_tracing(self.evm.steps_tracing)
.with_print_logs(!self.evm.disable_console_log)
.with_print_traces(self.evm.print_traces)
.with_auto_impersonate(self.evm.auto_impersonate)
.with_ipc(self.ipc)
.with_code_size_limit(self.evm.code_size_limit)
.disable_code_size_limit(self.evm.disable_code_size_limit)
.set_pruned_history(self.prune_history)
.with_init_state(self.load_state.or_else(|| self.state.and_then(|s| s.state)))
.with_transaction_block_keeper(self.transaction_block_keeper)
.with_max_persisted_states(self.max_persisted_states)
.with_networks(self.evm.networks)
.with_disable_default_create2_deployer(self.evm.disable_default_create2_deployer)
.with_disable_pool_balance_checks(self.evm.disable_pool_balance_checks)
.with_slots_in_an_epoch(self.slots_in_an_epoch)
.with_memory_limit(self.evm.memory_limit)
.with_cache_path(self.cache_path))
}
/// Builds the [`AccountGenerator`] for the dev accounts from the CLI flags.
fn account_generator(&self) -> AccountGenerator {
    // Base generator: default mnemonic and the configured (or default) chain id.
    let generator = AccountGenerator::new(self.accounts as usize)
        .phrase(DEFAULT_MNEMONIC)
        .chain_id(self.evm.chain_id.unwrap_or(CHAIN_ID.into()));

    // Resolve an optional phrase override, in priority order:
    // explicit mnemonic > random mnemonic of N words > seeded mnemonic.
    let phrase_override: Option<String> = if let Some(mnemonic) = &self.mnemonic {
        Some(mnemonic.clone())
    } else if let Some(count) = self.mnemonic_random {
        let mut rng = rand_08::thread_rng();
        let phrase = match Mnemonic::<English>::new_with_count(&mut rng, count) {
            Ok(mnemonic) => mnemonic.to_phrase(),
            Err(err) => {
                warn!(target: "node", ?count, %err, "failed to generate mnemonic, falling back to 12-word random mnemonic");
                // Fallback: generate a valid 12-word random mnemonic instead of using
                // DEFAULT_MNEMONIC
                Mnemonic::<English>::new_with_count(&mut rng, 12)
                    .expect("valid default word count")
                    .to_phrase()
            }
        };
        Some(phrase)
    } else if let Some(seed) = self.mnemonic_seed {
        let mut rng = StdRng::seed_from_u64(seed);
        Some(Mnemonic::<English>::new(&mut rng).to_phrase())
    } else {
        None
    };

    let generator = match phrase_override {
        Some(phrase) => generator.phrase(phrase),
        None => generator,
    };

    match &self.derivation_path {
        Some(derivation) => generator.derivation_path(derivation),
        None => generator,
    }
}
/// Returns the location where to dump the state to.
fn dump_state_path(&self) -> Option<PathBuf> {
    // An explicit --dump-state path wins; otherwise fall back to the --state file path.
    if let Some(path) = &self.dump_state {
        return Some(path.clone());
    }
    self.state.as_ref().map(|s| s.path.clone())
}
/// Starts the node
///
/// Spawns the configured node, installs shutdown handling (SIGTERM on unix, Ctrl-C
/// everywhere), periodically dumps state if configured, and blocks until the node exits.
///
/// See also [crate::spawn()]
pub async fn run(self) -> eyre::Result<()> {
    // Resolve where (if anywhere) to persist state, and how often.
    let dump_state = self.dump_state_path();
    let dump_interval =
        self.state_interval.map(Duration::from_secs).unwrap_or(DEFAULT_DUMP_INTERVAL);
    let preserve_historical_states = self.preserve_historical_states;
    let (api, mut handle) = crate::try_spawn(self.into_node_config()?).await?;
    // sets the signal handler to gracefully shutdown.
    let mut fork = api.get_fork();
    // Counts received Ctrl-C signals; only the first one triggers a graceful shutdown.
    let running = Arc::new(AtomicUsize::new(0));
    // handle for the currently running rt, this must be obtained before setting the ctrl-c
    // handler, See [Handle::current]
    let mut signal = handle.shutdown_signal_mut().take();
    let task_manager = handle.task_manager();
    let mut on_shutdown = task_manager.on_shutdown();
    let mut state_dumper =
        PeriodicStateDumper::new(api, dump_state, dump_interval, preserve_historical_states);
    task_manager.spawn(async move {
        // wait for the SIGTERM signal on unix systems
        #[cfg(unix)]
        let mut sigterm = Box::pin(async {
            if let Ok(mut stream) =
                tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
            {
                stream.recv().await;
            } else {
                // Handler could not be installed; never resolve so the other arms decide.
                futures::future::pending::<()>().await;
            }
        });
        // On windows, this will never fire.
        #[cfg(not(unix))]
        let mut sigterm = Box::pin(futures::future::pending::<()>());
        // await shutdown signal but also periodically flush state
        tokio::select! {
            _ = &mut sigterm => {
                trace!("received sigterm signal, shutting down");
            }
            _ = &mut on_shutdown => {}
            // The dumper future is endless while dumping is configured, so this arm only
            // completes when state dumping is disabled.
            _ = &mut state_dumper => {}
        }
        // shutdown received: flush state one final time
        state_dumper.dump().await;
        // cleaning up and shutting down
        // this will make sure that the fork RPC cache is flushed if caching is configured
        if let Some(fork) = fork.take() {
            trace!("flushing cache on shutdown");
            fork.database
                .read()
                .await
                .maybe_flush_cache()
                .expect("Could not flush cache on fork DB");
        }
        std::process::exit(0);
    });
    ctrlc::set_handler(move || {
        let prev = running.fetch_add(1, Ordering::SeqCst);
        if prev == 0 {
            // First Ctrl-C: drop the shutdown-signal sender to trigger a graceful shutdown.
            trace!("received shutdown signal, shutting down");
            let _ = signal.take();
        }
    })
    .expect("Error setting Ctrl-C handler");
    Ok(handle.await??)
}
}
/// Anvil's EVM related arguments.
#[derive(Clone, Debug, Parser)]
#[command(next_help_heading = "EVM options")]
pub struct AnvilEvmArgs {
    // NOTE: the `///` doc comments on the fields below double as clap `--help` text;
    // editing them changes the CLI output.

    /// Fetch state over a remote endpoint instead of starting from an empty state.
    ///
    /// If you want to fetch state from a specific block number, add a block number like `http://localhost:8545@1400000` or use the `--fork-block-number` argument.
    #[arg(
        long,
        short,
        visible_alias = "rpc-url",
        value_name = "URL",
        help_heading = "Fork config"
    )]
    pub fork_url: Option<ForkUrl>,

    /// Headers to use for the rpc client, e.g. "User-Agent: test-agent"
    ///
    /// See --fork-url.
    #[arg(
        long = "fork-header",
        value_name = "HEADERS",
        help_heading = "Fork config",
        requires = "fork_url"
    )]
    pub fork_headers: Vec<String>,

    /// Timeout in ms for requests sent to remote JSON-RPC server in forking mode.
    ///
    /// Default value 45000
    #[arg(id = "timeout", long = "timeout", help_heading = "Fork config", requires = "fork_url")]
    pub fork_request_timeout: Option<u64>,

    /// Number of retry requests for spurious networks (timed out requests)
    ///
    /// Default value 5
    #[arg(id = "retries", long = "retries", help_heading = "Fork config", requires = "fork_url")]
    pub fork_request_retries: Option<u32>,

    /// Fetch state from a specific block number over a remote endpoint.
    ///
    /// If negative, the given value is subtracted from the `latest` block number.
    ///
    /// See --fork-url.
    #[arg(
        long,
        requires = "fork_url",
        value_name = "BLOCK",
        help_heading = "Fork config",
        allow_hyphen_values = true
    )]
    pub fork_block_number: Option<i128>,

    /// Fetch state from after a specific transaction hash has been applied over a remote endpoint.
    ///
    /// See --fork-url.
    #[arg(
        long,
        requires = "fork_url",
        value_name = "TRANSACTION",
        help_heading = "Fork config",
        conflicts_with = "fork_block_number"
    )]
    pub fork_transaction_hash: Option<B256>,

    /// Initial retry backoff on encountering errors.
    ///
    /// See --fork-url.
    #[arg(long, requires = "fork_url", value_name = "BACKOFF", help_heading = "Fork config")]
    pub fork_retry_backoff: Option<u64>,

    /// Specify chain id to skip fetching it from remote endpoint. This enables offline-start mode.
    ///
    /// You still must pass both `--fork-url` and `--fork-block-number`, and already have your
    /// required state cached on disk, anything missing locally would be fetched from the
    /// remote.
    #[arg(
        long,
        help_heading = "Fork config",
        value_name = "CHAIN",
        requires = "fork_block_number"
    )]
    pub fork_chain_id: Option<Chain>,

    /// Sets the number of assumed available compute units per second for this provider
    ///
    /// default value: 330
    ///
    /// See also --fork-url and <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
    #[arg(
        long,
        requires = "fork_url",
        alias = "cups",
        value_name = "CUPS",
        help_heading = "Fork config"
    )]
    pub compute_units_per_second: Option<u64>,

    /// Disables rate limiting for this node's provider.
    ///
    /// default value: false
    ///
    /// See also --fork-url and <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
    #[arg(
        long,
        requires = "fork_url",
        value_name = "NO_RATE_LIMITS",
        help_heading = "Fork config",
        visible_alias = "no-rpc-rate-limit"
    )]
    pub no_rate_limit: bool,

    /// Explicitly disables the use of RPC caching.
    ///
    /// All storage slots are read entirely from the endpoint.
    ///
    /// This flag overrides the project's configuration file.
    ///
    /// See --fork-url.
    #[arg(long, requires = "fork_url", help_heading = "Fork config")]
    pub no_storage_caching: bool,

    /// The block gas limit.
    #[arg(long, alias = "block-gas-limit", help_heading = "Environment config")]
    pub gas_limit: Option<u64>,

    /// Disable the `call.gas_limit <= block.gas_limit` constraint.
    #[arg(
        long,
        value_name = "DISABLE_GAS_LIMIT",
        help_heading = "Environment config",
        alias = "disable-gas-limit",
        conflicts_with = "gas_limit"
    )]
    pub disable_block_gas_limit: bool,

    /// EIP-170: Contract code size limit in bytes. Useful to increase this because of tests. To
    /// disable entirely, use `--disable-code-size-limit`. By default, it is 0x6000 (~25kb).
    #[arg(long, value_name = "CODE_SIZE", help_heading = "Environment config")]
    pub code_size_limit: Option<usize>,

    /// Disable EIP-170: Contract code size limit.
    #[arg(
        long,
        value_name = "DISABLE_CODE_SIZE_LIMIT",
        conflicts_with = "code_size_limit",
        help_heading = "Environment config"
    )]
    pub disable_code_size_limit: bool,

    /// The gas price.
    #[arg(long, help_heading = "Environment config")]
    pub gas_price: Option<u128>,

    /// The base fee in a block.
    #[arg(
        long,
        visible_alias = "base-fee",
        value_name = "FEE",
        help_heading = "Environment config"
    )]
    pub block_base_fee_per_gas: Option<u64>,

    /// Disable the enforcement of a minimum suggested priority fee.
    #[arg(long, visible_alias = "no-priority-fee", help_heading = "Environment config")]
    pub disable_min_priority_fee: bool,

    /// The chain ID.
    #[arg(long, alias = "chain", help_heading = "Environment config")]
    pub chain_id: Option<Chain>,

    /// Enable steps tracing used for debug calls returning geth-style traces
    #[arg(long, visible_alias = "tracing")]
    pub steps_tracing: bool,

    /// Disable printing of `console.log` invocations to stdout.
    #[arg(long, visible_alias = "no-console-log")]
    pub disable_console_log: bool,

    /// Enable printing of traces for executed transactions and `eth_call` to stdout.
    #[arg(long, visible_alias = "enable-trace-printing")]
    pub print_traces: bool,

    /// Enables automatic impersonation on startup. This allows any transaction sender to be
    /// simulated as different accounts, which is useful for testing contract behavior.
    #[arg(long, visible_alias = "auto-unlock")]
    pub auto_impersonate: bool,

    /// Disable the default create2 deployer
    #[arg(long, visible_alias = "no-create2")]
    pub disable_default_create2_deployer: bool,

    /// Disable pool balance checks
    #[arg(long)]
    pub disable_pool_balance_checks: bool,

    /// The memory limit per EVM execution in bytes.
    #[arg(long)]
    pub memory_limit: Option<u64>,

    #[command(flatten)]
    pub networks: NetworkConfigs,
}
impl AnvilEvmArgs {
    /// Resolves an alias passed as fork-url to the matching url defined in the rpc_endpoints
    /// section of the project configuration file.
    ///
    /// Does nothing if the fork-url is not a configured alias.
    pub fn resolve_rpc_alias(&mut self) {
        let resolved = self.fork_url.as_ref().and_then(|fork_url| {
            let config = Config::load_with_providers(FigmentProviders::Anvil).ok()?;
            match config.get_rpc_url_with_alias(&fork_url.url) {
                Some(Ok(url)) => Some(ForkUrl { url: url.to_string(), block: fork_url.block }),
                // Not an alias (or the alias failed to resolve): keep the URL as given.
                _ => None,
            }
        });
        if let Some(fork_url) = resolved {
            self.fork_url = Some(fork_url);
        }
    }
}
/// Helper type to periodically dump the state of the chain to disk
struct PeriodicStateDumper {
    /// The currently running dump, if any; polled to completion before a new one is scheduled.
    in_progress_dump: Option<Pin<Box<dyn Future<Output = ()> + Send + Sync + 'static>>>,
    /// API handle used to serialize the chain state.
    api: EthApi,
    /// Target file for dumps; `None` disables periodic dumping entirely.
    dump_state: Option<PathBuf>,
    /// Whether historical states are included in the serialized output.
    preserve_historical_states: bool,
    /// Tick source that schedules the periodic dumps.
    interval: Interval,
}
impl PeriodicStateDumper {
fn new(
api: EthApi,
dump_state: Option<PathBuf>,
interval: Duration,
preserve_historical_states: bool,
) -> Self {
let dump_state = dump_state.map(|mut dump_state| {
if dump_state.is_dir() {
dump_state = dump_state.join("state.json");
}
dump_state
});
// periodically flush the state
let interval = tokio::time::interval_at(Instant::now() + interval, interval);
Self { in_progress_dump: None, api, dump_state, preserve_historical_states, interval }
}
async fn dump(&self) {
if let Some(state) = self.dump_state.clone() {
Self::dump_state(self.api.clone(), state, self.preserve_historical_states).await
}
}
/// Infallible state dump
async fn dump_state(api: EthApi, dump_state: PathBuf, preserve_historical_states: bool) {
trace!(path=?dump_state, "Dumping state on shutdown");
match api.serialized_state(preserve_historical_states).await {
Ok(state) => {
if let Err(err) = foundry_common::fs::write_json_file(&dump_state, &state) {
error!(?err, "Failed to dump state");
} else {
trace!(path=?dump_state, "Dumped state on shutdown");
}
}
Err(err) => {
error!(?err, "Failed to extract state");
}
}
}
}
// An endless future that periodically dumps the state to disk if configured.
impl Future for PeriodicStateDumper {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();
        // No target path: nothing to do, stay pending forever so the caller's select!
        // never completes on this arm.
        if this.dump_state.is_none() {
            return Poll::Pending;
        }

        loop {
            // Drive any in-flight dump to completion before scheduling the next one.
            if let Some(mut flush) = this.in_progress_dump.take() {
                match flush.poll_unpin(cx) {
                    Poll::Ready(_) => {
                        // Dump finished: restart the interval so dumps are spaced from
                        // completion rather than from when they were scheduled.
                        this.interval.reset();
                    }
                    Poll::Pending => {
                        this.in_progress_dump = Some(flush);
                        return Poll::Pending;
                    }
                }
            }

            if this.interval.poll_tick(cx).is_ready() {
                let api = this.api.clone();
                let path = this.dump_state.clone().expect("exists; see above");
                this.in_progress_dump =
                    Some(Box::pin(Self::dump_state(api, path, this.preserve_historical_states)));
                // Loop again so the freshly created dump future is polled immediately.
            } else {
                break;
            }
        }

        Poll::Pending
    }
}
/// Represents the --state flag and where to load from, or dump the state to
#[derive(Clone, Debug)]
pub struct StateFile {
    /// Resolved state-file path (a directory argument resolves to `<dir>/state.json`).
    pub path: PathBuf,
    /// The loaded state, if the file existed at parse time.
    pub state: Option<SerializableState>,
}
impl StateFile {
    /// Clap `value_parser` entry point; see [`Self::parse_path`].
    fn parse(path: &str) -> Result<Self, String> {
        Self::parse_path(path)
    }

    /// Parse from file but only if it exists
    pub fn parse_path(path: impl AsRef<Path>) -> Result<Self, String> {
        let given = path.as_ref();
        // A directory argument is shorthand for `<dir>/state.json`.
        let path = if given.is_dir() { given.join("state.json") } else { given.to_path_buf() };
        if !path.exists() {
            // A missing file is fine: the node will dump state to it later.
            return Ok(Self { path, state: None });
        }
        let loaded = SerializableState::load(&path).map_err(|err| err.to_string())?;
        Ok(Self { path, state: Some(loaded) })
    }
}
/// Represents the input URL for a fork with an optional trailing block number:
/// `http://localhost:8545@1000000`
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ForkUrl {
    /// The endpoint url
    pub url: String,
    /// Optional trailing block
    pub block: Option<u64>,
}

impl fmt::Display for ForkUrl {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.block {
            // Re-append the `@<block>` suffix that `FromStr` stripped off.
            Some(block) => {
                self.url.fmt(f)?;
                write!(f, "@{block}")
            }
            None => self.url.fmt(f),
        }
    }
}

impl FromStr for ForkUrl {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let Some((url, suffix)) = s.rsplit_once('@') else {
            // No `@` at all: the whole input is the URL.
            return Ok(Self { url: s.to_string(), block: None });
        };
        if suffix == "latest" {
            return Ok(Self { url: url.to_string(), block: None });
        }
        // this will prevent false positives for auths `user:password@example.com`
        let is_block_suffix =
            !suffix.is_empty() && !suffix.contains(':') && !suffix.contains('.');
        if !is_block_suffix {
            return Ok(Self { url: s.to_string(), block: None });
        }
        let block: u64 = suffix
            .parse()
            .map_err(|_| format!("Failed to parse block number: `{suffix}`"))?;
        Ok(Self { url: url.to_string(), block: Some(block) })
    }
}
/// Clap's value parser for genesis. Loads a genesis.json file.
fn read_genesis_file(path: &str) -> Result<Genesis, String> {
    match foundry_common::fs::read_json_file(path.as_ref()) {
        Ok(genesis) => Ok(genesis),
        Err(err) => Err(err.to_string()),
    }
}
/// Clap value parser turning a fractional-seconds string into a [`Duration`].
///
/// Rejects `0` explicitly; negative, NaN, and out-of-range values are rejected by
/// `Duration::try_from_secs_f64` itself.
fn duration_from_secs_f64(s: &str) -> Result<Duration, String> {
    let secs = s.parse::<f64>().map_err(|e| e.to_string())?;
    if secs == 0.0 {
        return Err("Duration must be greater than 0".to_string());
    }
    Duration::try_from_secs_f64(secs).map_err(|e| e.to_string())
}
#[cfg(test)]
mod tests {
use super::*;
use std::{env, net::Ipv4Addr};
#[test]
fn test_parse_fork_url() {
    // (input, expected url, expected block)
    let cases: &[(&str, &str, Option<u64>)] = &[
        ("http://localhost:8545@1000000", "http://localhost:8545", Some(1000000)),
        ("http://localhost:8545", "http://localhost:8545", None),
        ("wss://user:password@example.com/", "wss://user:password@example.com/", None),
        ("wss://user:password@example.com/@latest", "wss://user:password@example.com/", None),
        ("wss://user:password@example.com/@100000", "wss://user:password@example.com/", Some(100000)),
    ];
    for (input, url, block) in cases {
        let fork: ForkUrl = input.parse().unwrap();
        assert_eq!(fork, ForkUrl { url: url.to_string(), block: *block });
    }
}
#[test]
fn can_parse_ethereum_hardfork() {
    // `--hardfork` names resolve against Ethereum hardforks by default.
    let args: NodeArgs = NodeArgs::parse_from(["anvil", "--hardfork", "berlin"]);
    let config = args.into_node_config().unwrap();
    assert_eq!(config.hardfork, Some(EthereumHardfork::Berlin.into()));
}
#[test]
fn can_parse_optimism_hardfork() {
    // With `--optimism`, hardfork names resolve against OP hardforks instead.
    let args: NodeArgs =
        NodeArgs::parse_from(["anvil", "--optimism", "--hardfork", "Regolith"]);
    let config = args.into_node_config().unwrap();
    assert_eq!(config.hardfork, Some(OpHardfork::Regolith.into()));
}
#[test]
fn cant_parse_invalid_hardfork() {
    // "Regolith" is an OP hardfork; without `--optimism` it must be rejected.
    let args: NodeArgs = NodeArgs::parse_from(["anvil", "--hardfork", "Regolith"]);
    let config = args.into_node_config();
    assert!(config.is_err());
}
#[test]
fn can_parse_fork_headers() {
    // Repeated `--fork-header` flags accumulate into `fork_headers`.
    // NOTE(review): the fork URL contains a stray comma ("http,://"); harmless here because
    // only the headers are asserted and the URL is not validated at parse time — but
    // presumably a typo; confirm.
    let args: NodeArgs = NodeArgs::parse_from([
        "anvil",
        "--fork-url",
        "http,://localhost:8545",
        "--fork-header",
        "User-Agent: test-agent",
        "--fork-header",
        "Referrer: example.com",
    ]);
    assert_eq!(args.evm.fork_headers, vec!["User-Agent: test-agent", "Referrer: example.com"]);
}
#[test]
fn can_parse_prune_config() {
    // Bare flag: prune with no explicit block count.
    let args: NodeArgs = NodeArgs::parse_from(["anvil", "--prune-history"]);
    assert!(args.prune_history.is_some());
    // With a value: keep the given number of blocks.
    let args: NodeArgs = NodeArgs::parse_from(["anvil", "--prune-history", "100"]);
    assert_eq!(args.prune_history, Some(Some(100)));
}
#[test]
fn can_parse_max_persisted_states_config() {
    // `--max-persisted-states` caps how many historic state snapshots are kept.
    let args: NodeArgs = NodeArgs::parse_from(["anvil", "--max-persisted-states", "500"]);
    assert_eq!(args.max_persisted_states, Some(500));
}
#[test]
fn can_parse_disable_block_gas_limit() {
    let args: NodeArgs = NodeArgs::parse_from(["anvil", "--disable-block-gas-limit"]);
    assert!(args.evm.disable_block_gas_limit);
    // The flag conflicts with an explicit `--gas-limit`, so parsing must fail.
    let args =
        NodeArgs::try_parse_from(["anvil", "--disable-block-gas-limit", "--gas-limit", "100"]);
    assert!(args.is_err());
}
#[test]
fn can_parse_disable_code_size_limit() {
let args: NodeArgs = NodeArgs::parse_from(["anvil", "--disable-code-size-limit"]);
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/tasks/mod.rs | crates/anvil/src/tasks/mod.rs | //! Task management support
#![allow(rustdoc::private_doc_tests)]
use crate::{EthApi, shutdown::Shutdown, tasks::block_listener::BlockListener};
use alloy_network::{AnyHeader, AnyNetwork};
use alloy_primitives::B256;
use alloy_provider::Provider;
use alloy_rpc_types::anvil::Forking;
use futures::StreamExt;
use std::fmt;
use tokio::{runtime::Handle, task::JoinHandle};
pub mod block_listener;
/// A helper struct for managing additional tokio tasks.
///
/// Cloning is cheap: clones spawn onto the same runtime and hold a clone of the same
/// shutdown receiver.
#[derive(Clone)]
pub struct TaskManager {
    /// Tokio runtime handle that's used to spawn futures, See [tokio::runtime::Handle].
    tokio_handle: Handle,
    /// A receiver for the shutdown signal
    on_shutdown: Shutdown,
}
impl TaskManager {
    /// Creates a new instance of the task manager
    pub fn new(tokio_handle: Handle, on_shutdown: Shutdown) -> Self {
        Self { tokio_handle, on_shutdown }
    }

    /// Returns a receiver for the shutdown event
    pub fn on_shutdown(&self) -> Shutdown {
        self.on_shutdown.clone()
    }

    /// Spawns the given task.
    pub fn spawn(&self, task: impl Future<Output = ()> + Send + 'static) -> JoinHandle<()> {
        self.tokio_handle.spawn(task)
    }

    /// Spawns the blocking task and returns a handle to it.
    ///
    /// The future is driven via `block_on` on a dedicated blocking thread, so it may itself
    /// perform blocking work without starving the async runtime.
    ///
    /// Returning the `JoinHandle` allows callers to cancel the task or await its completion.
    pub fn spawn_blocking(
        &self,
        task: impl Future<Output = ()> + Send + 'static,
    ) -> JoinHandle<()> {
        let handle = self.tokio_handle.clone();
        self.tokio_handle.spawn_blocking(move || {
            handle.block_on(task);
        })
    }

    /// Spawns a new task that listens for new blocks and resets the forked provider for every new
    /// block
    ///
    /// ```
    /// use alloy_network::Ethereum;
    /// use alloy_provider::RootProvider;
    /// use anvil::{NodeConfig, spawn};
    ///
    /// # async fn t() {
    /// let endpoint = "http://....";
    /// let (api, handle) = spawn(NodeConfig::default().with_eth_rpc_url(Some(endpoint))).await;
    ///
    /// let provider = RootProvider::connect(endpoint).await.unwrap();
    ///
    /// handle.task_manager().spawn_reset_on_new_polled_blocks(provider, api);
    /// # }
    /// ```
    pub fn spawn_reset_on_new_polled_blocks<P>(&self, provider: P, api: EthApi)
    where
        P: Provider<AnyNetwork> + Clone + Unpin + 'static,
    {
        self.spawn_block_poll_listener(provider.clone(), move |hash| {
            let provider = provider.clone();
            let api = api.clone();
            async move {
                // Resolve the block by hash to obtain its number, then re-fork at that height.
                if let Ok(Some(block)) = provider.get_block(hash.into()).await {
                    let _ = api
                        .anvil_reset(Some(Forking {
                            json_rpc_url: None,
                            block_number: Some(block.header.number),
                        }))
                        .await;
                }
            }
        })
    }

    /// Spawns a new [`BlockListener`] task that listens for new blocks (poll-based) See also
    /// [`Provider::watch_blocks`] and executes the future the `task_factory` returns for the new
    /// block hash
    pub fn spawn_block_poll_listener<P, F, Fut>(&self, provider: P, task_factory: F)
    where
        P: Provider<AnyNetwork> + 'static,
        F: Fn(B256) -> Fut + Unpin + Send + Sync + 'static,
        Fut: Future<Output = ()> + Send,
    {
        let shutdown = self.on_shutdown.clone();
        self.spawn(async move {
            // `watch_blocks` yields batches of hashes per poll; flatten into single hashes.
            let blocks = provider
                .watch_blocks()
                .await
                .unwrap()
                .into_stream()
                .flat_map(futures::stream::iter);
            BlockListener::new(shutdown, blocks, task_factory).await;
        });
    }

    /// Spawns a new task that listens for new blocks and resets the forked provider for every new
    /// block
    ///
    /// ```
    /// use alloy_network::Ethereum;
    /// use alloy_provider::RootProvider;
    /// use anvil::{NodeConfig, spawn};
    ///
    /// # async fn t() {
    /// let (api, handle) = spawn(NodeConfig::default().with_eth_rpc_url(Some("http://...."))).await;
    ///
    /// let provider = RootProvider::connect("ws://...").await.unwrap();
    ///
    /// handle.task_manager().spawn_reset_on_subscribed_blocks(provider, api);
    ///
    /// # }
    /// ```
    pub fn spawn_reset_on_subscribed_blocks<P>(&self, provider: P, api: EthApi)
    where
        P: Provider<AnyNetwork> + 'static,
    {
        self.spawn_block_subscription(provider, move |header| {
            let api = api.clone();
            async move {
                // Subscribed headers carry the number directly; no extra lookup needed.
                let _ = api
                    .anvil_reset(Some(Forking {
                        json_rpc_url: None,
                        block_number: Some(header.number),
                    }))
                    .await;
            }
        })
    }

    /// Spawns a new [`BlockListener`] task that listens for new blocks (via subscription) See also
    /// [`Provider::subscribe_blocks()`] and executes the future the `task_factory` returns for the
    /// new block hash
    pub fn spawn_block_subscription<P, F, Fut>(&self, provider: P, task_factory: F)
    where
        P: Provider<AnyNetwork> + 'static,
        F: Fn(alloy_rpc_types::Header<AnyHeader>) -> Fut + Unpin + Send + Sync + 'static,
        Fut: Future<Output = ()> + Send,
    {
        let shutdown = self.on_shutdown.clone();
        self.spawn(async move {
            let blocks = provider.subscribe_blocks().await.unwrap().into_stream();
            BlockListener::new(shutdown, blocks, task_factory).await;
        });
    }
}
impl fmt::Debug for TaskManager {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Neither the runtime handle nor the shutdown receiver has a useful Debug form.
        let mut dbg = f.debug_struct("TaskManager");
        dbg.finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/tasks/block_listener.rs | crates/anvil/src/tasks/block_listener.rs | //! A task that listens for new blocks
use crate::shutdown::Shutdown;
use futures::{FutureExt, Stream, StreamExt};
use std::{
pin::Pin,
task::{Context, Poll},
};
/// A Future that will execute a given `task` for each new block that arrives on the stream.
pub struct BlockListener<St, F, Fut> {
    /// Source of new block items.
    stream: St,
    /// Builds the future to run for each received item.
    task_factory: F,
    /// The currently executing task, if any; replaced when a newer block arrives.
    task: Option<Pin<Box<Fut>>>,
    /// Resolves on node shutdown, terminating this listener.
    on_shutdown: Shutdown,
}
impl<St, F, Fut> BlockListener<St, F, Fut>
where
    St: Stream,
    F: Fn(<St as Stream>::Item) -> Fut,
{
    /// Creates a listener that drives `task_factory` for each item of `block_stream` until
    /// `on_shutdown` resolves.
    pub fn new(on_shutdown: Shutdown, block_stream: St, task_factory: F) -> Self {
        Self { on_shutdown, stream: block_stream, task_factory, task: None }
    }
}
impl<St, F, Fut> Future for BlockListener<St, F, Fut>
where
    St: Stream + Unpin,
    F: Fn(<St as Stream>::Item) -> Fut + Unpin + Send + Sync + 'static,
    Fut: Future<Output = ()> + Send,
{
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let pin = self.get_mut();
        // Shutdown takes priority over any pending work.
        if pin.on_shutdown.poll_unpin(cx).is_ready() {
            return Poll::Ready(());
        }
        let mut block = None;
        // drain the stream, keeping only the most recent item
        while let Poll::Ready(maybe_block) = pin.stream.poll_next_unpin(cx) {
            if maybe_block.is_none() {
                // stream complete
                return Poll::Ready(());
            }
            block = maybe_block;
        }
        if let Some(block) = block {
            // A newer block supersedes (and drops) any task still in flight.
            pin.task = Some(Box::pin((pin.task_factory)(block)));
        }
        if let Some(mut task) = pin.task.take()
            && task.poll_unpin(cx).is_pending()
        {
            pin.task = Some(task);
        }
        Poll::Pending
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/miner.rs | crates/anvil/src/eth/miner.rs | //! Mines transactions
use crate::eth::pool::{Pool, transactions::PoolTransaction};
use alloy_primitives::TxHash;
use futures::{
channel::mpsc::Receiver,
stream::{Fuse, StreamExt},
task::AtomicWaker,
};
use parking_lot::{RawRwLock, RwLock, lock_api::RwLockWriteGuard};
use std::{
fmt,
sync::Arc,
task::{Context, Poll, ready},
time::Duration,
};
use tokio::time::{Interval, MissedTickBehavior};
#[derive(Clone, Debug)]
pub struct Miner {
    /// The mode this miner currently operates in
    ///
    /// Shared across clones so `set_mining_mode` takes effect for all of them.
    mode: Arc<RwLock<MiningMode>>,
    /// used for task wake up when the mining mode was forcefully changed
    ///
    /// This will register the task so we can manually wake it up if the mining mode was changed
    inner: Arc<MinerInner>,
    /// Transactions included into the pool before any others are.
    /// Done once on startup.
    force_transactions: Option<Vec<Arc<PoolTransaction>>>,
}
impl Miner {
    /// Returns a new miner with that operates in the given `mode`.
    pub fn new(mode: MiningMode) -> Self {
        Self {
            mode: Arc::new(RwLock::new(mode)),
            inner: Default::default(),
            force_transactions: None,
        }
    }

    /// Provide transactions that will cause a block to be mined with transactions
    /// as soon as the miner is polled.
    ///
    /// Providing an empty list of transactions will cause the miner to mine an empty block assuming
    /// there are not other transactions in the pool.
    pub fn with_forced_transactions(
        mut self,
        force_transactions: Option<Vec<PoolTransaction>>,
    ) -> Self {
        self.force_transactions =
            force_transactions.map(|tx| tx.into_iter().map(Arc::new).collect());
        self
    }

    /// Returns the write lock of the mining mode
    pub fn mode_write(&self) -> RwLockWriteGuard<'_, RawRwLock, MiningMode> {
        self.mode.write()
    }

    /// Returns `true` if auto mining is enabled
    pub fn is_auto_mine(&self) -> bool {
        let mode = self.mode.read();
        matches!(*mode, MiningMode::Auto(_))
    }

    /// Returns the configured block time in seconds, if operating in fixed-block-time mode.
    pub fn get_interval(&self) -> Option<u64> {
        let mode = self.mode.read();
        if let MiningMode::FixedBlockTime(ref mm) = *mode {
            return Some(mm.interval.period().as_secs());
        }
        None
    }

    /// Sets the mining mode to operate in
    pub fn set_mining_mode(&self, mode: MiningMode) {
        // Format the new mode before it is moved into the lock below.
        let new_mode = format!("{mode:?}");
        let mode = std::mem::replace(&mut *self.mode_write(), mode);
        trace!(target: "miner", "updated mining mode from {:?} to {}", mode, new_mode);
        // Wake the mining task so the new mode takes effect immediately.
        self.inner.wake();
    }

    /// polls the [Pool] and returns those transactions that should be put in a block according to
    /// the current mode.
    ///
    /// May return an empty list, if no transactions are ready.
    pub fn poll(
        &mut self,
        pool: &Arc<Pool>,
        cx: &mut Context<'_>,
    ) -> Poll<Vec<Arc<PoolTransaction>>> {
        // Register the waker first so a concurrent `set_mining_mode` can wake this task.
        self.inner.register(cx);
        let next = ready!(self.mode.write().poll(pool, cx));
        // One-shot startup transactions are prepended to the first mined block.
        if let Some(mut transactions) = self.force_transactions.take() {
            transactions.extend(next);
            Poll::Ready(transactions)
        } else {
            Poll::Ready(next)
        }
    }
}
/// Shared miner internals holding the waker used to re-poll the mining task when the mining
/// mode is changed externally.
// NOTE(review): the previous doc ("A Mining mode that does nothing") described a different
// item and was presumably a copy-paste leftover.
#[derive(Debug)]
pub struct MinerInner {
    // Registers the task polling the miner so it can be woken manually.
    waker: AtomicWaker,
}
impl MinerInner {
    /// Wakes the task that last registered via [`Self::register`], if any.
    fn wake(&self) {
        self.waker.wake();
    }

    /// Registers the current task's waker so [`Self::wake`] can re-poll it.
    fn register(&self, cx: &Context<'_>) {
        self.waker.register(cx.waker());
    }
}
impl Default for MinerInner {
    fn default() -> Self {
        // Starts with no task registered; `register` is called on the first poll.
        let waker = AtomicWaker::new();
        Self { waker }
    }
}
/// Mode of operations for the `Miner`
#[derive(Debug)]
pub enum MiningMode {
    /// A miner that does nothing
    None,
    /// A miner that listens for new transactions that are ready.
    ///
    /// Either one transaction will be mined per block, or any number of transactions will be
    /// allowed
    Auto(ReadyTransactionMiner),
    /// A miner that constructs a new block every `interval` tick
    FixedBlockTime(FixedBlockTimeMiner),
    /// A miner that uses both Auto and FixedBlockTime
    ///
    /// Transactions produced by both sub-miners are combined and deduplicated by hash.
    Mixed(ReadyTransactionMiner, FixedBlockTimeMiner),
}
impl MiningMode {
    /// Mines as soon as transactions become ready, up to `max_transactions` per block.
    pub fn instant(max_transactions: usize, listener: Receiver<TxHash>) -> Self {
        Self::Auto(ReadyTransactionMiner {
            max_transactions,
            has_pending_txs: None,
            rx: listener.fuse(),
        })
    }

    /// Mines a new block every `duration` tick.
    pub fn interval(duration: Duration) -> Self {
        Self::FixedBlockTime(FixedBlockTimeMiner::new(duration))
    }

    /// Combines [`Self::instant`] and [`Self::interval`] behavior.
    pub fn mixed(max_transactions: usize, listener: Receiver<TxHash>, duration: Duration) -> Self {
        Self::Mixed(
            ReadyTransactionMiner { max_transactions, has_pending_txs: None, rx: listener.fuse() },
            FixedBlockTimeMiner::new(duration),
        )
    }

    /// polls the [Pool] and returns those transactions that should be put in a block, if any.
    pub fn poll(
        &mut self,
        pool: &Arc<Pool>,
        cx: &mut Context<'_>,
    ) -> Poll<Vec<Arc<PoolTransaction>>> {
        match self {
            Self::None => Poll::Pending,
            Self::Auto(miner) => miner.poll(pool, cx),
            Self::FixedBlockTime(miner) => miner.poll(pool, cx),
            Self::Mixed(auto, fixed) => {
                // Poll both sub-miners unconditionally so each registers its waker with `cx`.
                let auto_txs = auto.poll(pool, cx);
                let fixed_txs = fixed.poll(pool, cx);
                match (auto_txs, fixed_txs) {
                    // Both auto and fixed transactions are ready, combine them
                    (Poll::Ready(mut auto_txs), Poll::Ready(fixed_txs)) => {
                        for tx in fixed_txs {
                            // filter unique transactions
                            if auto_txs.iter().any(|auto_tx| auto_tx.hash() == tx.hash()) {
                                continue;
                            }
                            auto_txs.push(tx);
                        }
                        Poll::Ready(auto_txs)
                    }
                    // Only auto transactions are ready, return them
                    (Poll::Ready(auto_txs), Poll::Pending) => Poll::Ready(auto_txs),
                    // Only fixed transactions are ready or both are pending,
                    // return fixed transactions or pending status
                    (Poll::Pending, fixed_txs) => fixed_txs,
                }
            }
        }
    }
}
/// A miner that's supposed to create a new block every `interval`, mining all transactions that are
/// ready at that time.
///
/// The default blocktime is set to 6 seconds
#[derive(Debug)]
pub struct FixedBlockTimeMiner {
    /// The interval this fixed block time miner operates with
    interval: Interval,
}
impl FixedBlockTimeMiner {
    /// Creates a new instance with an interval of `duration`
    pub fn new(duration: Duration) -> Self {
        let first_tick = tokio::time::Instant::now() + duration;
        let mut interval = tokio::time::interval_at(first_tick, duration);
        // we use delay here, to ensure ticks are not shortened and to tick at multiples of
        // interval from when tick was called rather than from start
        interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
        Self { interval }
    }

    fn poll(&mut self, pool: &Arc<Pool>, cx: &mut Context<'_>) -> Poll<Vec<Arc<PoolTransaction>>> {
        match self.interval.poll_tick(cx) {
            // A tick elapsed: drain everything currently ready in the pool.
            Poll::Ready(_) => Poll::Ready(pool.ready_transactions().collect()),
            Poll::Pending => Poll::Pending,
        }
    }
}
impl Default for FixedBlockTimeMiner {
    fn default() -> Self {
        // Default block time: 6 seconds.
        let block_time = Duration::from_secs(6);
        Self::new(block_time)
    }
}
/// A miner that Listens for new ready transactions
pub struct ReadyTransactionMiner {
    /// how many transactions to mine per block
    max_transactions: usize,
    /// stores whether there are pending transactions (if known)
    ///
    /// `None` until the first poll; `Some(true)` while the pool may still hold more ready
    /// transactions than fit into a single block.
    has_pending_txs: Option<bool>,
    /// Receives hashes of transactions that are ready
    rx: Fuse<Receiver<TxHash>>,
}
impl ReadyTransactionMiner {
    fn poll(&mut self, pool: &Arc<Pool>, cx: &mut Context<'_>) -> Poll<Vec<Arc<PoolTransaction>>> {
        // always drain the notification stream so that we're woken up as soon as there's a new tx
        while let Poll::Ready(Some(_hash)) = self.rx.poll_next_unpin(cx) {
            self.has_pending_txs = Some(true);
        }
        // Known-empty pool: wait for the next notification instead of scanning the pool again.
        if self.has_pending_txs == Some(false) {
            return Poll::Pending;
        }
        let transactions =
            pool.ready_transactions().take(self.max_transactions).collect::<Vec<_>>();
        // there are pending transactions if we didn't drain the pool
        self.has_pending_txs = Some(transactions.len() >= self.max_transactions);
        if transactions.is_empty() {
            return Poll::Pending;
        }
        Poll::Ready(transactions)
    }
}
impl fmt::Debug for ReadyTransactionMiner {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The receiver has no useful Debug representation; only show the configured limit.
        let mut dbg = f.debug_struct("ReadyTransactionMiner");
        dbg.field("max_transactions", &self.max_transactions);
        dbg.finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/sign.rs | crates/anvil/src/eth/sign.rs | use crate::eth::error::BlockchainError;
use alloy_consensus::{Sealed, SignableTransaction};
use alloy_dyn_abi::TypedData;
use alloy_network::TxSignerSync;
use alloy_primitives::{Address, B256, Signature, map::AddressHashMap};
use alloy_signer::Signer as AlloySigner;
use alloy_signer_local::PrivateKeySigner;
use foundry_primitives::{FoundryTxEnvelope, FoundryTypedTx};
use tempo_primitives::TempoSignature;
/// A transaction signer
#[async_trait::async_trait]
pub trait Signer: Send + Sync {
/// returns the available accounts for this signer
fn accounts(&self) -> Vec<Address>;
/// Returns `true` whether this signer can sign for this address
fn is_signer_for(&self, addr: Address) -> bool {
self.accounts().contains(&addr)
}
/// Returns the signature
async fn sign(&self, address: Address, message: &[u8]) -> Result<Signature, BlockchainError>;
/// Encodes and signs the typed data according EIP-712. Payload must conform to the EIP-712
/// standard.
async fn sign_typed_data(
&self,
address: Address,
payload: &TypedData,
) -> Result<Signature, BlockchainError>;
/// Signs the given hash.
async fn sign_hash(&self, address: Address, hash: B256) -> Result<Signature, BlockchainError>;
/// signs a transaction request using the given account in request
fn sign_transaction(
&self,
request: FoundryTypedTx,
address: &Address,
) -> Result<Signature, BlockchainError>;
}
/// Maintains developer keys
pub struct DevSigner {
addresses: Vec<Address>,
accounts: AddressHashMap<PrivateKeySigner>,
}
impl DevSigner {
pub fn new(accounts: Vec<PrivateKeySigner>) -> Self {
let addresses = accounts.iter().map(|wallet| wallet.address()).collect::<Vec<_>>();
let accounts = addresses.iter().copied().zip(accounts).collect();
Self { addresses, accounts }
}
}
#[async_trait::async_trait]
impl Signer for DevSigner {
fn accounts(&self) -> Vec<Address> {
self.addresses.clone()
}
fn is_signer_for(&self, addr: Address) -> bool {
self.accounts.contains_key(&addr)
}
async fn sign(&self, address: Address, message: &[u8]) -> Result<Signature, BlockchainError> {
let signer = self.accounts.get(&address).ok_or(BlockchainError::NoSignerAvailable)?;
Ok(signer.sign_message(message).await?)
}
async fn sign_typed_data(
&self,
address: Address,
payload: &TypedData,
) -> Result<Signature, BlockchainError> {
let mut signer =
self.accounts.get(&address).ok_or(BlockchainError::NoSignerAvailable)?.to_owned();
// Explicitly set chainID as none, to avoid any EIP-155 application to `v` when signing
// typed data.
signer.set_chain_id(None);
Ok(signer.sign_dynamic_typed_data(payload).await?)
}
async fn sign_hash(&self, address: Address, hash: B256) -> Result<Signature, BlockchainError> {
let signer = self.accounts.get(&address).ok_or(BlockchainError::NoSignerAvailable)?;
Ok(signer.sign_hash(&hash).await?)
}
fn sign_transaction(
&self,
request: FoundryTypedTx,
address: &Address,
) -> Result<Signature, BlockchainError> {
let signer = self.accounts.get(address).ok_or(BlockchainError::NoSignerAvailable)?;
match request {
FoundryTypedTx::Legacy(mut tx) => Ok(signer.sign_transaction_sync(&mut tx)?),
FoundryTypedTx::Eip2930(mut tx) => Ok(signer.sign_transaction_sync(&mut tx)?),
FoundryTypedTx::Eip1559(mut tx) => Ok(signer.sign_transaction_sync(&mut tx)?),
FoundryTypedTx::Eip7702(mut tx) => Ok(signer.sign_transaction_sync(&mut tx)?),
FoundryTypedTx::Eip4844(mut tx) => Ok(signer.sign_transaction_sync(&mut tx)?),
FoundryTypedTx::Deposit(_) => {
unreachable!("op deposit txs should not be signed")
}
FoundryTypedTx::Tempo(mut tx) => Ok(signer.sign_transaction_sync(&mut tx)?),
}
}
}
/// converts the `request` into a [`FoundryTypedTx`] with the given signature
///
/// # Errors
///
/// This will fail if the `signature` contains an erroneous recovery id.
pub fn build_typed_transaction(
request: FoundryTypedTx,
signature: Signature,
) -> Result<FoundryTxEnvelope, BlockchainError> {
let tx = match request {
FoundryTypedTx::Legacy(tx) => FoundryTxEnvelope::Legacy(tx.into_signed(signature)),
FoundryTypedTx::Eip2930(tx) => FoundryTxEnvelope::Eip2930(tx.into_signed(signature)),
FoundryTypedTx::Eip1559(tx) => FoundryTxEnvelope::Eip1559(tx.into_signed(signature)),
FoundryTypedTx::Eip7702(tx) => FoundryTxEnvelope::Eip7702(tx.into_signed(signature)),
FoundryTypedTx::Eip4844(tx) => FoundryTxEnvelope::Eip4844(tx.into_signed(signature)),
FoundryTypedTx::Deposit(tx) => FoundryTxEnvelope::Deposit(Sealed::new(tx)),
FoundryTypedTx::Tempo(tx) => {
let tempo_sig: TempoSignature = signature.into();
FoundryTxEnvelope::Tempo(tx.into_signed(tempo_sig))
}
};
Ok(tx)
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/error.rs | crates/anvil/src/eth/error.rs | //! Aggregated error type for this module
use crate::eth::pool::transactions::PoolTransaction;
use alloy_consensus::crypto::RecoveryError;
use alloy_evm::overrides::StateOverrideError;
use alloy_primitives::{B256, Bytes, SignatureError};
use alloy_rpc_types::BlockNumberOrTag;
use alloy_signer::Error as SignerError;
use alloy_transport::TransportError;
use anvil_core::eth::wallet::WalletError;
use anvil_rpc::{
error::{ErrorCode, RpcError},
response::ResponseResult,
};
use foundry_evm::{backend::DatabaseError, decode::RevertDecoder};
use op_revm::OpTransactionError;
use revm::{
context_interface::result::{EVMError, InvalidHeader, InvalidTransaction},
interpreter::InstructionResult,
};
use serde::Serialize;
use tokio::time::Duration;
pub(crate) type Result<T> = std::result::Result<T, BlockchainError>;
#[derive(Debug, thiserror::Error)]
pub enum BlockchainError {
#[error(transparent)]
Pool(#[from] PoolError),
#[error("No signer available")]
NoSignerAvailable,
#[error("Chain Id not available")]
ChainIdNotAvailable,
#[error("Invalid input: `max_priority_fee_per_gas` greater than `max_fee_per_gas`")]
InvalidFeeInput,
#[error("Transaction data is empty")]
EmptyRawTransactionData,
#[error("Failed to decode signed transaction")]
FailedToDecodeSignedTransaction,
#[error("Failed to decode transaction")]
FailedToDecodeTransaction,
#[error("Failed to decode receipt")]
FailedToDecodeReceipt,
#[error("Failed to decode state")]
FailedToDecodeStateDump,
#[error("Prevrandao not in the EVM's environment after merge")]
PrevrandaoNotSet,
#[error(transparent)]
SignatureError(#[from] SignatureError),
#[error(transparent)]
RecoveryError(#[from] RecoveryError),
#[error(transparent)]
SignerError(#[from] SignerError),
#[error("Rpc Endpoint not implemented")]
RpcUnimplemented,
#[error("Rpc error {0:?}")]
RpcError(RpcError),
#[error(transparent)]
InvalidTransaction(#[from] InvalidTransactionError),
#[error(transparent)]
FeeHistory(#[from] FeeHistoryError),
#[error(transparent)]
AlloyForkProvider(#[from] TransportError),
#[error("EVM error {0:?}")]
EvmError(InstructionResult),
#[error("Evm override error: {0}")]
EvmOverrideError(String),
#[error("Invalid url {0:?}")]
InvalidUrl(String),
#[error("Internal error: {0:?}")]
Internal(String),
#[error("BlockOutOfRangeError: block height is {0} but requested was {1}")]
BlockOutOfRange(u64, u64),
#[error("Resource not found")]
BlockNotFound,
/// Thrown when a requested transaction is not found
#[error("transaction not found")]
TransactionNotFound,
#[error("Required data unavailable")]
DataUnavailable,
#[error("Trie error: {0}")]
TrieError(String),
#[error("{0}")]
UintConversion(&'static str),
#[error("State override error: {0}")]
StateOverrideError(String),
#[error("Timestamp error: {0}")]
TimestampError(String),
#[error(transparent)]
DatabaseError(#[from] DatabaseError),
#[error(
"EIP-1559 style fee params (maxFeePerGas or maxPriorityFeePerGas) received but they are not supported by the current hardfork.\n\nYou can use them by running anvil with '--hardfork london' or later."
)]
EIP1559TransactionUnsupportedAtHardfork,
#[error(
"Access list received but is not supported by the current hardfork.\n\nYou can use it by running anvil with '--hardfork berlin' or later."
)]
EIP2930TransactionUnsupportedAtHardfork,
#[error(
"EIP-4844 fields received but is not supported by the current hardfork.\n\nYou can use it by running anvil with '--hardfork cancun' or later."
)]
EIP4844TransactionUnsupportedAtHardfork,
#[error(
"EIP-7702 fields received but is not supported by the current hardfork.\n\nYou can use it by running anvil with '--hardfork prague' or later."
)]
EIP7702TransactionUnsupportedAtHardfork,
#[error(
"op-stack deposit tx received but is not supported.\n\nYou can use it by running anvil with '--optimism'."
)]
DepositTransactionUnsupported,
#[error("Unknown transaction type not supported")]
UnknownTransactionType,
#[error("Excess blob gas not set.")]
ExcessBlobGasNotSet,
#[error("{0}")]
Message(String),
#[error("Transaction {hash} was added to the mempool but wasn't confirmed within {duration:?}")]
TransactionConfirmationTimeout {
/// Hash of the transaction that timed out
hash: B256,
/// Duration that was waited before timing out
duration: Duration,
},
#[error("Invalid transaction request: {0}")]
InvalidTransactionRequest(String),
}
impl From<eyre::Report> for BlockchainError {
fn from(err: eyre::Report) -> Self {
Self::Message(err.to_string())
}
}
impl From<RpcError> for BlockchainError {
fn from(err: RpcError) -> Self {
Self::RpcError(err)
}
}
impl<T> From<EVMError<T>> for BlockchainError
where
T: Into<Self>,
{
fn from(err: EVMError<T>) -> Self {
match err {
EVMError::Transaction(err) => InvalidTransactionError::from(err).into(),
EVMError::Header(err) => match err {
InvalidHeader::ExcessBlobGasNotSet => Self::ExcessBlobGasNotSet,
InvalidHeader::PrevrandaoNotSet => Self::PrevrandaoNotSet,
},
EVMError::Database(err) => err.into(),
EVMError::Custom(err) => Self::Message(err),
}
}
}
impl<T> From<EVMError<T, OpTransactionError>> for BlockchainError
where
T: Into<Self>,
{
fn from(err: EVMError<T, OpTransactionError>) -> Self {
match err {
EVMError::Transaction(err) => match err {
OpTransactionError::Base(err) => InvalidTransactionError::from(err).into(),
OpTransactionError::DepositSystemTxPostRegolith => {
Self::DepositTransactionUnsupported
}
OpTransactionError::HaltedDepositPostRegolith => {
Self::DepositTransactionUnsupported
}
OpTransactionError::MissingEnvelopedTx => Self::InvalidTransaction(err.into()),
},
EVMError::Header(err) => match err {
InvalidHeader::ExcessBlobGasNotSet => Self::ExcessBlobGasNotSet,
InvalidHeader::PrevrandaoNotSet => Self::PrevrandaoNotSet,
},
EVMError::Database(err) => err.into(),
EVMError::Custom(err) => Self::Message(err),
}
}
}
impl From<WalletError> for BlockchainError {
fn from(value: WalletError) -> Self {
Self::Message(value.to_string())
}
}
impl<E> From<StateOverrideError<E>> for BlockchainError
where
E: Into<Self>,
{
fn from(value: StateOverrideError<E>) -> Self {
match value {
StateOverrideError::InvalidBytecode(err) => Self::StateOverrideError(err.to_string()),
StateOverrideError::BothStateAndStateDiff(addr) => Self::StateOverrideError(format!(
"state and state_diff can't be used together for account {addr}",
)),
StateOverrideError::Database(err) => err.into(),
}
}
}
/// Errors that can occur in the transaction pool
#[derive(Debug, thiserror::Error)]
pub enum PoolError {
#[error("Transaction with cyclic dependent transactions")]
CyclicTransaction,
/// Thrown if a replacement transaction's gas price is below the already imported transaction
#[error("Tx: [{0:?}] insufficient gas price to replace existing transaction")]
ReplacementUnderpriced(Box<PoolTransaction>),
#[error("Tx: [{0:?}] already Imported")]
AlreadyImported(Box<PoolTransaction>),
}
/// Errors that can occur with `eth_feeHistory`
#[derive(Debug, thiserror::Error)]
pub enum FeeHistoryError {
#[error("requested block range is out of bounds")]
InvalidBlockRange,
#[error("could not find newest block number requested: {0}")]
BlockNotFound(BlockNumberOrTag),
}
#[derive(Debug)]
pub struct ErrDetail {
pub detail: String,
}
/// An error due to invalid transaction
#[derive(Debug, thiserror::Error)]
pub enum InvalidTransactionError {
/// returned if the nonce of a transaction is lower than the one present in the local chain.
#[error("nonce too low")]
NonceTooLow,
/// returned if the nonce of a transaction is higher than the next one expected based on the
/// local chain.
#[error("Nonce too high")]
NonceTooHigh,
/// Returned if the nonce of a transaction is too high
/// Incrementing the nonce would lead to invalid state (overflow)
#[error("nonce has max value")]
NonceMaxValue,
/// thrown if the transaction sender doesn't have enough funds for a transfer
#[error("insufficient funds for transfer")]
InsufficientFundsForTransfer,
/// thrown if creation transaction provides the init code bigger than init code size limit.
#[error("max initcode size exceeded")]
MaxInitCodeSizeExceeded,
/// Represents the inability to cover max cost + value (account balance too low).
#[error("Insufficient funds for gas * price + value")]
InsufficientFunds,
/// Thrown when calculating gas usage
#[error("gas uint64 overflow")]
GasUintOverflow,
/// returned if the transaction is specified to use less gas than required to start the
/// invocation.
#[error("intrinsic gas too low")]
GasTooLow,
/// returned if the transaction gas exceeds the limit
#[error("intrinsic gas too high -- {}",.0.detail)]
GasTooHigh(ErrDetail),
/// Thrown to ensure no one is able to specify a transaction with a tip higher than the total
/// fee cap.
#[error("max priority fee per gas higher than max fee per gas")]
TipAboveFeeCap,
/// Thrown post London if the transaction's fee is less than the base fee of the block
#[error("max fee per gas less than block base fee")]
FeeCapTooLow,
/// Thrown during estimate if caller has insufficient funds to cover the tx.
#[error("Out of gas: gas required exceeds allowance: {0:?}")]
BasicOutOfGas(u128),
/// Thrown if executing a transaction failed during estimate/call
#[error("execution reverted: {0:?}")]
Revert(Option<Bytes>),
/// Thrown if the sender of a transaction is a contract.
#[error("sender not an eoa")]
SenderNoEOA,
/// Thrown when a tx was signed with a different chain_id
#[error("invalid chain id for signer")]
InvalidChainId,
/// Thrown when a legacy tx was signed for a different chain
#[error("Incompatible EIP-155 transaction, signed for another chain")]
IncompatibleEIP155,
/// Thrown when an access list is used before the berlin hard fork.
#[error("Access lists are not supported before the Berlin hardfork")]
AccessListNotSupported,
/// Thrown when the block's `blob_gas_price` is greater than tx-specified
/// `max_fee_per_blob_gas` after Cancun.
#[error("Block `blob_gas_price` is greater than tx-specified `max_fee_per_blob_gas`")]
BlobFeeCapTooLow(u128, u128),
/// Thrown when we receive a tx with `blob_versioned_hashes` and we're not on the Cancun hard
/// fork.
#[error("Block `blob_versioned_hashes` is not supported before the Cancun hardfork")]
BlobVersionedHashesNotSupported,
/// Thrown when `max_fee_per_blob_gas` is not supported for blocks before the Cancun hardfork.
#[error("`max_fee_per_blob_gas` is not supported for blocks before the Cancun hardfork.")]
MaxFeePerBlobGasNotSupported,
/// Thrown when there are no `blob_hashes` in the transaction, and it is an EIP-4844 tx.
#[error("`blob_hashes` are required for EIP-4844 transactions")]
NoBlobHashes,
#[error("too many blobs in one transaction, have: {0}, max: {1}")]
TooManyBlobs(usize, usize),
/// Thrown when there's a blob validation error
#[error(transparent)]
BlobTransactionValidationError(#[from] alloy_consensus::BlobTransactionValidationError),
/// Thrown when Blob transaction is a create transaction. `to` must be present.
#[error("Blob transaction can't be a create transaction. `to` must be present.")]
BlobCreateTransaction,
/// Thrown when Blob transaction contains a versioned hash with an incorrect version.
#[error("Blob transaction contains a versioned hash with an incorrect version")]
BlobVersionNotSupported,
/// Thrown when there are no `blob_hashes` in the transaction.
#[error("There should be at least one blob in a Blob transaction.")]
EmptyBlobs,
/// Thrown when an access list is used before the berlin hard fork.
#[error("EIP-7702 authorization lists are not supported before the Prague hardfork")]
AuthorizationListNotSupported,
#[error("Transaction gas limit is greater than the block gas limit, gas_limit: {0}, cap: {1}")]
TxGasLimitGreaterThanCap(u64, u64),
/// Forwards error from the revm
#[error(transparent)]
Revm(revm::context_interface::result::InvalidTransaction),
/// Deposit transaction error post regolith
#[error("op-deposit failure post regolith")]
DepositTxErrorPostRegolith,
/// Missing enveloped transaction
#[error("missing enveloped transaction")]
MissingEnvelopedTx,
}
impl From<InvalidTransaction> for InvalidTransactionError {
fn from(err: InvalidTransaction) -> Self {
match err {
InvalidTransaction::InvalidChainId => Self::InvalidChainId,
InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap,
InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow,
InvalidTransaction::CallerGasLimitMoreThanBlock => {
Self::GasTooHigh(ErrDetail { detail: String::from("CallerGasLimitMoreThanBlock") })
}
InvalidTransaction::CallGasCostMoreThanGasLimit { .. } => {
Self::GasTooHigh(ErrDetail { detail: String::from("CallGasCostMoreThanGasLimit") })
}
InvalidTransaction::GasFloorMoreThanGasLimit { .. } => {
Self::GasTooHigh(ErrDetail { detail: String::from("GasFloorMoreThanGasLimit") })
}
InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA,
InvalidTransaction::LackOfFundForMaxFee { .. } => Self::InsufficientFunds,
InvalidTransaction::OverflowPaymentInTransaction => Self::GasUintOverflow,
InvalidTransaction::NonceOverflowInTransaction => Self::NonceMaxValue,
InvalidTransaction::CreateInitCodeSizeLimit => Self::MaxInitCodeSizeExceeded,
InvalidTransaction::NonceTooHigh { .. } => Self::NonceTooHigh,
InvalidTransaction::NonceTooLow { .. } => Self::NonceTooLow,
InvalidTransaction::AccessListNotSupported => Self::AccessListNotSupported,
InvalidTransaction::BlobGasPriceGreaterThanMax {
block_blob_gas_price,
tx_max_fee_per_blob_gas,
} => Self::BlobFeeCapTooLow(block_blob_gas_price, tx_max_fee_per_blob_gas),
InvalidTransaction::BlobVersionedHashesNotSupported => {
Self::BlobVersionedHashesNotSupported
}
InvalidTransaction::MaxFeePerBlobGasNotSupported => Self::MaxFeePerBlobGasNotSupported,
InvalidTransaction::BlobCreateTransaction => Self::BlobCreateTransaction,
InvalidTransaction::BlobVersionNotSupported => Self::BlobVersionNotSupported,
InvalidTransaction::EmptyBlobs => Self::EmptyBlobs,
InvalidTransaction::TooManyBlobs { have, max } => Self::TooManyBlobs(have, max),
InvalidTransaction::AuthorizationListNotSupported => {
Self::AuthorizationListNotSupported
}
InvalidTransaction::TxGasLimitGreaterThanCap { gas_limit, cap } => {
Self::TxGasLimitGreaterThanCap(gas_limit, cap)
}
InvalidTransaction::AuthorizationListInvalidFields
| InvalidTransaction::Eip1559NotSupported
| InvalidTransaction::Eip2930NotSupported
| InvalidTransaction::Eip4844NotSupported
| InvalidTransaction::Eip7702NotSupported
| InvalidTransaction::EmptyAuthorizationList
| InvalidTransaction::Eip7873NotSupported
| InvalidTransaction::Eip7873MissingTarget
| InvalidTransaction::MissingChainId
| InvalidTransaction::Str(_) => Self::Revm(err),
}
}
}
impl From<OpTransactionError> for InvalidTransactionError {
fn from(value: OpTransactionError) -> Self {
match value {
OpTransactionError::Base(err) => err.into(),
OpTransactionError::DepositSystemTxPostRegolith
| OpTransactionError::HaltedDepositPostRegolith => Self::DepositTxErrorPostRegolith,
OpTransactionError::MissingEnvelopedTx => Self::MissingEnvelopedTx,
}
}
}
/// Helper trait to easily convert results to rpc results
pub(crate) trait ToRpcResponseResult {
fn to_rpc_result(self) -> ResponseResult;
}
/// Converts a serializable value into a `ResponseResult`
pub fn to_rpc_result<T: Serialize>(val: T) -> ResponseResult {
match serde_json::to_value(val) {
Ok(success) => ResponseResult::Success(success),
Err(err) => {
error!(%err, "Failed serialize rpc response");
ResponseResult::error(RpcError::internal_error())
}
}
}
impl<T: Serialize> ToRpcResponseResult for Result<T> {
fn to_rpc_result(self) -> ResponseResult {
match self {
Ok(val) => to_rpc_result(val),
Err(err) => match err {
BlockchainError::Pool(err) => {
error!(%err, "txpool error");
match err {
PoolError::CyclicTransaction => {
RpcError::transaction_rejected("Cyclic transaction detected")
}
PoolError::ReplacementUnderpriced(_) => {
RpcError::transaction_rejected("replacement transaction underpriced")
}
PoolError::AlreadyImported(_) => {
RpcError::transaction_rejected("transaction already imported")
}
}
}
BlockchainError::NoSignerAvailable => {
RpcError::invalid_params("No Signer available")
}
BlockchainError::ChainIdNotAvailable => {
RpcError::invalid_params("Chain Id not available")
}
BlockchainError::TransactionConfirmationTimeout { .. } => {
RpcError::internal_error_with("Transaction confirmation timeout")
}
BlockchainError::InvalidTransaction(err) => match err {
InvalidTransactionError::Revert(data) => {
// this mimics geth revert error
let mut msg = "execution reverted".to_string();
if let Some(reason) = data
.as_ref()
.and_then(|data| RevertDecoder::new().maybe_decode(data, None))
{
msg = format!("{msg}: {reason}");
}
RpcError {
// geth returns this error code on reverts, See <https://eips.ethereum.org/EIPS/eip-1474#specification>
code: ErrorCode::ExecutionError,
message: msg.into(),
data: serde_json::to_value(data).ok(),
}
}
InvalidTransactionError::GasTooLow => {
// <https://eips.ethereum.org/EIPS/eip-1898>
RpcError {
code: ErrorCode::ServerError(-32000),
message: err.to_string().into(),
data: None,
}
}
InvalidTransactionError::GasTooHigh(_) => {
// <https://eips.ethereum.org/EIPS/eip-1898>
RpcError {
code: ErrorCode::ServerError(-32000),
message: err.to_string().into(),
data: None,
}
}
_ => RpcError::transaction_rejected(err.to_string()),
},
BlockchainError::FeeHistory(err) => RpcError::invalid_params(err.to_string()),
BlockchainError::EmptyRawTransactionData => {
RpcError::invalid_params("Empty transaction data")
}
BlockchainError::FailedToDecodeSignedTransaction => {
RpcError::invalid_params("Failed to decode transaction")
}
BlockchainError::FailedToDecodeTransaction => {
RpcError::invalid_params("Failed to decode transaction")
}
BlockchainError::FailedToDecodeReceipt => {
RpcError::invalid_params("Failed to decode receipt")
}
BlockchainError::FailedToDecodeStateDump => {
RpcError::invalid_params("Failed to decode state dump")
}
BlockchainError::SignerError(err) => RpcError::invalid_params(err.to_string()),
BlockchainError::SignatureError(err) => RpcError::invalid_params(err.to_string()),
BlockchainError::RpcUnimplemented => {
RpcError::internal_error_with("Not implemented")
}
BlockchainError::PrevrandaoNotSet => RpcError::internal_error_with(err.to_string()),
BlockchainError::RpcError(err) => err,
BlockchainError::InvalidFeeInput => RpcError::invalid_params(
"Invalid input: `max_priority_fee_per_gas` greater than `max_fee_per_gas`",
),
BlockchainError::AlloyForkProvider(err) => {
error!(target: "backend", %err, "fork provider error");
match err {
TransportError::ErrorResp(err) => RpcError {
code: ErrorCode::from(err.code),
message: err.message,
data: err.data.and_then(|data| serde_json::to_value(data).ok()),
},
err => RpcError::internal_error_with(format!("Fork Error: {err:?}")),
}
}
err @ BlockchainError::EvmError(_) => {
RpcError::internal_error_with(err.to_string())
}
err @ BlockchainError::EvmOverrideError(_) => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::InvalidUrl(_) => RpcError::invalid_params(err.to_string()),
BlockchainError::Internal(err) => RpcError::internal_error_with(err),
err @ BlockchainError::BlockOutOfRange(_, _) => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::BlockNotFound => RpcError {
// <https://eips.ethereum.org/EIPS/eip-1898>
code: ErrorCode::ServerError(-32001),
message: err.to_string().into(),
data: None,
},
err @ BlockchainError::TransactionNotFound => RpcError {
code: ErrorCode::ServerError(-32001),
message: err.to_string().into(),
data: None,
},
err @ BlockchainError::DataUnavailable => {
RpcError::internal_error_with(err.to_string())
}
err @ BlockchainError::TrieError(_) => {
RpcError::internal_error_with(err.to_string())
}
BlockchainError::UintConversion(err) => RpcError::invalid_params(err),
err @ BlockchainError::StateOverrideError(_) => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::TimestampError(_) => {
RpcError::invalid_params(err.to_string())
}
BlockchainError::DatabaseError(err) => {
RpcError::internal_error_with(err.to_string())
}
err @ BlockchainError::EIP1559TransactionUnsupportedAtHardfork => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::EIP2930TransactionUnsupportedAtHardfork => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::EIP4844TransactionUnsupportedAtHardfork => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::EIP7702TransactionUnsupportedAtHardfork => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::DepositTransactionUnsupported => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::ExcessBlobGasNotSet => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::Message(_) => RpcError::internal_error_with(err.to_string()),
err @ BlockchainError::UnknownTransactionType => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::InvalidTransactionRequest(_) => {
RpcError::invalid_params(err.to_string())
}
err @ BlockchainError::RecoveryError(_) => {
RpcError::invalid_params(err.to_string())
}
}
.into(),
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/api.rs | crates/anvil/src/eth/api.rs | use super::{
backend::mem::{BlockRequest, DatabaseRef, State},
sign::build_typed_transaction,
};
use crate::{
ClientFork, LoggingManager, Miner, MiningMode, StorageInfo,
eth::{
backend::{
self,
db::SerializableState,
mem::{MIN_CREATE_GAS, MIN_TRANSACTION_GAS},
notifications::NewBlockNotifications,
validate::TransactionValidator,
},
error::{
BlockchainError, FeeHistoryError, InvalidTransactionError, Result, ToRpcResponseResult,
},
fees::{FeeDetails, FeeHistoryCache, MIN_SUGGESTED_PRIORITY_FEE},
macros::node_info,
miner::FixedBlockTimeMiner,
pool::{
Pool,
transactions::{
PoolTransaction, TransactionOrder, TransactionPriority, TxMarker, to_marker,
},
},
sign::{self, Signer},
},
filter::{EthFilter, Filters, LogsFilter},
mem::transaction_build,
};
use alloy_consensus::{Blob, Transaction, TrieAccount, TxEip4844Variant, transaction::Recovered};
use alloy_dyn_abi::TypedData;
use alloy_eips::{
eip2718::Encodable2718,
eip4844::BlobTransactionSidecar,
eip7910::{EthConfig, EthForkConfig},
};
use alloy_evm::overrides::{OverrideBlockHashes, apply_state_overrides};
use alloy_network::{
AnyRpcBlock, AnyRpcTransaction, BlockResponse, ReceiptResponse, TransactionBuilder,
TransactionBuilder4844, TransactionResponse, eip2718::Decodable2718,
};
use alloy_primitives::{
Address, B64, B256, Bytes, Signature, TxHash, TxKind, U64, U256,
map::{HashMap, HashSet},
};
use alloy_rpc_types::{
AccessList, AccessListResult, BlockId, BlockNumberOrTag as BlockNumber, BlockTransactions,
EIP1186AccountProofResponse, FeeHistory, Filter, FilteredParams, Index, Log, Work,
anvil::{
ForkedNetwork, Forking, Metadata, MineOptions, NodeEnvironment, NodeForkConfig, NodeInfo,
},
request::TransactionRequest,
simulate::{SimulatePayload, SimulatedBlock},
state::{AccountOverride, EvmOverrides, StateOverridesBuilder},
trace::{
filter::TraceFilter,
geth::{GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace},
parity::LocalizedTransactionTrace,
},
txpool::{TxpoolContent, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus},
};
use alloy_rpc_types_eth::FillTransaction;
use alloy_serde::WithOtherFields;
use alloy_sol_types::{SolCall, SolValue, sol};
use alloy_transport::TransportErrorKind;
use anvil_core::{
eth::{
EthRequest,
block::BlockInfo,
transaction::{MaybeImpersonatedTransaction, PendingTransaction},
wallet::WalletCapabilities,
},
types::{ReorgOptions, TransactionData},
};
use anvil_rpc::{error::RpcError, response::ResponseResult};
use foundry_common::provider::ProviderBuilder;
use foundry_evm::decode::RevertDecoder;
use foundry_primitives::{
FoundryTransactionRequest, FoundryTxEnvelope, FoundryTxReceipt, FoundryTxType, FoundryTypedTx,
};
use futures::{
StreamExt, TryFutureExt,
channel::{mpsc::Receiver, oneshot},
};
use parking_lot::RwLock;
use revm::{
context::BlockEnv,
context_interface::{block::BlobExcessGasAndPrice, result::Output},
database::CacheDB,
interpreter::{InstructionResult, return_ok, return_revert},
primitives::eip7702::PER_EMPTY_ACCOUNT_COST,
};
use std::{sync::Arc, time::Duration};
use tokio::{
sync::mpsc::{UnboundedReceiver, unbounded_channel},
try_join,
};
/// The client version: `anvil/v{major}.{minor}.{patch}`
pub const CLIENT_VERSION: &str = concat!("anvil/v", env!("CARGO_PKG_VERSION"));
/// The entry point for executing eth api RPC call - The Eth RPC interface.
///
/// This type is cheap to clone and can be used concurrently
#[derive(Clone)]
pub struct EthApi {
/// The transaction pool
pool: Arc<Pool>,
/// Holds all blockchain related data
/// In-Memory only for now
pub backend: Arc<backend::mem::Backend>,
/// Whether this node is mining
is_mining: bool,
/// available signers
signers: Arc<Vec<Box<dyn Signer>>>,
/// data required for `eth_feeHistory`
fee_history_cache: FeeHistoryCache,
/// max number of items kept in fee cache
fee_history_limit: u64,
/// access to the actual miner
///
/// This access is required in order to adjust miner settings based on requests received from
/// custom RPC endpoints
miner: Miner,
/// allows to enabled/disable logging
logger: LoggingManager,
/// Tracks all active filters
filters: Filters,
/// How transactions are ordered in the pool
transaction_order: Arc<RwLock<TransactionOrder>>,
/// Whether we're listening for RPC calls
net_listening: bool,
/// The instance ID. Changes on every reset.
instance_id: Arc<RwLock<B256>>,
}
impl EthApi {
/// Creates a new instance
#[expect(clippy::too_many_arguments)]
pub fn new(
pool: Arc<Pool>,
backend: Arc<backend::mem::Backend>,
signers: Arc<Vec<Box<dyn Signer>>>,
fee_history_cache: FeeHistoryCache,
fee_history_limit: u64,
miner: Miner,
logger: LoggingManager,
filters: Filters,
transactions_order: TransactionOrder,
) -> Self {
Self {
pool,
backend,
is_mining: true,
signers,
fee_history_cache,
fee_history_limit,
miner,
logger,
filters,
net_listening: true,
transaction_order: Arc::new(RwLock::new(transactions_order)),
instance_id: Arc::new(RwLock::new(B256::random())),
}
}
/// Executes the [EthRequest] and returns an RPC [ResponseResult].
///
/// This is the single dispatch point for every RPC method the node supports:
/// each request variant is routed to its handler and the result converted into
/// an RPC response. Failed requests are echoed to the node's user-facing log.
pub async fn execute(&self, request: EthRequest) -> ResponseResult {
    trace!(target: "rpc::api", "executing eth request");
    // The request is cloned so it can still be logged if the handler fails.
    let response = match request.clone() {
        EthRequest::EthProtocolVersion(()) => self.protocol_version().to_rpc_result(),
        EthRequest::Web3ClientVersion(()) => self.client_version().to_rpc_result(),
        EthRequest::Web3Sha3(content) => self.sha3(content).to_rpc_result(),
        EthRequest::EthGetAccount(addr, block) => {
            self.get_account(addr, block).await.to_rpc_result()
        }
        EthRequest::EthGetAccountInfo(addr, block) => {
            self.get_account_info(addr, block).await.to_rpc_result()
        }
        EthRequest::EthGetBalance(addr, block) => {
            self.balance(addr, block).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionByHash(hash) => {
            self.transaction_by_hash(hash).await.to_rpc_result()
        }
        EthRequest::EthSendTransaction(request) => {
            self.send_transaction(*request).await.to_rpc_result()
        }
        EthRequest::EthSendTransactionSync(request) => {
            self.send_transaction_sync(*request).await.to_rpc_result()
        }
        EthRequest::EthChainId(_) => self.eth_chain_id().to_rpc_result(),
        EthRequest::EthNetworkId(_) => self.network_id().to_rpc_result(),
        EthRequest::NetListening(_) => self.net_listening().to_rpc_result(),
        EthRequest::EthHashrate(()) => self.hashrate().to_rpc_result(),
        EthRequest::EthGasPrice(_) => self.eth_gas_price().to_rpc_result(),
        EthRequest::EthMaxPriorityFeePerGas(_) => {
            self.gas_max_priority_fee_per_gas().to_rpc_result()
        }
        EthRequest::EthBlobBaseFee(_) => self.blob_base_fee().to_rpc_result(),
        EthRequest::EthAccounts(_) => self.accounts().to_rpc_result(),
        EthRequest::EthBlockNumber(_) => self.block_number().to_rpc_result(),
        EthRequest::EthCoinbase(()) => self.author().to_rpc_result(),
        EthRequest::EthGetStorageAt(addr, slot, block) => {
            self.storage_at(addr, slot, block).await.to_rpc_result()
        }
        // `full` selects between full transaction objects and hashes only.
        EthRequest::EthGetBlockByHash(hash, full) => {
            if full {
                self.block_by_hash_full(hash).await.to_rpc_result()
            } else {
                self.block_by_hash(hash).await.to_rpc_result()
            }
        }
        EthRequest::EthGetBlockByNumber(num, full) => {
            if full {
                self.block_by_number_full(num).await.to_rpc_result()
            } else {
                self.block_by_number(num).await.to_rpc_result()
            }
        }
        EthRequest::EthGetTransactionCount(addr, block) => {
            self.transaction_count(addr, block).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionCountByHash(hash) => {
            self.block_transaction_count_by_hash(hash).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionCountByNumber(num) => {
            self.block_transaction_count_by_number(num).await.to_rpc_result()
        }
        EthRequest::EthGetUnclesCountByHash(hash) => {
            self.block_uncles_count_by_hash(hash).await.to_rpc_result()
        }
        EthRequest::EthGetUnclesCountByNumber(num) => {
            self.block_uncles_count_by_number(num).await.to_rpc_result()
        }
        EthRequest::EthGetCodeAt(addr, block) => {
            self.get_code(addr, block).await.to_rpc_result()
        }
        EthRequest::EthGetProof(addr, keys, block) => {
            self.get_proof(addr, keys, block).await.to_rpc_result()
        }
        EthRequest::EthSign(addr, content) => self.sign(addr, content).await.to_rpc_result(),
        // `personal_sign` takes (content, addr); both route to the same handler.
        EthRequest::PersonalSign(content, addr) => {
            self.sign(addr, content).await.to_rpc_result()
        }
        EthRequest::EthSignTransaction(request) => {
            self.sign_transaction(*request).await.to_rpc_result()
        }
        EthRequest::EthSignTypedData(addr, data) => {
            self.sign_typed_data(addr, data).await.to_rpc_result()
        }
        EthRequest::EthSignTypedDataV3(addr, data) => {
            self.sign_typed_data_v3(addr, data).await.to_rpc_result()
        }
        EthRequest::EthSignTypedDataV4(addr, data) => {
            self.sign_typed_data_v4(addr, &data).await.to_rpc_result()
        }
        EthRequest::EthSendRawTransaction(tx) => {
            self.send_raw_transaction(tx).await.to_rpc_result()
        }
        EthRequest::EthSendRawTransactionSync(tx) => {
            self.send_raw_transaction_sync(tx).await.to_rpc_result()
        }
        EthRequest::EthCall(call, block, state_override, block_overrides) => self
            .call(call, block, EvmOverrides::new(state_override, block_overrides))
            .await
            .to_rpc_result(),
        EthRequest::EthSimulateV1(simulation, block) => {
            self.simulate_v1(simulation, block).await.to_rpc_result()
        }
        EthRequest::EthCreateAccessList(call, block) => {
            self.create_access_list(call, block).await.to_rpc_result()
        }
        EthRequest::EthEstimateGas(call, block, state_override, block_overrides) => self
            .estimate_gas(call, block, EvmOverrides::new(state_override, block_overrides))
            .await
            .to_rpc_result(),
        EthRequest::EthFillTransaction(request) => {
            self.fill_transaction(request).await.to_rpc_result()
        }
        EthRequest::EthGetRawTransactionByHash(hash) => {
            self.raw_transaction(hash).await.to_rpc_result()
        }
        EthRequest::GetBlobByHash(hash) => {
            self.anvil_get_blob_by_versioned_hash(hash).to_rpc_result()
        }
        EthRequest::GetBlobByTransactionHash(hash) => {
            self.anvil_get_blob_by_tx_hash(hash).to_rpc_result()
        }
        EthRequest::GetBlobSidecarsByBlockId(block_id) => {
            self.anvil_get_blob_sidecars_by_block_id(block_id).to_rpc_result()
        }
        EthRequest::GetGenesisTime(()) => self.anvil_get_genesis_time().to_rpc_result(),
        EthRequest::EthGetRawTransactionByBlockHashAndIndex(hash, index) => {
            self.raw_transaction_by_block_hash_and_index(hash, index).await.to_rpc_result()
        }
        EthRequest::EthGetRawTransactionByBlockNumberAndIndex(num, index) => {
            self.raw_transaction_by_block_number_and_index(num, index).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionByBlockHashAndIndex(hash, index) => {
            self.transaction_by_block_hash_and_index(hash, index).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionByBlockNumberAndIndex(num, index) => {
            self.transaction_by_block_number_and_index(num, index).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionReceipt(tx) => {
            self.transaction_receipt(tx).await.to_rpc_result()
        }
        EthRequest::EthGetBlockReceipts(number) => {
            self.block_receipts(number).await.to_rpc_result()
        }
        EthRequest::EthGetUncleByBlockHashAndIndex(hash, index) => {
            self.uncle_by_block_hash_and_index(hash, index).await.to_rpc_result()
        }
        EthRequest::EthGetUncleByBlockNumberAndIndex(num, index) => {
            self.uncle_by_block_number_and_index(num, index).await.to_rpc_result()
        }
        EthRequest::EthGetLogs(filter) => self.logs(filter).await.to_rpc_result(),
        EthRequest::EthGetWork(_) => self.work().to_rpc_result(),
        EthRequest::EthSyncing(_) => self.syncing().to_rpc_result(),
        EthRequest::EthConfig(_) => self.config().to_rpc_result(),
        EthRequest::EthSubmitWork(nonce, pow, digest) => {
            self.submit_work(nonce, pow, digest).to_rpc_result()
        }
        EthRequest::EthSubmitHashRate(rate, id) => {
            self.submit_hashrate(rate, id).to_rpc_result()
        }
        EthRequest::EthFeeHistory(count, newest, reward_percentiles) => {
            self.fee_history(count, newest, reward_percentiles).await.to_rpc_result()
        }
        // non eth-standard rpc calls: `debug_*` namespace
        EthRequest::DebugGetRawTransaction(hash) => {
            self.raw_transaction(hash).await.to_rpc_result()
        }
        EthRequest::DebugTraceTransaction(tx, opts) => {
            self.debug_trace_transaction(tx, opts).await.to_rpc_result()
        }
        EthRequest::DebugTraceCall(tx, block, opts) => {
            self.debug_trace_call(tx, block, opts).await.to_rpc_result()
        }
        EthRequest::DebugCodeByHash(hash, block) => {
            self.debug_code_by_hash(hash, block).await.to_rpc_result()
        }
        EthRequest::DebugDbGet(key) => self.debug_db_get(key).await.to_rpc_result(),
        // `trace_*` namespace
        EthRequest::TraceTransaction(tx) => self.trace_transaction(tx).await.to_rpc_result(),
        EthRequest::TraceBlock(block) => self.trace_block(block).await.to_rpc_result(),
        EthRequest::TraceFilter(filter) => self.trace_filter(filter).await.to_rpc_result(),
        // anvil-specific cheat/management endpoints
        EthRequest::ImpersonateAccount(addr) => {
            self.anvil_impersonate_account(addr).await.to_rpc_result()
        }
        EthRequest::StopImpersonatingAccount(addr) => {
            self.anvil_stop_impersonating_account(addr).await.to_rpc_result()
        }
        EthRequest::AutoImpersonateAccount(enable) => {
            self.anvil_auto_impersonate_account(enable).await.to_rpc_result()
        }
        EthRequest::ImpersonateSignature(signature, address) => {
            self.anvil_impersonate_signature(signature, address).await.to_rpc_result()
        }
        EthRequest::GetAutoMine(()) => self.anvil_get_auto_mine().to_rpc_result(),
        EthRequest::Mine(blocks, interval) => {
            self.anvil_mine(blocks, interval).await.to_rpc_result()
        }
        EthRequest::SetAutomine(enabled) => {
            self.anvil_set_auto_mine(enabled).await.to_rpc_result()
        }
        EthRequest::SetIntervalMining(interval) => {
            self.anvil_set_interval_mining(interval).to_rpc_result()
        }
        EthRequest::GetIntervalMining(()) => self.anvil_get_interval_mining().to_rpc_result(),
        EthRequest::DropTransaction(tx) => {
            self.anvil_drop_transaction(tx).await.to_rpc_result()
        }
        EthRequest::DropAllTransactions() => {
            self.anvil_drop_all_transactions().await.to_rpc_result()
        }
        EthRequest::Reset(fork) => {
            self.anvil_reset(fork.and_then(|p| p.params)).await.to_rpc_result()
        }
        EthRequest::SetBalance(addr, val) => {
            self.anvil_set_balance(addr, val).await.to_rpc_result()
        }
        EthRequest::AddBalance(addr, val) => {
            self.anvil_add_balance(addr, val).await.to_rpc_result()
        }
        EthRequest::DealERC20(addr, token_addr, val) => {
            self.anvil_deal_erc20(addr, token_addr, val).await.to_rpc_result()
        }
        EthRequest::SetERC20Allowance(owner, spender, token_addr, val) => self
            .anvil_set_erc20_allowance(owner, spender, token_addr, val)
            .await
            .to_rpc_result(),
        EthRequest::SetCode(addr, code) => {
            self.anvil_set_code(addr, code).await.to_rpc_result()
        }
        EthRequest::SetNonce(addr, nonce) => {
            self.anvil_set_nonce(addr, nonce).await.to_rpc_result()
        }
        EthRequest::SetStorageAt(addr, slot, val) => {
            self.anvil_set_storage_at(addr, slot, val).await.to_rpc_result()
        }
        EthRequest::SetCoinbase(addr) => self.anvil_set_coinbase(addr).await.to_rpc_result(),
        EthRequest::SetChainId(id) => self.anvil_set_chain_id(id).await.to_rpc_result(),
        EthRequest::SetLogging(log) => self.anvil_set_logging(log).await.to_rpc_result(),
        EthRequest::SetMinGasPrice(gas) => {
            self.anvil_set_min_gas_price(gas).await.to_rpc_result()
        }
        EthRequest::SetNextBlockBaseFeePerGas(gas) => {
            self.anvil_set_next_block_base_fee_per_gas(gas).await.to_rpc_result()
        }
        EthRequest::DumpState(preserve_historical_states) => self
            .anvil_dump_state(preserve_historical_states.and_then(|s| s.params))
            .await
            .to_rpc_result(),
        EthRequest::LoadState(buf) => self.anvil_load_state(buf).await.to_rpc_result(),
        EthRequest::NodeInfo(_) => self.anvil_node_info().await.to_rpc_result(),
        EthRequest::AnvilMetadata(_) => self.anvil_metadata().await.to_rpc_result(),
        // `evm_*` namespace
        EthRequest::EvmSnapshot(_) => self.evm_snapshot().await.to_rpc_result(),
        EthRequest::EvmRevert(id) => self.evm_revert(id).await.to_rpc_result(),
        EthRequest::EvmIncreaseTime(time) => self.evm_increase_time(time).await.to_rpc_result(),
        EthRequest::EvmSetNextBlockTimeStamp(time) => {
            // Timestamps are u64 internally; reject anything that doesn't fit.
            // NOTE(review): `>=` also rejects exactly `u64::MAX` — presumably an
            // intentional sentinel guard; confirm.
            if time >= U256::from(u64::MAX) {
                return ResponseResult::Error(RpcError::invalid_params(
                    "The timestamp is too big",
                ));
            }
            let time = time.to::<u64>();
            self.evm_set_next_block_timestamp(time).to_rpc_result()
        }
        EthRequest::EvmSetTime(timestamp) => {
            // Same u64-range guard as `EvmSetNextBlockTimeStamp` above.
            if timestamp >= U256::from(u64::MAX) {
                return ResponseResult::Error(RpcError::invalid_params(
                    "The timestamp is too big",
                ));
            }
            let time = timestamp.to::<u64>();
            self.evm_set_time(time).to_rpc_result()
        }
        EthRequest::EvmSetBlockGasLimit(gas_limit) => {
            self.evm_set_block_gas_limit(gas_limit).to_rpc_result()
        }
        EthRequest::EvmSetBlockTimeStampInterval(time) => {
            self.evm_set_block_timestamp_interval(time).to_rpc_result()
        }
        EthRequest::EvmRemoveBlockTimeStampInterval(()) => {
            self.evm_remove_block_timestamp_interval().to_rpc_result()
        }
        EthRequest::EvmMine(mine) => {
            self.evm_mine(mine.and_then(|p| p.params)).await.to_rpc_result()
        }
        EthRequest::EvmMineDetailed(mine) => {
            self.evm_mine_detailed(mine.and_then(|p| p.params)).await.to_rpc_result()
        }
        EthRequest::SetRpcUrl(url) => self.anvil_set_rpc_url(url).to_rpc_result(),
        EthRequest::EthSendUnsignedTransaction(tx) => {
            self.eth_send_unsigned_transaction(*tx).await.to_rpc_result()
        }
        EthRequest::EnableTraces(_) => self.anvil_enable_traces().await.to_rpc_result(),
        // filter endpoints
        EthRequest::EthNewFilter(filter) => self.new_filter(filter).await.to_rpc_result(),
        EthRequest::EthGetFilterChanges(id) => self.get_filter_changes(&id).await,
        EthRequest::EthNewBlockFilter(_) => self.new_block_filter().await.to_rpc_result(),
        EthRequest::EthNewPendingTransactionFilter(_) => {
            self.new_pending_transaction_filter().await.to_rpc_result()
        }
        EthRequest::EthGetFilterLogs(id) => self.get_filter_logs(&id).await.to_rpc_result(),
        EthRequest::EthUninstallFilter(id) => self.uninstall_filter(&id).await.to_rpc_result(),
        // `txpool_*` namespace
        EthRequest::TxPoolStatus(_) => self.txpool_status().await.to_rpc_result(),
        EthRequest::TxPoolInspect(_) => self.txpool_inspect().await.to_rpc_result(),
        EthRequest::TxPoolContent(_) => self.txpool_content().await.to_rpc_result(),
        EthRequest::ErigonGetHeaderByNumber(num) => {
            self.erigon_get_header_by_number(num).await.to_rpc_result()
        }
        // Otterscan (`ots_*`) namespace
        EthRequest::OtsGetApiLevel(_) => self.ots_get_api_level().await.to_rpc_result(),
        EthRequest::OtsGetInternalOperations(hash) => {
            self.ots_get_internal_operations(hash).await.to_rpc_result()
        }
        EthRequest::OtsHasCode(addr, num) => self.ots_has_code(addr, num).await.to_rpc_result(),
        EthRequest::OtsTraceTransaction(hash) => {
            self.ots_trace_transaction(hash).await.to_rpc_result()
        }
        EthRequest::OtsGetTransactionError(hash) => {
            self.ots_get_transaction_error(hash).await.to_rpc_result()
        }
        EthRequest::OtsGetBlockDetails(num) => {
            self.ots_get_block_details(num).await.to_rpc_result()
        }
        EthRequest::OtsGetBlockDetailsByHash(hash) => {
            self.ots_get_block_details_by_hash(hash).await.to_rpc_result()
        }
        EthRequest::OtsGetBlockTransactions(num, page, page_size) => {
            self.ots_get_block_transactions(num, page, page_size).await.to_rpc_result()
        }
        EthRequest::OtsSearchTransactionsBefore(address, num, page_size) => {
            self.ots_search_transactions_before(address, num, page_size).await.to_rpc_result()
        }
        EthRequest::OtsSearchTransactionsAfter(address, num, page_size) => {
            self.ots_search_transactions_after(address, num, page_size).await.to_rpc_result()
        }
        EthRequest::OtsGetTransactionBySenderAndNonce(address, nonce) => {
            self.ots_get_transaction_by_sender_and_nonce(address, nonce).await.to_rpc_result()
        }
        EthRequest::EthGetTransactionBySenderAndNonce(sender, nonce) => {
            self.transaction_by_sender_and_nonce(sender, nonce).await.to_rpc_result()
        }
        EthRequest::OtsGetContractCreator(address) => {
            self.ots_get_contract_creator(address).await.to_rpc_result()
        }
        EthRequest::RemovePoolTransactions(address) => {
            self.anvil_remove_pool_transactions(address).await.to_rpc_result()
        }
        EthRequest::Reorg(reorg_options) => {
            self.anvil_reorg(reorg_options).await.to_rpc_result()
        }
        EthRequest::Rollback(depth) => self.anvil_rollback(depth).await.to_rpc_result(),
        EthRequest::WalletGetCapabilities(()) => self.get_capabilities().to_rpc_result(),
        EthRequest::AnvilAddCapability(addr) => self.anvil_add_capability(addr).to_rpc_result(),
        EthRequest::AnvilSetExecutor(executor_pk) => {
            self.anvil_set_executor(executor_pk).to_rpc_result()
        }
    };
    // Echo failures to the user-facing node log to aid debugging.
    if let ResponseResult::Error(err) = &response {
        node_info!("\nRPC request failed:");
        node_info!("    Request: {:?}", request);
        node_info!("    Error: {}\n", err);
    }
    response
}
/// Signs the given transaction request on behalf of `from` and returns the
/// signed envelope.
///
/// Deposit transactions carry no meaningful signature, so they are stamped
/// with a fixed placeholder. All other requests require a registered signer
/// that manages `from`; otherwise [`BlockchainError::NoSignerAvailable`] is
/// returned.
fn sign_request(&self, from: &Address, request: FoundryTypedTx) -> Result<FoundryTxEnvelope> {
    // Deposit txs: short-circuit with a nil placeholder signature.
    if let FoundryTypedTx::Deposit(_) = request {
        let nil_signature = Signature::from_scalars_and_parity(
            B256::with_last_byte(1),
            B256::with_last_byte(1),
            false,
        );
        return build_typed_transaction(request, nil_signature);
    }
    // Otherwise find the first signer that manages `from` and let it sign.
    for signer in self.signers.iter() {
        if signer.accounts().contains(from) {
            let signature = signer.sign_transaction(request.clone(), from)?;
            return build_typed_transaction(request, signature);
        }
    }
    Err(BlockchainError::NoSignerAvailable)
}
/// Resolves an optional block id into a [`BlockRequest`].
///
/// `pending` resolves to the currently ready pool transactions; everything
/// else is normalized to a concrete block number via the backend.
async fn block_request(&self, block_number: Option<BlockId>) -> Result<BlockRequest> {
    if let Some(BlockId::Number(BlockNumber::Pending)) = block_number {
        // Pending state = latest block + ready pool transactions.
        let pending_txs = self.pool.ready_transactions().collect();
        return Ok(BlockRequest::Pending(pending_txs));
    }
    let number = self.backend.ensure_block_number(block_number).await?;
    Ok(BlockRequest::Number(number))
}
/// Looks up the raw, EIP-2718-encoded bytes of a transaction by hash.
///
/// The pool is consulted first (pending transactions), then the backend
/// (mined transactions); returns `None` when the hash is unknown to both.
async fn inner_raw_transaction(&self, hash: B256) -> Result<Option<Bytes>> {
    if let Some(tx) = self.pool.get_transaction(hash) {
        return Ok(Some(tx.transaction.encoded_2718().into()));
    }
    let mined = self.backend.transaction_by_hash(hash).await?;
    Ok(mined.map(|tx| tx.inner.inner.encoded_2718().into()))
}
/// Returns the current client version.
///
/// Handler for ETH RPC call: `web3_clientVersion`
pub fn client_version(&self) -> Result<String> {
    node_info!("web3_clientVersion");
    // Version string is fixed at compile time.
    Ok(String::from(CLIENT_VERSION))
}
/// Returns Keccak-256 (not the standardized SHA3-256) of the given data.
///
/// Handler for ETH RPC call: `web3_sha3`
pub fn sha3(&self, bytes: Bytes) -> Result<String> {
    node_info!("web3_sha3");
    let digest = alloy_primitives::keccak256(bytes.as_ref());
    // Per JSON-RPC convention, the hash is returned 0x-prefixed.
    Ok(alloy_primitives::hex::encode_prefixed(&digest[..]))
}
/// Returns protocol version encoded as a string (quotes are necessary).
///
/// Handler for ETH RPC call: `eth_protocolVersion`
pub fn protocol_version(&self) -> Result<u64> {
    node_info!("eth_protocolVersion");
    // Fixed value: this node does not participate in devp2p protocol negotiation.
    Ok(1)
}
/// Returns the number of hashes per second that the node is mining with.
///
/// Handler for ETH RPC call: `eth_hashrate`
pub fn hashrate(&self) -> Result<U256> {
    node_info!("eth_hashrate");
    // No proof-of-work mining happens here, so the hashrate is always zero.
    Ok(U256::ZERO)
}
/// Returns the client coinbase address.
///
/// Handler for ETH RPC call: `eth_coinbase`
pub fn author(&self) -> Result<Address> {
    node_info!("eth_coinbase");
    Ok(self.backend.coinbase())
}
/// Returns true if client is actively mining new blocks.
///
/// Handler for ETH RPC call: `eth_mining`
pub fn is_mining(&self) -> Result<bool> {
    node_info!("eth_mining");
    // Set once at construction; see `EthApi::new`.
    Ok(self.is_mining)
}
/// Returns the chain ID used for transaction signing at the
/// current best block. None is returned if not
/// available.
///
/// Handler for ETH RPC call: `eth_chainId`
pub fn eth_chain_id(&self) -> Result<Option<U64>> {
    node_info!("eth_chainId");
    let chain_id = self.backend.chain_id();
    Ok(Some(chain_id.to::<U64>()))
}
/// Returns the same as `chain_id`, rendered as a decimal string.
///
/// Handler for ETH RPC call: `eth_networkId`
pub fn network_id(&self) -> Result<Option<String>> {
    node_info!("eth_networkId");
    // `to_string` instead of `format!` — idiomatic Display-to-String conversion.
    Ok(Some(self.backend.chain_id().to::<u64>().to_string()))
}
/// Returns true if client is actively listening for network connections.
///
/// Handler for ETH RPC call: `net_listening`
pub fn net_listening(&self) -> Result<bool> {
    node_info!("net_listening");
    // Set once at construction; see `EthApi::new`.
    Ok(self.net_listening)
}
/// Returns the current gas price as a `U256`.
///
/// Handler for ETH RPC call: `eth_gasPrice`; thin wrapper around
/// [`Self::gas_price`].
fn eth_gas_price(&self) -> Result<U256> {
    node_info!("eth_gasPrice");
    Ok(U256::from(self.gas_price()))
}
/// Returns the current gas price.
///
/// Pre-EIP-1559 chains report the raw configured gas price; EIP-1559 chains
/// report the base fee, optionally bumped by the suggested tip when a minimum
/// priority fee is enforced.
pub fn gas_price(&self) -> u128 {
    if !self.backend.is_eip1559() {
        // Legacy pricing: single configured value.
        return self.backend.fees().raw_gas_price();
    }
    let base_fee = self.backend.base_fee() as u128;
    if self.backend.is_min_priority_fee_enforced() {
        base_fee.saturating_add(self.lowest_suggestion_tip())
    } else {
        base_fee
    }
}
/// Returns the excess blob gas and current blob gas price, if the active
/// hardfork supports blobs (see the backend).
pub fn excess_blob_gas_and_price(&self) -> Result<Option<BlobExcessGasAndPrice>> {
    Ok(self.backend.excess_blob_gas_and_price())
}
/// Returns a fee per gas that is an estimate of how much you can pay as a priority fee, or
/// 'tip', to get a transaction included in the current block.
///
/// Handler for ETH RPC call: `eth_maxPriorityFeePerGas`; delegates to
/// `max_priority_fee_per_gas`.
pub fn gas_max_priority_fee_per_gas(&self) -> Result<U256> {
    self.max_priority_fee_per_gas()
}
/// Returns the base fee per blob required to send a EIP-4844 tx.
///
/// Handler for ETH RPC call: `eth_blobBaseFee`
pub fn blob_base_fee(&self) -> Result<U256> {
    Ok(U256::from(self.backend.fees().base_fee_per_blob_gas()))
}
/// Returns the block gas limit as reported by the backend.
pub fn gas_limit(&self) -> U256 {
    U256::from(self.backend.gas_limit())
}
/// Returns the accounts list: signer-managed accounts first, then
/// impersonated accounts, deduplicated while preserving insertion order.
///
/// Handler for ETH RPC call: `eth_accounts`
pub fn accounts(&self) -> Result<Vec<Address>> {
    node_info!("eth_accounts");
    // `unique` tracks already-seen addresses so duplicates are skipped.
    let mut unique = HashSet::new();
    let mut accounts: Vec<Address> = Vec::new();
    for signer in self.signers.iter() {
        accounts.extend(signer.accounts().into_iter().filter(|acc| unique.insert(*acc)));
    }
    accounts.extend(
        self.backend
            .cheats()
            .impersonated_accounts()
            .into_iter()
            .filter(|acc| unique.insert(*acc)),
    );
    // Already a Vec — no need for the former `into_iter().collect()` round-trip.
    Ok(accounts)
}
/// Returns the number of most recent block.
///
/// Handler for ETH RPC call: `eth_blockNumber`
pub fn block_number(&self) -> Result<U256> {
    node_info!("eth_blockNumber");
    Ok(U256::from(self.backend.best_number()))
}
/// Returns balance of the given account.
///
/// Handler for ETH RPC call: `eth_getBalance`
pub async fn balance(&self, address: Address, block_number: Option<BlockId>) -> Result<U256> {
node_info!("eth_getBalance");
let block_request = self.block_request(block_number).await?;
// check if the number predates the fork, if in fork mode
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/util.rs | crates/anvil/src/eth/util.rs | use alloy_primitives::hex;
use itertools::Itertools;
/// Formats values as hex strings, separated by commas.
pub fn hex_fmt_many<I, T>(i: I) -> String
where
    I: IntoIterator<Item = T>,
    T: AsRef<[u8]>,
{
    // `Itertools::format` is a lazy Display adaptor that may be rendered only
    // once; `to_string` performs that single pass directly, avoiding the
    // redundant `format!("{items}")` indirection.
    i.into_iter().map(|item| hex::encode(item.as_ref())).format(", ").to_string()
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/macros.rs | crates/anvil/src/eth/macros.rs | /// A `info!` helper macro that emits to the target, the node logger listens for
macro_rules! node_info {
    // Accepts the same argument syntax as `tracing::info!`; every event is
    // tagged with `NODE_USER_LOG_TARGET` so the node logger can filter it.
    ($($arg:tt)*) => {
        tracing::info!(target: $crate::logging::NODE_USER_LOG_TARGET, $($arg)*);
    };
}
// Re-export so sibling modules can import the macro through the crate path.
pub(crate) use node_info;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/mod.rs | crates/anvil/src/eth/mod.rs | pub mod api;
pub mod otterscan;
pub mod sign;
pub use api::EthApi;
pub mod backend;
pub mod error;
pub mod fees;
pub(crate) mod macros;
pub mod miner;
pub mod pool;
pub mod util;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/fees.rs | crates/anvil/src/eth/fees.rs | use std::{
collections::BTreeMap,
fmt,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use alloy_consensus::{Header, Transaction};
use alloy_eips::{
calc_next_block_base_fee, eip1559::BaseFeeParams, eip7691::MAX_BLOBS_PER_BLOCK_ELECTRA,
eip7840::BlobParams,
};
use alloy_primitives::B256;
use futures::StreamExt;
use parking_lot::{Mutex, RwLock};
use revm::{context_interface::block::BlobExcessGasAndPrice, primitives::hardfork::SpecId};
use crate::eth::{
backend::{info::StorageInfo, notifications::NewBlockNotifications},
error::BlockchainError,
};
/// Maximum number of entries in the fee history cache.
pub const MAX_FEE_HISTORY_CACHE_SIZE: u64 = 2048u64;
/// Initial base fee for EIP-1559 blocks.
pub const INITIAL_BASE_FEE: u64 = 1_000_000_000;
/// Initial default gas price for the first block.
pub const INITIAL_GAS_PRICE: u128 = 1_875_000_000;
/// Bounds the amount the base fee can change between blocks.
pub const BASE_FEE_CHANGE_DENOMINATOR: u128 = 8;
/// Minimum suggested priority fee: 1 gwei.
///
/// Written as an integer literal rather than `1e9 as u128` — the value is an
/// exact integer, so the float detour is unnecessary.
pub const MIN_SUGGESTED_PRIORITY_FEE: u128 = 1_000_000_000;
/// Stores the fee related information
#[derive(Clone, Debug)]
pub struct FeeManager {
    /// Hardfork identifier
    spec_id: SpecId,
    /// The blob params that determine blob fees
    blob_params: Arc<RwLock<BlobParams>>,
    /// Tracks the base fee for the next block post London
    ///
    /// This value will be updated after a new block was mined
    base_fee: Arc<RwLock<u64>>,
    /// Whether the minimum suggested priority fee is enforced
    is_min_priority_fee_enforced: bool,
    /// Tracks the excess blob gas, and the base fee, for the next block post Cancun
    ///
    /// This value will be updated after a new block was mined
    blob_excess_gas_and_price: Arc<RwLock<BlobExcessGasAndPrice>>,
    /// The base price to use Pre London
    ///
    /// This will be constant value unless changed manually
    gas_price: Arc<RwLock<u128>>,
    /// Inverse of the EIP-1559 elasticity multiplier (see `FeeManager::new`)
    elasticity: Arc<RwLock<f64>>,
    /// Network-specific base fee params for EIP-1559 calculations
    base_fee_params: BaseFeeParams,
}
impl FeeManager {
    /// Creates a new fee manager seeded with the given spec and initial fee values.
    pub fn new(
        spec_id: SpecId,
        base_fee: u64,
        is_min_priority_fee_enforced: bool,
        gas_price: u128,
        blob_excess_gas_and_price: BlobExcessGasAndPrice,
        blob_params: BlobParams,
        base_fee_params: BaseFeeParams,
    ) -> Self {
        // Elasticity is the inverse of the EIP-1559 elasticity multiplier.
        let elasticity = 1f64 / base_fee_params.elasticity_multiplier as f64;
        Self {
            spec_id,
            blob_params: Arc::new(RwLock::new(blob_params)),
            base_fee: Arc::new(RwLock::new(base_fee)),
            is_min_priority_fee_enforced,
            gas_price: Arc::new(RwLock::new(gas_price)),
            blob_excess_gas_and_price: Arc::new(RwLock::new(blob_excess_gas_and_price)),
            elasticity: Arc::new(RwLock::new(elasticity)),
            base_fee_params,
        }
    }
    /// Returns the base fee params used for EIP-1559 calculations
    pub fn base_fee_params(&self) -> BaseFeeParams {
        self.base_fee_params
    }
    /// Returns the configured elasticity (inverse of the elasticity multiplier).
    pub fn elasticity(&self) -> f64 {
        *self.elasticity.read()
    }
    /// Returns true for post London
    pub fn is_eip1559(&self) -> bool {
        (self.spec_id as u8) >= (SpecId::LONDON as u8)
    }
    /// Returns true for post Cancun (blob transactions supported).
    pub fn is_eip4844(&self) -> bool {
        (self.spec_id as u8) >= (SpecId::CANCUN as u8)
    }
    /// Calculates the current blob gas price
    pub fn blob_gas_price(&self) -> u128 {
        if self.is_eip4844() { self.base_fee_per_blob_gas() } else { 0 }
    }
    /// Returns the tracked base fee, or 0 pre-London where base fees do not exist.
    pub fn base_fee(&self) -> u64 {
        if self.is_eip1559() { *self.base_fee.read() } else { 0 }
    }
    /// Whether the minimum suggested priority fee is enforced.
    pub fn is_min_priority_fee_enforced(&self) -> bool {
        self.is_min_priority_fee_enforced
    }
    /// Raw base gas price
    pub fn raw_gas_price(&self) -> u128 {
        *self.gas_price.read()
    }
    /// Returns excess blob gas and price post Cancun, `None` otherwise.
    pub fn excess_blob_gas_and_price(&self) -> Option<BlobExcessGasAndPrice> {
        if self.is_eip4844() { Some(*self.blob_excess_gas_and_price.read()) } else { None }
    }
    /// Returns the blob base fee, or 0 pre-Cancun.
    pub fn base_fee_per_blob_gas(&self) -> u128 {
        if self.is_eip4844() { self.blob_excess_gas_and_price.read().blob_gasprice } else { 0 }
    }
    /// Sets the current gas price
    pub fn set_gas_price(&self, price: u128) {
        let mut gas = self.gas_price.write();
        *gas = price;
    }
    /// Sets the current base fee
    pub fn set_base_fee(&self, fee: u64) {
        trace!(target: "backend::fees", "updated base fee {:?}", fee);
        let mut base = self.base_fee.write();
        *base = fee;
    }
    /// Sets the current blob excess gas and price
    pub fn set_blob_excess_gas_and_price(&self, blob_excess_gas_and_price: BlobExcessGasAndPrice) {
        trace!(target: "backend::fees", "updated blob base fee {:?}", blob_excess_gas_and_price);
        let mut base = self.blob_excess_gas_and_price.write();
        *base = blob_excess_gas_and_price;
    }
    /// Calculates the base fee for the next block
    pub fn get_next_block_base_fee_per_gas(
        &self,
        gas_used: u64,
        gas_limit: u64,
        last_fee_per_gas: u64,
    ) -> u64 {
        // It's naturally impossible for base fee to be 0;
        // It means it was set by the user deliberately and therefore we treat it as a constant.
        // Therefore, we skip the base fee calculation altogether and we return 0.
        if self.base_fee() == 0 {
            return 0;
        }
        calc_next_block_base_fee(gas_used, gas_limit, last_fee_per_gas, self.base_fee_params)
    }
    /// Calculates the next block blob base fee.
    pub fn get_next_block_blob_base_fee_per_gas(&self) -> u128 {
        self.blob_params().calc_blob_fee(self.blob_excess_gas_and_price.read().excess_blob_gas)
    }
    /// Calculates the next block blob excess gas, using the provided parent blob gas used and
    /// parent blob excess gas
    pub fn get_next_block_blob_excess_gas(&self, blob_gas_used: u64, blob_excess_gas: u64) -> u64 {
        self.blob_params().next_block_excess_blob_gas_osaka(
            blob_excess_gas,
            blob_gas_used,
            self.base_fee(),
        )
    }
    /// Configures the blob params
    pub fn set_blob_params(&self, blob_params: BlobParams) {
        *self.blob_params.write() = blob_params;
    }
    /// Returns the active [`BlobParams`]
    pub fn blob_params(&self) -> BlobParams {
        *self.blob_params.read()
    }
}
/// Calculate base fee for next block using Ethereum mainnet parameters.
///
/// See [EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md) spec.
///
/// Note: This uses Ethereum's base fee params. For network-specific calculations
/// (e.g., Optimism), use [`FeeManager::get_next_block_base_fee_per_gas`] instead.
pub fn calculate_next_block_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
    // Mainnet parameters are fixed; delegate the actual math to alloy.
    let params = BaseFeeParams::ethereum();
    calc_next_block_base_fee(gas_used, gas_limit, base_fee, params)
}
/// An async service that takes care of the `FeeHistory` cache
pub struct FeeHistoryService {
    /// Blob parameters for the current spec
    blob_params: BlobParams,
    /// Incoming notifications about new blocks
    new_blocks: NewBlockNotifications,
    /// Contains all fee history related entries, keyed by block number
    cache: FeeHistoryCache,
    /// Number of items to keep in the cache (older entries are evicted)
    fee_history_limit: u64,
    /// A type that can fetch ethereum-storage data (blocks and receipts)
    storage_info: StorageInfo,
}
impl FeeHistoryService {
    /// Creates a new service with the default history limit
    /// (`MAX_FEE_HISTORY_CACHE_SIZE`).
    pub fn new(
        blob_params: BlobParams,
        new_blocks: NewBlockNotifications,
        cache: FeeHistoryCache,
        storage_info: StorageInfo,
    ) -> Self {
        Self {
            blob_params,
            new_blocks,
            cache,
            fee_history_limit: MAX_FEE_HISTORY_CACHE_SIZE,
            storage_info,
        }
    }
    /// Returns the configured history limit
    pub fn fee_history_limit(&self) -> u64 {
        self.fee_history_limit
    }
    /// Inserts a new cache entry for the given block
    pub(crate) fn insert_cache_entry_for_block(&self, hash: B256, header: &Header) {
        let (result, block_number) = self.create_cache_entry(hash, header);
        self.insert_cache_entry(result, block_number);
    }
    /// Create a new history entry for the block
    fn create_cache_entry(
        &self,
        hash: B256,
        header: &Header,
    ) -> (FeeHistoryCacheItem, Option<u64>) {
        // percentile list from 0.0 to 100.0 with a 0.5 resolution.
        // `(0..=200)` is inclusive, so this creates 201 percentile points.
        let reward_percentiles: Vec<f64> = {
            let mut percentile: f64 = 0.0;
            (0..=200)
                .map(|_| {
                    let val = percentile;
                    percentile += 0.5;
                    val
                })
                .collect()
        };
        let mut block_number: Option<u64> = None;
        let base_fee = header.base_fee_per_gas.unwrap_or_default();
        let excess_blob_gas = header.excess_blob_gas.map(|g| g as u128);
        let blob_gas_used = header.blob_gas_used.map(|g| g as u128);
        let base_fee_per_blob_gas = header.blob_fee(self.blob_params);
        let mut item = FeeHistoryCacheItem {
            base_fee: base_fee as u128,
            gas_used_ratio: 0f64,
            blob_gas_used_ratio: 0f64,
            rewards: Vec::new(),
            excess_blob_gas,
            base_fee_per_blob_gas,
            blob_gas_used,
        };
        let current_block = self.storage_info.block(hash);
        let current_receipts = self.storage_info.receipts(hash);
        // Ratios and rewards can only be computed when both the block body and
        // its receipts are available from storage.
        if let (Some(block), Some(receipts)) = (current_block, current_receipts) {
            block_number = Some(block.header.number);
            let gas_used = block.header.gas_used as f64;
            let blob_gas_used = block.header.blob_gas_used.map(|g| g as f64);
            item.gas_used_ratio = gas_used / block.header.gas_limit as f64;
            item.blob_gas_used_ratio =
                blob_gas_used.map(|g| g / MAX_BLOBS_PER_BLOCK_ELECTRA as f64).unwrap_or(0 as f64);
            // extract useful tx info (gas_used, effective_reward)
            let mut transactions: Vec<(_, _)> = receipts
                .iter()
                .enumerate()
                .map(|(i, receipt)| {
                    let gas_used = receipt.cumulative_gas_used();
                    let effective_reward = block
                        .body
                        .transactions
                        .get(i)
                        .map(|tx| tx.as_ref().effective_tip_per_gas(base_fee).unwrap_or(0))
                        .unwrap_or(0);
                    (gas_used, effective_reward)
                })
                .collect();
            // sort by effective reward asc
            transactions.sort_by(|(_, a), (_, b)| a.cmp(b));
            // calculate percentile rewards: for each percentile, find the first
            // transaction whose cumulative gas crosses the percentile's gas target
            item.rewards = reward_percentiles
                .into_iter()
                .filter_map(|p| {
                    let target_gas = (p * gas_used / 100f64) as u64;
                    let mut sum_gas = 0;
                    for (gas_used, effective_reward) in transactions.iter().copied() {
                        sum_gas += gas_used;
                        if target_gas <= sum_gas {
                            return Some(effective_reward);
                        }
                    }
                    None
                })
                .collect();
        } else {
            // Block data unavailable: record zeroed rewards for every percentile.
            item.rewards = vec![0; reward_percentiles.len()];
        }
        (item, block_number)
    }
    /// Inserts the entry into the cache and evicts entries beyond the limit.
    fn insert_cache_entry(&self, item: FeeHistoryCacheItem, block_number: Option<u64>) {
        if let Some(block_number) = block_number {
            trace!(target: "fees", "insert new history item={:?} for {}", item, block_number);
            let mut cache = self.cache.lock();
            cache.insert(block_number, item);
            // adhere to cache limit
            let pop_next = block_number.saturating_sub(self.fee_history_limit);
            let num_remove = (cache.len() as u64).saturating_sub(self.fee_history_limit);
            for num in 0..num_remove {
                let key = pop_next - num;
                cache.remove(&key);
            }
        }
    }
}
// An endless future that listens for new blocks and updates the cache
impl Future for FeeHistoryService {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let pin = self.get_mut();
while let Poll::Ready(Some(notification)) = pin.new_blocks.poll_next_unpin(cx) {
// add the imported block.
pin.insert_cache_entry_for_block(notification.hash, notification.header.as_ref());
}
Poll::Pending
}
}
pub type FeeHistoryCache = Arc<Mutex<BTreeMap<u64, FeeHistoryCacheItem>>>;
/// A single item in the whole fee history cache
#[derive(Clone, Debug)]
pub struct FeeHistoryCacheItem {
pub base_fee: u128,
pub gas_used_ratio: f64,
pub base_fee_per_blob_gas: Option<u128>,
pub blob_gas_used_ratio: f64,
pub excess_blob_gas: Option<u128>,
pub blob_gas_used: Option<u128>,
pub rewards: Vec<u128>,
}
#[derive(Clone, Default)]
pub struct FeeDetails {
pub gas_price: Option<u128>,
pub max_fee_per_gas: Option<u128>,
pub max_priority_fee_per_gas: Option<u128>,
pub max_fee_per_blob_gas: Option<u128>,
}
impl FeeDetails {
/// All values zero
pub fn zero() -> Self {
Self {
gas_price: Some(0),
max_fee_per_gas: Some(0),
max_priority_fee_per_gas: Some(0),
max_fee_per_blob_gas: None,
}
}
/// If neither `gas_price` nor `max_fee_per_gas` is `Some`, this will set both to `0`
pub fn or_zero_fees(self) -> Self {
let Self { gas_price, max_fee_per_gas, max_priority_fee_per_gas, max_fee_per_blob_gas } =
self;
let no_fees = gas_price.is_none() && max_fee_per_gas.is_none();
let gas_price = if no_fees { Some(0) } else { gas_price };
let max_fee_per_gas = if no_fees { Some(0) } else { max_fee_per_gas };
let max_fee_per_blob_gas = if no_fees { None } else { max_fee_per_blob_gas };
Self { gas_price, max_fee_per_gas, max_priority_fee_per_gas, max_fee_per_blob_gas }
}
/// Turns this type into a tuple
pub fn split(self) -> (Option<u128>, Option<u128>, Option<u128>, Option<u128>) {
let Self { gas_price, max_fee_per_gas, max_priority_fee_per_gas, max_fee_per_blob_gas } =
self;
(gas_price, max_fee_per_gas, max_priority_fee_per_gas, max_fee_per_blob_gas)
}
/// Creates a new instance from the request's gas related values
pub fn new(
request_gas_price: Option<u128>,
request_max_fee: Option<u128>,
request_priority: Option<u128>,
max_fee_per_blob_gas: Option<u128>,
) -> Result<Self, BlockchainError> {
match (request_gas_price, request_max_fee, request_priority, max_fee_per_blob_gas) {
(gas_price, None, None, None) => {
// Legacy request, all default to gas price.
Ok(Self {
gas_price,
max_fee_per_gas: gas_price,
max_priority_fee_per_gas: gas_price,
max_fee_per_blob_gas: None,
})
}
(_, max_fee, max_priority, None) => {
// eip-1559
// Ensure `max_priority_fee_per_gas` is less or equal to `max_fee_per_gas`.
if let Some(max_priority) = max_priority {
let max_fee = max_fee.unwrap_or_default();
if max_priority > max_fee {
return Err(BlockchainError::InvalidFeeInput);
}
}
Ok(Self {
gas_price: max_fee,
max_fee_per_gas: max_fee,
max_priority_fee_per_gas: max_priority,
max_fee_per_blob_gas: None,
})
}
(_, max_fee, max_priority, max_fee_per_blob_gas) => {
// eip-1559
// Ensure `max_priority_fee_per_gas` is less or equal to `max_fee_per_gas`.
if let Some(max_priority) = max_priority {
let max_fee = max_fee.unwrap_or_default();
if max_priority > max_fee {
return Err(BlockchainError::InvalidFeeInput);
}
}
Ok(Self {
gas_price: max_fee,
max_fee_per_gas: max_fee,
max_priority_fee_per_gas: max_priority,
max_fee_per_blob_gas,
})
}
}
}
}
impl fmt::Debug for FeeDetails {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "Fees {{ ")?;
write!(fmt, "gas_price: {:?}, ", self.gas_price)?;
write!(fmt, "max_fee_per_gas: {:?}, ", self.max_fee_per_gas)?;
write!(fmt, "max_priority_fee_per_gas: {:?}, ", self.max_priority_fee_per_gas)?;
write!(fmt, "}}")?;
Ok(())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/otterscan/api.rs | crates/anvil/src/eth/otterscan/api.rs | use crate::eth::{
EthApi,
error::{BlockchainError, Result},
macros::node_info,
};
use alloy_consensus::Transaction as TransactionTrait;
use alloy_network::{
AnyHeader, AnyRpcBlock, AnyRpcHeader, AnyRpcTransaction, AnyTxEnvelope, BlockResponse,
ReceiptResponse, TransactionResponse,
};
use alloy_primitives::{Address, B256, Bytes, U256};
use alloy_rpc_types::{
Block, BlockId, BlockNumberOrTag as BlockNumber, BlockTransactions,
trace::{
otterscan::{
BlockDetails, ContractCreator, InternalOperation, OtsBlock, OtsBlockTransactions,
OtsReceipt, OtsSlimBlock, OtsTransactionReceipt, TraceEntry, TransactionsWithReceipts,
},
parity::{Action, CreateAction, CreateOutput, TraceOutput},
},
};
use futures::future::join_all;
use itertools::Itertools;
impl EthApi {
/// Otterscan currently requires this endpoint, even though it's not part of the `ots_*`.
/// Ref: <https://github.com/otterscan/otterscan/blob/071d8c55202badf01804f6f8d53ef9311d4a9e47/src/useProvider.ts#L71>
///
/// As a faster alternative to `eth_getBlockByNumber` (by excluding uncle block
/// information), which is not relevant in the context of an anvil node
pub async fn erigon_get_header_by_number(
&self,
number: BlockNumber,
) -> Result<Option<AnyRpcBlock>> {
node_info!("erigon_getHeaderByNumber");
self.backend.block_by_number(number).await
}
/// As per the latest Otterscan source code, at least version 8 is needed.
/// Ref: <https://github.com/otterscan/otterscan/blob/071d8c55202badf01804f6f8d53ef9311d4a9e47/src/params.ts#L1C2-L1C2>
pub async fn ots_get_api_level(&self) -> Result<u64> {
node_info!("ots_getApiLevel");
// as required by current otterscan's source code
Ok(8)
}
/// Trace internal ETH transfers, contracts creation (CREATE/CREATE2) and self-destructs for a
/// certain transaction.
pub async fn ots_get_internal_operations(&self, hash: B256) -> Result<Vec<InternalOperation>> {
node_info!("ots_getInternalOperations");
self.backend
.mined_transaction(hash)
.map(|tx| tx.ots_internal_operations())
.ok_or_else(|| BlockchainError::DataUnavailable)
}
/// Check if an ETH address contains code at a certain block number.
pub async fn ots_has_code(&self, address: Address, block_number: BlockNumber) -> Result<bool> {
node_info!("ots_hasCode");
let block_id = Some(BlockId::Number(block_number));
Ok(!self.get_code(address, block_id).await?.is_empty())
}
/// Trace a transaction and generate a trace call tree.
/// Converts the list of traces for a transaction into the expected Otterscan format.
///
/// Follows format specified in the [`ots_traceTransaction`](https://docs.otterscan.io/api-docs/ots-api#ots_tracetransaction) spec.
pub async fn ots_trace_transaction(&self, hash: B256) -> Result<Vec<TraceEntry>> {
node_info!("ots_traceTransaction");
let traces = self
.backend
.trace_transaction(hash)
.await?
.into_iter()
.filter_map(|trace| TraceEntry::from_transaction_trace(&trace.trace))
.collect();
Ok(traces)
}
/// Given a transaction hash, returns its raw revert reason.
pub async fn ots_get_transaction_error(&self, hash: B256) -> Result<Bytes> {
node_info!("ots_getTransactionError");
if let Some(receipt) = self.backend.mined_transaction_receipt(hash)
&& !receipt.inner.as_ref().status()
{
return Ok(receipt.out.unwrap_or_default());
}
Ok(Bytes::default())
}
/// For simplicity purposes, we return the entire block instead of emptying the values that
/// Otterscan doesn't want. This is the original purpose of the endpoint (to save bandwidth),
/// but it doesn't seem necessary in the context of an anvil node
pub async fn ots_get_block_details(
&self,
number: BlockNumber,
) -> Result<BlockDetails<AnyRpcHeader>> {
node_info!("ots_getBlockDetails");
if let Some(block) = self.backend.block_by_number(number).await? {
let ots_block = self.build_ots_block_details(block).await?;
Ok(ots_block)
} else {
Err(BlockchainError::BlockNotFound)
}
}
/// For simplicity purposes, we return the entire block instead of emptying the values that
/// Otterscan doesn't want. This is the original purpose of the endpoint (to save bandwidth),
/// but it doesn't seem necessary in the context of an anvil node
pub async fn ots_get_block_details_by_hash(
&self,
hash: B256,
) -> Result<BlockDetails<AnyRpcHeader>> {
node_info!("ots_getBlockDetailsByHash");
if let Some(block) = self.backend.block_by_hash(hash).await? {
let ots_block = self.build_ots_block_details(block).await?;
Ok(ots_block)
} else {
Err(BlockchainError::BlockNotFound)
}
}
/// Gets paginated transaction data for a certain block. Return data is similar to
/// eth_getBlockBy* + eth_getTransactionReceipt.
pub async fn ots_get_block_transactions(
&self,
number: u64,
page: usize,
page_size: usize,
) -> Result<OtsBlockTransactions<AnyRpcTransaction, AnyRpcHeader>> {
node_info!("ots_getBlockTransactions");
match self.backend.block_by_number_full(number.into()).await? {
Some(block) => self.build_ots_block_tx(block, page, page_size).await,
None => Err(BlockchainError::BlockNotFound),
}
}
/// Address history navigation. searches backwards from certain point in time.
pub async fn ots_search_transactions_before(
&self,
address: Address,
block_number: u64,
page_size: usize,
) -> Result<TransactionsWithReceipts<alloy_rpc_types::Transaction<AnyTxEnvelope>>> {
node_info!("ots_searchTransactionsBefore");
let best = self.backend.best_number();
// we go from given block (defaulting to best) down to first block
// considering only post-fork
let from = if block_number == 0 { best } else { block_number - 1 };
let to = self.get_fork().map(|f| f.block_number() + 1).unwrap_or(1);
let first_page = from >= best;
let mut last_page = false;
let mut res: Vec<_> = vec![];
for n in (to..=from).rev() {
if let Some(traces) = self.backend.mined_parity_trace_block(n) {
let hashes = traces
.into_iter()
.rev()
.filter(|trace| trace.contains_address(address))
.filter_map(|trace| trace.transaction_hash)
.unique();
if res.len() >= page_size {
break;
}
res.extend(hashes);
}
if n == to {
last_page = true;
}
}
self.build_ots_search_transactions(res, first_page, last_page).await
}
/// Address history navigation. searches forward from certain point in time.
pub async fn ots_search_transactions_after(
&self,
address: Address,
block_number: u64,
page_size: usize,
) -> Result<TransactionsWithReceipts<alloy_rpc_types::Transaction<AnyTxEnvelope>>> {
node_info!("ots_searchTransactionsAfter");
let best = self.backend.best_number();
// we go from the first post-fork block, up to the tip
let first_block = self.get_fork().map(|f| f.block_number() + 1).unwrap_or(1);
let from = if block_number == 0 { first_block } else { block_number + 1 };
let to = best;
let mut first_page = from >= best;
let mut last_page = false;
let mut res: Vec<_> = vec![];
for n in from..=to {
if n == first_block {
last_page = true;
}
if let Some(traces) = self.backend.mined_parity_trace_block(n) {
let hashes = traces
.into_iter()
.rev()
.filter(|trace| trace.contains_address(address))
.filter_map(|trace| trace.transaction_hash)
.unique();
if res.len() >= page_size {
break;
}
res.extend(hashes);
}
if n == to {
first_page = true;
}
}
// Results are always sent in reverse chronological order, according to the Otterscan spec
res.reverse();
self.build_ots_search_transactions(res, first_page, last_page).await
}
/// Given a sender address and a nonce, returns the tx hash or null if not found. It returns
/// only the tx hash on success, you can use the standard eth_getTransactionByHash after that to
/// get the full transaction data.
pub async fn ots_get_transaction_by_sender_and_nonce(
&self,
address: Address,
nonce: U256,
) -> Result<Option<B256>> {
node_info!("ots_getTransactionBySenderAndNonce");
let from = self.get_fork().map(|f| f.block_number() + 1).unwrap_or_default();
let to = self.backend.best_number();
for n in (from..=to).rev() {
if let Some(txs) = self.backend.mined_transactions_by_block_number(n.into()).await {
for tx in txs {
if U256::from(tx.nonce()) == nonce && tx.from() == address {
return Ok(Some(tx.tx_hash()));
}
}
}
}
Ok(None)
}
/// Given an ETH contract address, returns the tx hash and the direct address who created the
/// contract.
pub async fn ots_get_contract_creator(&self, addr: Address) -> Result<Option<ContractCreator>> {
node_info!("ots_getContractCreator");
let from = self.get_fork().map(|f| f.block_number()).unwrap_or_default();
let to = self.backend.best_number();
// loop in reverse, since we want the latest deploy to the address
for n in (from..=to).rev() {
if let Some(traces) = self.backend.mined_parity_trace_block(n) {
for trace in traces.into_iter().rev() {
match (trace.trace.action, trace.trace.result) {
(
Action::Create(CreateAction { from, .. }),
Some(TraceOutput::Create(CreateOutput { address, .. })),
) if address == addr => {
return Ok(Some(ContractCreator {
hash: trace.transaction_hash.unwrap(),
creator: from,
}));
}
_ => {}
}
}
}
}
Ok(None)
}
/// The response for ots_getBlockDetails includes an `issuance` object that requires computing
/// the total gas spent in a given block.
///
/// The only way to do this with the existing API is to explicitly fetch all receipts, to get
/// their `gas_used`. This would be extremely inefficient in a real blockchain RPC, but we can
/// get away with that in this context.
///
/// The [original spec](https://docs.otterscan.io/api-docs/ots-api#ots_getblockdetails)
/// also mentions we can hardcode `transactions` and `logsBloom` to an empty array to save
/// bandwidth, because fields weren't intended to be used in the Otterscan UI at this point.
///
/// This has two problems though:
/// - It makes the endpoint too specific to Otterscan's implementation
/// - It breaks the abstraction built in `OtsBlock<TX>` which computes `transaction_count`
/// based on the existing list.
///
/// Therefore we keep it simple by keeping the data in the response
pub async fn build_ots_block_details(
&self,
block: AnyRpcBlock,
) -> Result<BlockDetails<alloy_rpc_types::Header<AnyHeader>>> {
if block.transactions.is_uncle() {
return Err(BlockchainError::DataUnavailable);
}
let receipts_futs = block
.transactions
.hashes()
.map(|hash| async move { self.transaction_receipt(hash).await });
// fetch all receipts
let receipts = join_all(receipts_futs)
.await
.into_iter()
.map(|r| match r {
Ok(Some(r)) => Ok(r),
_ => Err(BlockchainError::DataUnavailable),
})
.collect::<Result<Vec<_>>>()?;
let total_fees = receipts.iter().fold(0, |acc, receipt| {
acc + (receipt.gas_used() as u128) * receipt.effective_gas_price()
});
let Block { header, uncles, transactions, withdrawals } = block.into_inner();
let block =
OtsSlimBlock { header, uncles, transaction_count: transactions.len(), withdrawals };
Ok(BlockDetails {
block,
total_fees: U256::from(total_fees),
// issuance has no meaningful value in anvil's backend. just default to 0
issuance: Default::default(),
})
}
/// Fetches all receipts for the blocks's transactions, as required by the
/// [`ots_getBlockTransactions`] endpoint spec, and returns the final response object.
///
/// [`ots_getBlockTransactions`]: https://docs.otterscan.io/api-docs/ots-api#ots_getblocktransactions
pub async fn build_ots_block_tx(
&self,
mut block: AnyRpcBlock,
page: usize,
page_size: usize,
) -> Result<OtsBlockTransactions<AnyRpcTransaction, AnyRpcHeader>> {
if block.transactions.is_uncle() {
return Err(BlockchainError::DataUnavailable);
}
block.transactions = match block.transactions() {
BlockTransactions::Full(txs) => BlockTransactions::Full(
txs.iter().skip(page * page_size).take(page_size).cloned().collect(),
),
BlockTransactions::Hashes(txs) => BlockTransactions::Hashes(
txs.iter().skip(page * page_size).take(page_size).copied().collect(),
),
BlockTransactions::Uncle => unreachable!(),
};
let receipt_futs = block.transactions.hashes().map(|hash| self.transaction_receipt(hash));
let receipts = join_all(receipt_futs.map(|r| async {
if let Ok(Some(r)) = r.await {
let block = self.block_by_number(r.block_number().unwrap().into()).await?;
let timestamp = block.ok_or(BlockchainError::BlockNotFound)?.header.timestamp;
let receipt = r.as_ref().inner.clone().map_inner(OtsReceipt::from);
Ok(OtsTransactionReceipt { receipt, timestamp: Some(timestamp) })
} else {
Err(BlockchainError::BlockNotFound)
}
}))
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
let transaction_count = block.transactions().len();
let fullblock = OtsBlock { block: block.inner.clone(), transaction_count };
let ots_block_txs = OtsBlockTransactions { fullblock, receipts };
Ok(ots_block_txs)
}
pub async fn build_ots_search_transactions(
&self,
hashes: Vec<B256>,
first_page: bool,
last_page: bool,
) -> Result<TransactionsWithReceipts<alloy_rpc_types::Transaction<AnyTxEnvelope>>> {
let txs_futs = hashes.iter().map(|hash| async { self.transaction_by_hash(*hash).await });
let txs = join_all(txs_futs)
.await
.into_iter()
.map(|t| match t {
Ok(Some(t)) => Ok(t.into_inner()),
_ => Err(BlockchainError::DataUnavailable),
})
.collect::<Result<Vec<_>>>()?;
let receipt_futs = hashes.iter().map(|hash| self.transaction_receipt(*hash));
let receipts = join_all(receipt_futs.map(|r| async {
if let Ok(Some(r)) = r.await {
let block = self.block_by_number(r.block_number().unwrap().into()).await?;
let timestamp = block.ok_or(BlockchainError::BlockNotFound)?.header.timestamp;
let receipt = r.as_ref().inner.clone().map_inner(OtsReceipt::from);
Ok(OtsTransactionReceipt { receipt, timestamp: Some(timestamp) })
} else {
Err(BlockchainError::BlockNotFound)
}
}))
.await
.into_iter()
.collect::<Result<Vec<_>>>()?;
Ok(TransactionsWithReceipts { txs, receipts, first_page, last_page })
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/otterscan/mod.rs | crates/anvil/src/eth/otterscan/mod.rs | pub mod api;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/pool/transactions.rs | crates/anvil/src/eth/pool/transactions.rs | use crate::eth::{error::PoolError, util::hex_fmt_many};
use alloy_consensus::{Transaction, Typed2718};
use alloy_network::AnyRpcTransaction;
use alloy_primitives::{
Address, TxHash,
map::{HashMap, HashSet},
};
use anvil_core::eth::transaction::PendingTransaction;
use foundry_primitives::FoundryTxEnvelope;
use parking_lot::RwLock;
use std::{cmp::Ordering, collections::BTreeSet, fmt, str::FromStr, sync::Arc, time::Instant};
/// A unique identifying marker for a transaction
pub type TxMarker = Vec<u8>;
/// creates an unique identifier for aan (`nonce` + `Address`) combo
pub fn to_marker(nonce: u64, from: Address) -> TxMarker {
let mut data = [0u8; 28];
data[..8].copy_from_slice(&nonce.to_le_bytes()[..]);
data[8..].copy_from_slice(&from.0[..]);
data.to_vec()
}
/// Modes that determine the transaction ordering of the mempool
///
/// This type controls the transaction order via the priority metric of a transaction
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum TransactionOrder {
/// Keep the pool transaction transactions sorted in the order they arrive.
///
/// This will essentially assign every transaction the exact priority so the order is
/// determined by their internal id
Fifo,
/// This means that it prioritizes transactions based on the fees paid to the miner.
#[default]
Fees,
}
impl TransactionOrder {
/// Returns the priority of the transactions
pub fn priority(&self, tx: &FoundryTxEnvelope) -> TransactionPriority {
match self {
Self::Fifo => TransactionPriority::default(),
Self::Fees => TransactionPriority(tx.max_fee_per_gas()),
}
}
}
impl FromStr for TransactionOrder {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.to_lowercase();
let order = match s.as_str() {
"fees" => Self::Fees,
"fifo" => Self::Fifo,
_ => return Err(format!("Unknown TransactionOrder: `{s}`")),
};
Ok(order)
}
}
/// Metric value for the priority of a transaction.
///
/// The `TransactionPriority` determines the ordering of two transactions that have all their
/// markers satisfied.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct TransactionPriority(pub u128);
/// Internal Transaction type
#[derive(Clone, PartialEq, Eq)]
pub struct PoolTransaction {
/// the pending eth transaction
pub pending_transaction: PendingTransaction,
/// Markers required by the transaction
pub requires: Vec<TxMarker>,
/// Markers that this transaction provides
pub provides: Vec<TxMarker>,
/// priority of the transaction
pub priority: TransactionPriority,
}
// == impl PoolTransaction ==
impl PoolTransaction {
pub fn new(transaction: PendingTransaction) -> Self {
Self {
pending_transaction: transaction,
requires: vec![],
provides: vec![],
priority: TransactionPriority(0),
}
}
/// Returns the hash of this transaction
pub fn hash(&self) -> TxHash {
*self.pending_transaction.hash()
}
/// Returns the max fee per gas of this transaction
pub fn max_fee_per_gas(&self) -> u128 {
self.pending_transaction.transaction.max_fee_per_gas()
}
/// Returns the type of the transaction
pub fn tx_type(&self) -> u8 {
self.pending_transaction.transaction.ty()
}
}
impl fmt::Debug for PoolTransaction {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "Transaction {{ ")?;
write!(fmt, "hash: {:?}, ", &self.pending_transaction.hash())?;
write!(fmt, "requires: [{}], ", hex_fmt_many(self.requires.iter()))?;
write!(fmt, "provides: [{}], ", hex_fmt_many(self.provides.iter()))?;
write!(fmt, "raw tx: {:?}", &self.pending_transaction)?;
write!(fmt, "}}")?;
Ok(())
}
}
impl TryFrom<AnyRpcTransaction> for PoolTransaction {
type Error = eyre::Error;
fn try_from(value: AnyRpcTransaction) -> Result<Self, Self::Error> {
let typed_transaction = FoundryTxEnvelope::try_from(value)?;
let pending_transaction = PendingTransaction::new(typed_transaction)?;
Ok(Self {
pending_transaction,
requires: vec![],
provides: vec![],
priority: TransactionPriority(0),
})
}
}
/// A waiting pool of transaction that are pending, but not yet ready to be included in a new block.
///
/// Keeps a set of transactions that are waiting for other transactions
#[derive(Clone, Debug, Default)]
pub struct PendingTransactions {
/// markers that aren't yet provided by any transaction
required_markers: HashMap<TxMarker, HashSet<TxHash>>,
/// mapping of the markers of a transaction to the hash of the transaction
waiting_markers: HashMap<Vec<TxMarker>, TxHash>,
/// the transactions that are not ready yet are waiting for another tx to finish
waiting_queue: HashMap<TxHash, PendingPoolTransaction>,
}
// == impl PendingTransactions ==
impl PendingTransactions {
/// Returns the number of transactions that are currently waiting
pub fn len(&self) -> usize {
self.waiting_queue.len()
}
pub fn is_empty(&self) -> bool {
self.waiting_queue.is_empty()
}
/// Clears internal state
pub fn clear(&mut self) {
self.required_markers.clear();
self.waiting_markers.clear();
self.waiting_queue.clear();
}
/// Returns an iterator over all transactions in the waiting pool
pub fn transactions(&self) -> impl Iterator<Item = Arc<PoolTransaction>> + '_ {
self.waiting_queue.values().map(|tx| tx.transaction.clone())
}
/// Adds a transaction to Pending queue of transactions
pub fn add_transaction(&mut self, tx: PendingPoolTransaction) -> Result<(), PoolError> {
assert!(!tx.is_ready(), "transaction must not be ready");
assert!(
!self.waiting_queue.contains_key(&tx.transaction.hash()),
"transaction is already added"
);
if let Some(replace) = self
.waiting_markers
.get(&tx.transaction.provides)
.and_then(|hash| self.waiting_queue.get(hash))
{
// check if underpriced
if tx.transaction.max_fee_per_gas() < replace.transaction.max_fee_per_gas() {
warn!(target: "txpool", "pending replacement transaction underpriced [{:?}]", tx.transaction.hash());
return Err(PoolError::ReplacementUnderpriced(Box::new(
tx.transaction.as_ref().clone(),
)));
}
}
// add all missing markers
for marker in &tx.missing_markers {
self.required_markers.entry(marker.clone()).or_default().insert(tx.transaction.hash());
}
// also track identifying markers
self.waiting_markers.insert(tx.transaction.provides.clone(), tx.transaction.hash());
// add tx to the queue
self.waiting_queue.insert(tx.transaction.hash(), tx);
Ok(())
}
/// Returns true if given transaction is part of the queue
pub fn contains(&self, hash: &TxHash) -> bool {
self.waiting_queue.contains_key(hash)
}
/// Returns the transaction for the hash if it's pending
pub fn get(&self, hash: &TxHash) -> Option<&PendingPoolTransaction> {
self.waiting_queue.get(hash)
}
/// This will check off the markers of pending transactions.
///
/// Returns the those transactions that become unlocked (all markers checked) and can be moved
/// to the ready queue.
pub fn mark_and_unlock(
&mut self,
markers: impl IntoIterator<Item = impl AsRef<TxMarker>>,
) -> Vec<PendingPoolTransaction> {
let mut unlocked_ready = Vec::new();
for mark in markers {
let mark = mark.as_ref();
if let Some(tx_hashes) = self.required_markers.remove(mark) {
for hash in tx_hashes {
let tx = self.waiting_queue.get_mut(&hash).expect("tx is included;");
tx.mark(mark);
if tx.is_ready() {
let tx = self.waiting_queue.remove(&hash).expect("tx is included;");
self.waiting_markers.remove(&tx.transaction.provides);
unlocked_ready.push(tx);
}
}
}
}
unlocked_ready
}
/// Removes the transactions associated with the given hashes
///
/// Returns all removed transactions.
pub fn remove(&mut self, hashes: Vec<TxHash>) -> Vec<Arc<PoolTransaction>> {
let mut removed = vec![];
for hash in hashes {
if let Some(waiting_tx) = self.waiting_queue.remove(&hash) {
self.waiting_markers.remove(&waiting_tx.transaction.provides);
for marker in waiting_tx.missing_markers {
let remove = if let Some(required) = self.required_markers.get_mut(&marker) {
required.remove(&hash);
required.is_empty()
} else {
false
};
if remove {
self.required_markers.remove(&marker);
}
}
removed.push(waiting_tx.transaction)
}
}
removed
}
}
/// A transaction in the pool
#[derive(Clone)]
pub struct PendingPoolTransaction {
pub transaction: Arc<PoolTransaction>,
/// markers required and have not been satisfied yet by other transactions in the pool
pub missing_markers: HashSet<TxMarker>,
/// timestamp when the tx was added
pub added_at: Instant,
}
// == impl PendingTransaction ==
impl PendingPoolTransaction {
/// Creates a new `PendingTransaction`.
///
/// Determines the markers that are still missing before this transaction can be moved to the
/// ready queue.
pub fn new(transaction: PoolTransaction, provided: &HashMap<TxMarker, TxHash>) -> Self {
let missing_markers = transaction
.requires
.iter()
.filter(|marker| {
// is true if the marker is already satisfied either via transaction in the pool
!provided.contains_key(&**marker)
})
.cloned()
.collect();
Self { transaction: Arc::new(transaction), missing_markers, added_at: Instant::now() }
}
/// Removes the required marker
pub fn mark(&mut self, marker: &TxMarker) {
self.missing_markers.remove(marker);
}
/// Returns true if transaction has all requirements satisfied.
pub fn is_ready(&self) -> bool {
self.missing_markers.is_empty()
}
}
impl fmt::Debug for PendingPoolTransaction {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "PendingTransaction {{ ")?;
write!(fmt, "added_at: {:?}, ", self.added_at)?;
write!(fmt, "tx: {:?}, ", self.transaction)?;
write!(fmt, "missing_markers: {{{}}}", hex_fmt_many(self.missing_markers.iter()))?;
write!(fmt, "}}")
}
}
pub struct TransactionsIterator {
all: HashMap<TxHash, ReadyTransaction>,
awaiting: HashMap<TxHash, (usize, PoolTransactionRef)>,
independent: BTreeSet<PoolTransactionRef>,
_invalid: HashSet<TxHash>,
}
// == impl TransactionsIterator ==
impl TransactionsIterator {
/// Depending on number of satisfied requirements insert given ref
/// either to awaiting set or to best set.
fn independent_or_awaiting(&mut self, satisfied: usize, tx_ref: PoolTransactionRef) {
if satisfied >= tx_ref.transaction.requires.len() {
// If we have satisfied all deps insert to best
self.independent.insert(tx_ref);
} else {
// otherwise we're still awaiting for some deps
self.awaiting.insert(tx_ref.transaction.hash(), (satisfied, tx_ref));
}
}
}
impl Iterator for TransactionsIterator {
type Item = Arc<PoolTransaction>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let best = self.independent.iter().next_back()?.clone();
let best = self.independent.take(&best)?;
let hash = best.transaction.hash();
let ready =
if let Some(ready) = self.all.get(&hash).cloned() { ready } else { continue };
// Insert transactions that just got unlocked.
for hash in &ready.unlocks {
// first check local awaiting transactions
let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) {
satisfied += 1;
Some((satisfied, tx_ref))
// then get from the pool
} else {
self.all
.get(hash)
.map(|next| (next.requires_offset + 1, next.transaction.clone()))
};
if let Some((satisfied, tx_ref)) = res {
self.independent_or_awaiting(satisfied, tx_ref)
}
}
return Some(best.transaction);
}
}
}
/// transactions that are ready to be included in a block.
#[derive(Clone, Debug, Default)]
pub struct ReadyTransactions {
/// keeps track of transactions inserted in the pool
///
/// this way we can determine when transactions where submitted to the pool
id: u64,
/// markers that are provided by `ReadyTransaction`s
provided_markers: HashMap<TxMarker, TxHash>,
/// transactions that are ready
ready_tx: Arc<RwLock<HashMap<TxHash, ReadyTransaction>>>,
/// independent transactions that can be included directly and don't require other transactions
/// Sorted by their id
independent_transactions: BTreeSet<PoolTransactionRef>,
}
// == impl ReadyTransactions ==
impl ReadyTransactions {
    /// Returns an iterator over all transactions
    ///
    /// The iterator works on a cloned snapshot of the ready queue, so it is not affected by
    /// concurrent mutation of the pool.
    pub fn get_transactions(&self) -> TransactionsIterator {
        TransactionsIterator {
            all: self.ready_tx.read().clone(),
            independent: self.independent_transactions.clone(),
            awaiting: Default::default(),
            _invalid: Default::default(),
        }
    }
    /// Clears the internal state
    pub fn clear(&mut self) {
        self.provided_markers.clear();
        self.ready_tx.write().clear();
        self.independent_transactions.clear();
    }
    /// Returns true if the transaction is part of the queue.
    pub fn contains(&self, hash: &TxHash) -> bool {
        self.ready_tx.read().contains_key(hash)
    }
    /// Returns the number of ready transactions without cloning the snapshot
    pub fn len(&self) -> usize {
        self.ready_tx.read().len()
    }
    /// Returns true if there are no ready transactions
    pub fn is_empty(&self) -> bool {
        self.ready_tx.read().is_empty()
    }
    /// Returns the transaction for the hash if it's in the ready pool but not yet mined
    pub fn get(&self, hash: &TxHash) -> Option<ReadyTransaction> {
        self.ready_tx.read().get(hash).cloned()
    }
    /// Returns the markers currently provided by transactions in the ready queue, keyed by the
    /// hash of the providing transaction.
    pub fn provided_markers(&self) -> &HashMap<TxMarker, TxHash> {
        &self.provided_markers
    }
    /// Returns the next internal transaction id (monotonically increasing, wrapping on
    /// overflow). Used to order transactions by insertion time.
    fn next_id(&mut self) -> u64 {
        let id = self.id;
        self.id = self.id.wrapping_add(1);
        id
    }
    /// Adds a new transactions to the ready queue.
    ///
    /// Returns the transactions that were replaced by `tx` (same markers, lower gas price).
    ///
    /// # Panics
    ///
    /// If the pending transaction is not ready ([`PendingPoolTransaction::is_ready`])
    /// or the transaction is already included.
    pub fn add_transaction(
        &mut self,
        tx: PendingPoolTransaction,
    ) -> Result<Vec<Arc<PoolTransaction>>, PoolError> {
        assert!(tx.is_ready(), "transaction must be ready");
        assert!(
            !self.ready_tx.read().contains_key(&tx.transaction.hash()),
            "transaction already included"
        );
        let (replaced_tx, unlocks) = self.replaced_transactions(&tx.transaction)?;
        let id = self.next_id();
        let hash = tx.transaction.hash();
        let mut independent = true;
        let mut requires_offset = 0;
        let mut ready = self.ready_tx.write();
        // Add links to transactions that unlock the current one
        for mark in &tx.transaction.requires {
            // Check if the transaction that satisfies the mark is still in the queue.
            if let Some(other) = self.provided_markers.get(mark) {
                let tx = ready.get_mut(other).expect("hash included;");
                tx.unlocks.push(hash);
                // tx still depends on other tx
                independent = false;
            } else {
                // the marker is already satisfied externally (e.g. mined), no dependency edge
                requires_offset += 1;
            }
        }
        // update markers
        for mark in tx.transaction.provides.iter().cloned() {
            self.provided_markers.insert(mark, hash);
        }
        let transaction = PoolTransactionRef { id, transaction: tx.transaction };
        // add to the independent set
        if independent {
            self.independent_transactions.insert(transaction.clone());
        }
        // insert to ready queue
        ready.insert(hash, ReadyTransaction { transaction, unlocks, requires_offset });
        Ok(replaced_tx)
    }
    /// Removes and returns those transactions that got replaced by the `tx`
    ///
    /// Also returns the hashes of the transactions that the removed ones unlocked, so the
    /// caller can link them to `tx` instead.
    fn replaced_transactions(
        &mut self,
        tx: &PoolTransaction,
    ) -> Result<(Vec<Arc<PoolTransaction>>, Vec<TxHash>), PoolError> {
        // check if we are replacing transactions
        let remove_hashes: HashSet<_> =
            tx.provides.iter().filter_map(|mark| self.provided_markers.get(mark)).collect();
        // early exit if we are not replacing anything.
        if remove_hashes.is_empty() {
            return Ok((Vec::new(), Vec::new()));
        }
        // check if we're replacing the same transaction and if it can be replaced
        let mut unlocked_tx = Vec::new();
        {
            // construct a list of unlocked transactions
            // also check for transactions that shouldn't be replaced because underpriced
            let ready = self.ready_tx.read();
            for to_remove in remove_hashes.iter().filter_map(|hash| ready.get(*hash)) {
                // if we're attempting to replace a transaction that provides the exact same markers
                // (addr + nonce) then we check for gas price
                if to_remove.provides() == tx.provides {
                    // check if underpriced
                    if tx.pending_transaction.transaction.max_fee_per_gas()
                        <= to_remove.max_fee_per_gas()
                    {
                        warn!(target: "txpool", "ready replacement transaction underpriced [{:?}]", tx.hash());
                        return Err(PoolError::ReplacementUnderpriced(Box::new(tx.clone())));
                    } else {
                        trace!(target: "txpool", "replacing ready transaction [{:?}] with higher gas price [{:?}]", to_remove.transaction.transaction.hash(), tx.hash());
                    }
                }
                unlocked_tx.extend(to_remove.unlocks.iter().copied())
            }
        }
        let remove_hashes = remove_hashes.into_iter().copied().collect::<Vec<_>>();
        let new_provides = tx.provides.iter().cloned().collect::<HashSet<_>>();
        let removed_tx = self.remove_with_markers(remove_hashes, Some(new_provides));
        Ok((removed_tx, unlocked_tx))
    }
    /// Removes the transactions from the ready queue and returns the removed transactions.
    /// This will also remove all transactions that depend on those.
    pub fn clear_transactions(&mut self, tx_hashes: &[TxHash]) -> Vec<Arc<PoolTransaction>> {
        self.remove_with_markers(tx_hashes.to_vec(), None)
    }
    /// Removes the transactions that provide the marker
    ///
    /// This will also remove all transactions that lead to the transaction that provides the
    /// marker.
    pub fn prune_tags(&mut self, marker: TxMarker) -> Vec<Arc<PoolTransaction>> {
        let mut removed_tx = vec![];
        // the markers to remove
        let mut remove = vec![marker];
        while let Some(marker) = remove.pop() {
            let res = self
                .provided_markers
                .remove(&marker)
                .and_then(|hash| self.ready_tx.write().remove(&hash));
            if let Some(tx) = res {
                let unlocks = tx.unlocks;
                self.independent_transactions.remove(&tx.transaction);
                let tx = tx.transaction.transaction;
                // also prune previous transactions
                {
                    let hash = tx.hash();
                    let mut ready = self.ready_tx.write();
                    // Yields the markers of the transaction that provided `marker`, but only
                    // once it no longer unlocks anything else (so it can be pruned as well).
                    let mut previous_markers = |marker| -> Option<Vec<TxMarker>> {
                        let prev_hash = self.provided_markers.get(marker)?;
                        let tx2 = ready.get_mut(prev_hash)?;
                        // remove hash
                        if let Some(idx) = tx2.unlocks.iter().position(|i| i == &hash) {
                            tx2.unlocks.swap_remove(idx);
                        }
                        if tx2.unlocks.is_empty() {
                            Some(tx2.transaction.transaction.provides.clone())
                        } else {
                            None
                        }
                    };
                    // find previous transactions
                    for marker in &tx.requires {
                        if let Some(mut tags_to_remove) = previous_markers(marker) {
                            remove.append(&mut tags_to_remove);
                        }
                    }
                }
                // add the transactions that just got unlocked to independent set
                for hash in unlocks {
                    if let Some(tx) = self.ready_tx.write().get_mut(&hash) {
                        tx.requires_offset += 1;
                        if tx.requires_offset == tx.transaction.transaction.requires.len() {
                            self.independent_transactions.insert(tx.transaction.clone());
                        }
                    }
                }
                // finally, remove the markers that this transaction provides
                // `marker` itself was already removed from `provided_markers` above, so it must
                // resolve to `None` in the assert below.
                let current_marker = &marker;
                for marker in &tx.provides {
                    let removed = self.provided_markers.remove(marker);
                    assert_eq!(
                        removed,
                        if current_marker == marker { None } else { Some(tx.hash()) },
                        "The pool contains exactly one transaction providing given tag; the removed transaction
claims to provide that tag, so it has to be mapped to it's hash; qed"
                    );
                }
                removed_tx.push(tx);
            }
        }
        removed_tx
    }
    /// Removes transactions and those that depend on them and satisfy at least one marker in the
    /// given filter set.
    ///
    /// If `marker_filter` is `None`, every marker provided by a removed transaction is
    /// invalidated, which also cascades the removal to all dependents.
    pub fn remove_with_markers(
        &mut self,
        mut tx_hashes: Vec<TxHash>,
        marker_filter: Option<HashSet<TxMarker>>,
    ) -> Vec<Arc<PoolTransaction>> {
        let mut removed = Vec::new();
        let mut ready = self.ready_tx.write();
        while let Some(hash) = tx_hashes.pop() {
            if let Some(mut tx) = ready.remove(&hash) {
                let invalidated = tx.transaction.transaction.provides.iter().filter(|mark| {
                    marker_filter.as_ref().map(|filter| !filter.contains(&**mark)).unwrap_or(true)
                });
                let mut removed_some_marks = false;
                // remove entries from provided_markers
                for mark in invalidated {
                    removed_some_marks = true;
                    self.provided_markers.remove(mark);
                }
                // Remove the removed transaction's hash from the `unlocks` list of every
                // transaction that provides one of its required markers. Note: providers track
                // their *dependents'* hashes in `unlocks` (see `add_transaction`), so we must
                // search for `hash` (the removed tx), not the provider's own hash.
                for mark in &tx.transaction.transaction.requires {
                    if let Some(provider_hash) = self.provided_markers.get(mark)
                        && let Some(provider) = ready.get_mut(provider_hash)
                        && let Some(idx) = provider.unlocks.iter().position(|i| i == &hash)
                    {
                        provider.unlocks.swap_remove(idx);
                    }
                }
                // remove from the independent set
                self.independent_transactions.remove(&tx.transaction);
                if removed_some_marks {
                    // remove all transactions that the current one unlocks
                    tx_hashes.append(&mut tx.unlocks);
                }
                // remove transaction
                removed.push(tx.transaction.transaction);
            }
        }
        removed
    }
}
/// A reference to a transaction in the pool
///
/// Ordered by `priority` first, ties broken by the internal insertion `id` in reverse
/// (see the `Ord` impl below).
#[derive(Clone, Debug)]
pub struct PoolTransactionRef {
    /// actual transaction
    pub transaction: Arc<PoolTransaction>,
    /// identifier used to internally compare the transaction in the pool
    pub id: u64,
}
// Equality is derived from the total order (`Ord`), which is reflexive, so `Eq` holds.
impl Eq for PoolTransactionRef {}
impl PartialEq<Self> for PoolTransactionRef {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl PartialOrd<Self> for PoolTransactionRef {
    /// `PoolTransactionRef` has a total order, so this always delegates to [`Ord::cmp`].
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for PoolTransactionRef {
    /// Orders by `priority` first; ties are broken by comparing the insertion ids in reverse,
    /// so of two refs with equal priority the one inserted earlier (smaller `id`) compares
    /// greater.
    fn cmp(&self, other: &Self) -> Ordering {
        self.transaction
            .priority
            .cmp(&other.transaction.priority)
            .then_with(|| other.id.cmp(&self.id))
    }
}
/// A transaction in the ready queue, together with its dependency bookkeeping.
#[derive(Clone, Debug)]
pub struct ReadyTransaction {
    /// ref to the actual transaction
    pub transaction: PoolTransactionRef,
    /// tracks the transactions that get unlocked by this transaction
    pub unlocks: Vec<TxHash>,
    /// amount of required markers that are inherently provided (i.e. were already satisfied
    /// when this transaction entered the ready queue)
    pub requires_offset: usize,
}
impl ReadyTransaction {
    /// Returns the markers provided by the underlying transaction.
    pub fn provides(&self) -> &[TxMarker] {
        &self.transaction.transaction.provides
    }
    /// Returns the max fee per gas of the underlying transaction.
    pub fn max_fee_per_gas(&self) -> u128 {
        self.transaction.transaction.max_fee_per_gas()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Markers must be deterministic: the same `(nonce, address)` pair always maps to the
    /// same marker, while different nonces yield different markers.
    #[test]
    fn can_id_txs() {
        let addr = Address::random();
        assert_eq!(to_marker(1, addr), to_marker(1, addr));
        assert_ne!(to_marker(2, addr), to_marker(1, addr));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/pool/mod.rs | crates/anvil/src/eth/pool/mod.rs | //! # Transaction Pool implementation
//!
//! The transaction pool is responsible for managing a set of transactions that can be included in
//! upcoming blocks.
//!
//! The main task of the pool is to prepare an ordered list of transactions that are ready to be
//! included in a new block.
//!
//! Each imported block can affect the validity of transactions already in the pool.
//! The miner expects the most up-to-date transactions when attempting to create a new block.
//! After being included in a block, a transaction should be removed from the pool; this process is
//! called _pruning_ and due to separation of concerns is triggered externally.
//! The pool essentially performs the following services:
//! * import transactions
//! * order transactions
//! * provide ordered set of transactions that are ready for inclusion
//! * prune transactions
//!
//! Each transaction in the pool contains markers that it _provides_ or _requires_. This property is
//! used to determine whether it can be included in a block (transaction is ready) or whether it
//! still _requires_ other transactions to be mined first (transaction is pending).
//! A transaction is associated with the nonce of the account it's sent from. A unique identifying
//! marker for a transaction is therefore the pair `(nonce + account)`. An incoming transaction with
//! a `nonce > nonce on chain` will _require_ `(nonce - 1, account)` first, before it is ready to be
//! included in a block.
//!
//! This implementation is adapted from <https://github.com/paritytech/substrate/tree/master/client/transaction-pool>
use crate::{
eth::{
error::PoolError,
pool::transactions::{
PendingPoolTransaction, PendingTransactions, PoolTransaction, ReadyTransactions,
TransactionsIterator, TxMarker,
},
},
mem::storage::MinedBlockOutcome,
};
use alloy_primitives::{Address, TxHash};
use alloy_rpc_types::txpool::TxpoolStatus;
use anvil_core::eth::transaction::PendingTransaction;
use futures::channel::mpsc::{Receiver, Sender, channel};
use parking_lot::{Mutex, RwLock};
use std::{collections::VecDeque, fmt, sync::Arc};
pub mod transactions;
/// Transaction pool that performs validation.
#[derive(Default)]
pub struct Pool {
    /// processes all pending transactions; holds the ready and pending queues
    inner: RwLock<PoolInner>,
    /// listeners for new ready transactions; notified on every transaction that becomes ready
    transaction_listener: Mutex<Vec<Sender<TxHash>>>,
}
// == impl Pool ==
impl Pool {
    /// Returns an iterator that yields all transactions that are currently ready
    pub fn ready_transactions(&self) -> TransactionsIterator {
        self.inner.read().ready_transactions()
    }
    /// Returns all transactions that are not ready to be included in a block yet
    pub fn pending_transactions(&self) -> Vec<Arc<PoolTransaction>> {
        self.inner.read().pending_transactions.transactions().collect()
    }
    /// Returns the _pending_ transaction for that `hash` if it exists in the mempool
    pub fn get_transaction(&self, hash: TxHash) -> Option<PendingTransaction> {
        self.inner.read().get_transaction(hash)
    }
    /// Returns the number of tx that are ready and queued for further execution
    pub fn txpool_status(&self) -> TxpoolStatus {
        // Note: naming differs here compared to geth's `TxpoolStatus`
        // Acquire the lock once so both counts come from the same consistent snapshot.
        let inner = self.inner.read();
        let pending: u64 = inner.ready_transactions.len().try_into().unwrap_or(0);
        let queued: u64 = inner.pending_transactions.len().try_into().unwrap_or(0);
        TxpoolStatus { pending, queued }
    }
    /// Invoked when a set of transactions ([Self::ready_transactions()]) was executed.
    ///
    /// This will remove the transactions from the pool.
    pub fn on_mined_block(&self, outcome: MinedBlockOutcome) -> PruneResult {
        let MinedBlockOutcome { block_number, included, invalid } = outcome;
        // remove invalid transactions from the pool
        self.remove_invalid(invalid.into_iter().map(|tx| tx.hash()).collect());
        // prune all the markers the mined transactions provide
        let res = self
            .prune_markers(block_number, included.into_iter().flat_map(|tx| tx.provides.clone()));
        trace!(target: "txpool", "pruned transaction markers {:?}", res);
        res
    }
    /// Removes ready transactions for the given iterator of identifying markers.
    ///
    /// For each marker we can remove transactions in the pool that either provide the marker
    /// directly or are a dependency of the transaction associated with that marker.
    pub fn prune_markers(
        &self,
        block_number: u64,
        markers: impl IntoIterator<Item = TxMarker>,
    ) -> PruneResult {
        debug!(target: "txpool", ?block_number, "pruning transactions");
        self.inner.write().prune_markers(markers)
    }
    /// Adds a new transaction to the pool
    ///
    /// Notifies all registered listeners about the transaction and any transactions it
    /// promoted, but only if the transaction became ready.
    pub fn add_transaction(&self, tx: PoolTransaction) -> Result<AddedTransaction, PoolError> {
        let added = self.inner.write().add_transaction(tx)?;
        if let AddedTransaction::Ready(ref ready) = added {
            self.notify_listener(ready.hash);
            // also notify promoted transactions
            for promoted in ready.promoted.iter().copied() {
                self.notify_listener(promoted);
            }
        }
        Ok(added)
    }
    /// Adds a new transaction listener to the pool that gets notified about every new ready
    /// transaction
    pub fn add_ready_listener(&self) -> Receiver<TxHash> {
        const TX_LISTENER_BUFFER_SIZE: usize = 2048;
        let (tx, rx) = channel(TX_LISTENER_BUFFER_SIZE);
        self.transaction_listener.lock().push(tx);
        rx
    }
    /// Returns true if this pool already contains the transaction
    pub fn contains(&self, tx_hash: &TxHash) -> bool {
        self.inner.read().contains(tx_hash)
    }
    /// Remove the given transactions from the pool
    pub fn remove_invalid(&self, tx_hashes: Vec<TxHash>) -> Vec<Arc<PoolTransaction>> {
        self.inner.write().remove_invalid(tx_hashes)
    }
    /// Remove transactions by sender
    pub fn remove_transactions_by_address(&self, sender: Address) -> Vec<Arc<PoolTransaction>> {
        self.inner.write().remove_transactions_by_address(sender)
    }
    /// Removes a single transaction from the pool
    ///
    /// This is similar to `[Pool::remove_invalid()]` but for a single transaction.
    ///
    /// **Note**: this will also drop any transaction that depend on the `tx`
    pub fn drop_transaction(&self, tx: TxHash) -> Option<Arc<PoolTransaction>> {
        trace!(target: "txpool", "Dropping transaction: [{:?}]", tx);
        let removed = {
            let mut pool = self.inner.write();
            pool.ready_transactions.remove_with_markers(vec![tx], None)
        };
        trace!(target: "txpool", "Dropped transactions: {:?}", removed);
        // return only the explicitly requested transaction, not its dropped dependents
        removed.into_iter().find(|t| *t.pending_transaction.hash() == tx)
    }
    /// Removes all transactions from the pool
    pub fn clear(&self) {
        let mut pool = self.inner.write();
        pool.clear();
    }
    /// notifies all listeners about the transaction
    ///
    /// Listeners whose channel is closed are dropped; listeners whose channel is merely full
    /// are kept and the notification is lost for them (logged as a warning).
    fn notify_listener(&self, hash: TxHash) {
        let mut listener = self.transaction_listener.lock();
        // this is basically a retain but with mut reference
        for n in (0..listener.len()).rev() {
            let mut listener_tx = listener.swap_remove(n);
            let retain = match listener_tx.try_send(hash) {
                Ok(()) => true,
                Err(e) => {
                    if e.is_full() {
                        warn!(
                            target: "txpool",
                            "[{:?}] Failed to send tx notification because channel is full",
                            hash,
                        );
                        true
                    } else {
                        false
                    }
                }
            };
            if retain {
                listener.push(listener_tx)
            }
        }
    }
}
/// A Transaction Pool
///
/// Contains all transactions that are ready to be executed
#[derive(Debug, Default)]
struct PoolInner {
    /// Transactions whose required markers are all satisfied; ready for block inclusion.
    ready_transactions: ReadyTransactions,
    /// Transactions that still wait on markers provided by other transactions.
    pending_transactions: PendingTransactions,
}
// == impl PoolInner ==
impl PoolInner {
    /// Returns an iterator over transactions that are ready.
    fn ready_transactions(&self) -> TransactionsIterator {
        self.ready_transactions.get_transactions()
    }
    /// Clears both the ready and the pending queue.
    fn clear(&mut self) {
        self.ready_transactions.clear();
        self.pending_transactions.clear();
    }
    /// checks both pools for the matching transaction
    ///
    /// Returns `None` if the transaction does not exist in the pool
    fn get_transaction(&self, hash: TxHash) -> Option<PendingTransaction> {
        if let Some(pending) = self.pending_transactions.get(&hash) {
            return Some(pending.transaction.pending_transaction.clone());
        }
        Some(
            self.ready_transactions.get(&hash)?.transaction.transaction.pending_transaction.clone(),
        )
    }
    /// Returns an iterator over all transactions in the pool filtered by the sender
    pub fn transactions_by_sender(
        &self,
        sender: Address,
    ) -> impl Iterator<Item = Arc<PoolTransaction>> + '_ {
        let pending_txs = self
            .pending_transactions
            .transactions()
            .filter(move |tx| tx.pending_transaction.sender().eq(&sender));
        let ready_txs = self
            .ready_transactions
            .get_transactions()
            .filter(move |tx| tx.pending_transaction.sender().eq(&sender));
        pending_txs.chain(ready_txs)
    }
    /// Returns true if this pool already contains the transaction
    fn contains(&self, tx_hash: &TxHash) -> bool {
        self.pending_transactions.contains(tx_hash) || self.ready_transactions.contains(tx_hash)
    }
    /// Adds a transaction, routing it to the ready queue if all its required markers are
    /// already satisfied, otherwise to the pending queue.
    fn add_transaction(&mut self, tx: PoolTransaction) -> Result<AddedTransaction, PoolError> {
        if self.contains(&tx.hash()) {
            warn!(target: "txpool", "[{:?}] Already imported", tx.hash());
            return Err(PoolError::AlreadyImported(Box::new(tx)));
        }
        let tx = PendingPoolTransaction::new(tx, self.ready_transactions.provided_markers());
        trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash(), tx);
        // If all markers are not satisfied import to future
        if !tx.is_ready() {
            let hash = tx.transaction.hash();
            self.pending_transactions.add_transaction(tx)?;
            return Ok(AddedTransaction::Pending { hash });
        }
        self.add_ready_transaction(tx)
    }
    /// Adds the transaction to the ready queue
    ///
    /// Also promotes any pending transactions that the newly added transaction (transitively)
    /// unlocks, and detects replacement cycles.
    fn add_ready_transaction(
        &mut self,
        tx: PendingPoolTransaction,
    ) -> Result<AddedTransaction, PoolError> {
        let hash = tx.transaction.hash();
        trace!(target: "txpool", "adding ready transaction [{:?}]", hash);
        let mut ready = ReadyTransaction::new(hash);
        let mut tx_queue = VecDeque::from([tx]);
        // tracks whether we're processing the given `tx`
        let mut is_new_tx = true;
        // take first transaction from the list
        while let Some(current_tx) = tx_queue.pop_front() {
            // also add the transaction that the current transaction unlocks
            // (fixed: `&current_tx` was corrupted by an HTML-entity mangle)
            tx_queue.extend(
                self.pending_transactions.mark_and_unlock(&current_tx.transaction.provides),
            );
            let current_hash = current_tx.transaction.hash();
            // try to add the transaction to the ready pool
            match self.ready_transactions.add_transaction(current_tx) {
                Ok(replaced_transactions) => {
                    if !is_new_tx {
                        ready.promoted.push(current_hash);
                    }
                    // tx removed from ready pool
                    ready.removed.extend(replaced_transactions);
                }
                Err(err) => {
                    // failed to add transaction
                    if is_new_tx {
                        debug!(target: "txpool", "[{:?}] Failed to add tx: {:?}", current_hash, err);
                        return Err(err);
                    } else {
                        ready.discarded.push(current_hash);
                    }
                }
            }
            is_new_tx = false;
        }
        // check for a cycle where importing a transaction resulted in pending transactions to be
        // added while removing current transaction. in which case we move this transaction back to
        // the pending queue
        if ready.removed.iter().any(|tx| *tx.hash() == hash) {
            self.ready_transactions.clear_transactions(&ready.promoted);
            return Err(PoolError::CyclicTransaction);
        }
        Ok(AddedTransaction::Ready(ready))
    }
    /// Prunes the transactions that provide the given markers
    ///
    /// This will effectively remove those transactions that satisfy the markers and transactions
    /// from the pending queue might get promoted to if the markers unlock them.
    pub fn prune_markers(&mut self, markers: impl IntoIterator<Item = TxMarker>) -> PruneResult {
        let mut imports = vec![];
        let mut pruned = vec![];
        for marker in markers {
            // mark as satisfied and store the transactions that got unlocked
            imports.extend(self.pending_transactions.mark_and_unlock(Some(&marker)));
            // prune transactions; `marker` is no longer needed afterwards, so move it
            pruned.extend(self.ready_transactions.prune_tags(marker));
        }
        let mut promoted = vec![];
        let mut failed = vec![];
        for tx in imports {
            let hash = tx.transaction.hash();
            match self.add_ready_transaction(tx) {
                Ok(res) => promoted.push(res),
                Err(e) => {
                    warn!(target: "txpool", "Failed to promote tx [{:?}] : {:?}", hash, e);
                    failed.push(hash)
                }
            }
        }
        PruneResult { pruned, failed, promoted }
    }
    /// Remove the given transactions from the pool
    pub fn remove_invalid(&mut self, tx_hashes: Vec<TxHash>) -> Vec<Arc<PoolTransaction>> {
        // early exit in case there is no invalid transactions.
        if tx_hashes.is_empty() {
            return vec![];
        }
        trace!(target: "txpool", "Removing invalid transactions: {:?}", tx_hashes);
        let mut removed = self.ready_transactions.remove_with_markers(tx_hashes.clone(), None);
        removed.extend(self.pending_transactions.remove(tx_hashes));
        trace!(target: "txpool", "Removed invalid transactions: {:?}", removed);
        removed
    }
    /// Remove transactions by sender address
    pub fn remove_transactions_by_address(&mut self, sender: Address) -> Vec<Arc<PoolTransaction>> {
        let tx_hashes =
            self.transactions_by_sender(sender).map(move |tx| tx.hash()).collect::<Vec<TxHash>>();
        if tx_hashes.is_empty() {
            return vec![];
        }
        trace!(target: "txpool", "Removing transactions: {:?}", tx_hashes);
        let mut removed = self.ready_transactions.remove_with_markers(tx_hashes.clone(), None);
        removed.extend(self.pending_transactions.remove(tx_hashes));
        trace!(target: "txpool", "Removed transactions: {:?}", removed);
        removed
    }
}
/// Represents the outcome of a prune
pub struct PruneResult {
    /// a list of added transactions that a pruned marker satisfied
    pub promoted: Vec<AddedTransaction>,
    /// all transactions that failed to be promoted and now are discarded
    pub failed: Vec<TxHash>,
    /// all transactions that were pruned from the ready pool
    pub pruned: Vec<Arc<PoolTransaction>>,
}
impl fmt::Debug for PruneResult {
    // Manual impl: only transaction hashes are printed instead of full transaction objects,
    // keeping log output compact.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(fmt, "PruneResult {{ ")?;
        write!(
            fmt,
            "promoted: {:?}, ",
            self.promoted.iter().map(|tx| *tx.hash()).collect::<Vec<_>>()
        )?;
        write!(fmt, "failed: {:?}, ", self.failed)?;
        write!(
            fmt,
            "pruned: {:?}, ",
            self.pruned.iter().map(|tx| *tx.pending_transaction.hash()).collect::<Vec<_>>()
        )?;
        write!(fmt, "}}")?;
        Ok(())
    }
}
/// Outcome of successfully adding a transaction to the ready queue.
#[derive(Clone, Debug)]
pub struct ReadyTransaction {
    /// the hash of the submitted transaction
    hash: TxHash,
    /// transactions promoted to the ready queue
    promoted: Vec<TxHash>,
    /// transaction that failed and became discarded
    discarded: Vec<TxHash>,
    /// Transactions removed from the Ready pool
    removed: Vec<Arc<PoolTransaction>>,
}
impl ReadyTransaction {
    /// Creates a new, empty outcome for the submitted transaction `hash`.
    pub fn new(hash: TxHash) -> Self {
        Self { hash, promoted: Vec::new(), discarded: Vec::new(), removed: Vec::new() }
    }
}
/// The result of successfully importing a transaction into the pool.
#[derive(Clone, Debug)]
pub enum AddedTransaction {
    /// transaction was successfully added and being processed
    Ready(ReadyTransaction),
    /// Transaction was successfully added but not yet queued for processing
    Pending {
        /// the hash of the submitted transaction
        hash: TxHash,
    },
}
impl AddedTransaction {
    /// Returns the hash of the transaction that was submitted, regardless of which queue it
    /// ended up in.
    pub fn hash(&self) -> &TxHash {
        match self {
            Self::Pending { hash } => hash,
            Self::Ready(ready) => &ready.hash,
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.